Dataset columns:
query: string (lengths 9 to 3.4k)
document: string (lengths 9 to 87.4k)
metadata: dict
negatives: sequence (lengths 4 to 101)
negative_scores: sequence (lengths 4 to 101)
document_score: string (lengths 3 to 10)
document_rank: string (102 distinct values)
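As a quick orientation, rows with this schema can be consumed roughly as sketched below. The dataset path is a placeholder, since this dump does not name the dataset; the comments only restate the column summary above.

from datasets import load_dataset

# Placeholder path: the dump does not state the actual dataset name.
ds = load_dataset("namespace/dataset-name", split="train")

row = ds[0]
print(row["query"])            # natural-language description of the target code
print(row["document"])         # the positive code snippet
print(len(row["negatives"]))   # 4 to 101 hard-negative snippets
print(row["negative_scores"])  # scores aligned with the negatives
print(row["document_score"], row["document_rank"])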
WAV file is loaded and transformed into Fourier Series. This Fourier Series is limited.
def wav_to_fourier(wav_file, rate_limit=6000.0, step=1.0):
    rate, aud_data = read(wav_file)
    # Should be mono
    if len(aud_data) != len(aud_data.ravel()):
        aud_data = np.mean(aud_data, axis=1)

    # Zero padding
    len_data = aud_data.shape[0]
    channel_1 = np.zeros(2 ** (int(np.ceil(np.log2(len_data)))))
    channel_1[0:len_data] = aud_data

    # Fourier analysis
    fourier = np.abs(np.fft.fft(channel_1))
    freq = np.linspace(0, rate, fourier.shape[0])
    freq, fourier = limit_by_freq(freq, fourier, upper_limit=rate_limit)
    freq, fourier = group_by_freq(freq, fourier, step=step)

    # Normalize so the maximum amplitude is 100.0
    a = np.max(np.abs(fourier)) / 100.0
    fourier = fourier / a
    return freq, fourier
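The function above relies on two helpers that this row does not define: limit_by_freq (it appears as the document of a later row in this dump) and group_by_freq. Below is a minimal sketch of group_by_freq, assuming from its name and call site that it only bins the spectrum into buckets of `step` Hz; the binning rule, the scipy import for the bare `read`, and the usage comment are illustrative assumptions, not taken from the source.

import numpy as np
from scipy.io.wavfile import read  # assumed source of the bare `read` used above

def group_by_freq(freq, features, step=1.0):
    # Hypothetical helper: sum the spectrum into bins of `step` Hz.
    # The real implementation is not included in this record.
    freq = np.asarray(freq, dtype=float)
    features = np.asarray(features, dtype=float)
    edges = np.arange(freq.min(), freq.max() + step, step)
    bin_idx = np.digitize(freq, edges)
    grouped_freq = np.array([freq[bin_idx == i].mean() for i in np.unique(bin_idx)])
    grouped_features = np.array([features[bin_idx == i].sum() for i in np.unique(bin_idx)])
    return grouped_freq, grouped_features

# Example call, assuming "tone.wav" exists and limit_by_freq is defined as in the later row:
# freq, fourier = wav_to_fourier("tone.wav", rate_limit=6000.0, step=1.0)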
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch", "def _to_wav(self):\n self._status = 0\n fname = fm.file2wav(self.get_filename()) \n if fname != self.get_filename(): # can change the name\n self._set_filename(fname) # in case of wave transcoding\n self._status = 1", "def processFile(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):\n length = length*divide\n #fs = sample rate, sound = multichannel sound signal\n try:\n fs1, sound = wavfile.read(filename)\n except ValueError:\n print(str(filename) + ' failed to process')\n return 'failed'\n if fs1 != fs_in:\n raise ValueError('Sampling rate should be ' + str(fs_in) + ' for: ' + filename)\n sig1 = sound[:0] #left channel\n pre_emphasis = 0.97\n sig1 = np.append(sig1[0], sig1[1:] - pre_emphasis * sig1[:-1])\n\n \n fs2, sig2 = downsample(sig1,fs1,q)\n N2 = len(sig2)\n sig3 = sig2[N2//2-length:N2//2+length]\n #print(len(sig3))\n\n FFT = abs(scipy.fft(sig3))\n FFT_side = FFT[range(len(FFT)//2)]\n #freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n #plt.plot(freqs,FFT)\n if len(FFT_side) != length:\n print('ERROR MESSAGE DETAILS')\n print('filename: ' + filename)\n print('length = ' + str(length))\n print('fs_in = ' + str(fs_in))\n print('q = ' + str(q))\n print('divide = ' + str(divide))\n total_time = len(sig1)/fs1\n print('total_time = ' + str(total_time))\n print('Please check: length < total_time*fs//(2*q)')\n print('Check: ' + str(length) + ' < ' + str(total_time*fs1//(2*q)))\n raise ValueError('Length FFT_side != length: ' + str(len(FFT_side)) + ' != ' + str(length))\n \n \n FFT_log = []\n # normalize FFT\n for value in FFT_side:\n value = np.log(value)\n FFT_log.append(value)\n max_val = getMax(FFT_log)[1]\n FFT_norm = []\n for value in FFT_log:\n FFT_norm.append(value/max_val)\n \n \n FFT_side = np.array(FFT_norm)\n FFT_divided = FFT_side[range(length//divide)]\n #plot = True\n if plot == True:\n freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n freqs_divided = np.array(freqs[range(len(FFT_divided))])\n plt.plot(freqs_divided,FFT_divided) # plotting the complete fft spectrum\n plt.show()\n \n return FFT_divided", "def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file", "def load_wav_to_torch(self, full_path):\n data, sampling_rate = load(full_path, sr=self.sampling_rate)\n data = 0.95 * normalize(data)\n\n if self.augment:\n amplitude = np.random.uniform(low=0.3, 
high=1.0)\n data = data * amplitude\n\n return torch.from_numpy(data).float(), sampling_rate", "def from_wav(cls, fps):\n fpi = iter(fps)\n fs, data = wavfile.read(next(fpi))\n hlist = [data] + [wavfile.read(fp)[1] for fp in fpi]\n\n h = np.array(hlist)\n if data.dtype in [np.uint8, np.int16, np.int32]:\n lim_orig = (np.iinfo(data.dtype).min, np.iinfo(data.dtype).max)\n lim_new = (-1.0, 1.0)\n h = _rescale(h, lim_orig, lim_new).astype(np.double)\n\n return cls.from_time(fs, h)", "def load_wav_to_torch(full_path):\n sampling_rate, data = read(full_path)\n return torch.from_numpy(data).float(), sampling_rate", "def load_wavfile(total_frame, wav_file):\n wav_data, sr = sf.load(wav_file, sr=audio_params.SAMPLE_RATE, dtype='float32')\n assert sf.get_duration(wav_data, sr) > 1\n \n features = waveform_to_feature(wav_data, sr)\n features = np.resize(features, (int(total_frame), features.shape[1], features.shape[2]))\n\n return features", "def load_wav_16k_mono(self, filename):\n filename = utils.get_file_path('webapp/static/processed', filename)\n\n file_contents = tf.io.read_file(filename)\n wav, sample_rate = tf.audio.decode_wav(file_contents,\n desired_channels=1)\n wav = tf.squeeze(wav, axis=-1)\n sample_rate = tf.cast(sample_rate, dtype=tf.int64)\n wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)\n return wav", "def read_wav(filename, offset=0, nframes=None, dtype=torch.double):\n\n if nframes is None: # Load whole file\n fs, x = wavfile.read(filename, mmap=False)\n x = torch.tensor(x, dtype=dtype)\n x.unsqueeze_(dim=0)\n\n else: # Load a part\n with wave.open(filename) as f:\n fs = f.getframerate()\n f.setpos(offset)\n buff = f.readframes(nframes)\n x = torch.tensor(np.frombuffer(buff, np.int16), dtype=dtype)\n x.unsqueeze_(dim=0)\n x -= x.mean()\n\n return x.to(DEVICE), fs", "def load_wav(wav_file):\n rate, data = wavfile.read(wav_file)\n return rate, data", "def _wave(self):\n try:\n return wave.open(StringIO(self.contents))\n except wave.Error, err:\n err.message += \"\\nInvalid wave file: %s\" % self\n err.args = (err.message,)\n raise", "def load_wav(wav_path, downsample, n_steps):\n data = scipy.io.wavfile.read(wav_path)[1]\n data = scipy.signal.decimate(data, downsample) \n out = np.zeros((1, n_steps))\n out[0, n_steps - np.shape(data)[0]:] = data\n return out", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def read_audiofile(audio_name,cutToLength):\n fs, data = wavfile.read(audio_name)\n # sa.play_buffer(audio_data, num_channels, bydeftes_per_sample,sample_rate)\n #play_obj = sa.play_buffer(data,1,2,fs)\n #play_obj.stop()\n # delete one column. 
Make mono channel\n if data.shape[1]>1:\n data = numpy.delete(data,1,1)\n #downsample if signal is broad\n if fs>24000:\n data = numpy.delete(data, numpy.s_[::2], 0)\n fs = int(fs/2)\n \n data = data[data!=0]\n data = numpy.delete(data,numpy.s_[ int(cutToLength*fs):len(data)] )\n return data", "def export_wav(\n filename_wav: Path,\n tradb: vae.io.TraDatabase,\n channel: int,\n time_start: Optional[float] = None,\n time_stop: Optional[float] = None,\n decimation_factor: int = 1,\n):\n y, fs = tradb.read_continuous_wave(\n channel=channel,\n time_start=time_start,\n time_stop=time_stop,\n time_axis=False,\n show_progress=False,\n raw=True, # read as ADC values (int16)\n )\n\n if decimation_factor > 1:\n y = signal.decimate(y, decimation_factor).astype(np.int16)\n fs //= decimation_factor\n\n wavfile.write(filename_wav, fs, y)", "def remove_silence_audio() -> None:\n # Read the wav file and get rate and list of data\n rate, data = scipy.io.wavfile.read('Test.wav')\n\n # Create list for data of amended wav file\n data2 = []\n\n # Loop through data of original file and add data that doesn't meed condition: values >= -10 and <= 10\n for i in range(len(data)):\n if data[i][0] >= -10 and data[i][0] <= 10:\n pass\n else:\n data2.append(data[i])\n\n # Create NumPy array from revised data\n data2 = np.asarray(data2, dtype=np.int16)\n\n # Write new data to wav file\n scipy.io.wavfile.write('Test.wav', rate, data2)\n\n return None", "def load_wav(wav_filepath):\n wv, _ = librosa.load(wav_filepath, sr=44100, mono=False) \n return wv", "def single_analyze_wav(self, filePath):\n\n tChopped, vChopped, fVals,\\\n powerFFT, peakFreqs, peakAmps = Utils.AnalyzeFFT(filePath, tChop=self.settings['processing']['tChop'],\n detail=self.settings['processing']['detail'])\n\n self.analyzeDone.emit(tChopped, vChopped, fVals, powerFFT, peakFreqs, peakAmps, filePath)\n self.update_table(peakFreqs, peakAmps)", "def read_wav(fname, normalize=True):\n # samps_int16: N x C or N\n # N: number of samples\n # C: number of channels\n sampling_rate, samps_int16 = wavfile.read(fname)\n # N x C => C x N\n samps = samps_int16.astype(np.float)\n # tranpose because I used to put channel axis first\n if samps.ndim != 1:\n samps = np.transpose(samps)\n # normalize like MATLAB and librosa\n if normalize:\n samps = samps / MAX_INT16\n return sampling_rate, samps", "def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def load_wav_file(fname):\n fp = wave.open(fname, \"rb\")\n channels = fp.getnchannels()\n bitrate = fp.getsampwidth() * 8\n samplerate = fp.getframerate()\n buf = fp.readframes(fp.getnframes())\n return SoundData(buf, channels, bitrate, len(buf), samplerate)", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def 
apply_fourier_transform(chunked_audio):\n pass", "def get_audio(filepath, restrict=restrict_range, use_librosa=False, normalize=True):\n try:\n audio, fs = librosa.load(path=filepath, sr=22050)\n except Exception as e:\n fs, audio_ro = scipy.io.wavfile.read(filepath)\n audio = np.copy(audio_ro) / 32767\n if fs != 22050:\n print(\"incorrect fs\")\n return None\n # frame-wise calculation\n if restrict:\n start = start_sec * fs\n end = end_sec * fs\n audio = np.array(audio[start:end], dtype=np.float32)\n if normalize is True:\n audio = (cqt_params['normalizing_constant'] * audio) / np.std(audio[np.abs(audio > 0.00001)])\n return audio", "def get_features(filename, training=True):\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n wav, _ = librosa.load(filename, \n sr=SAMPLE_RATE, \n mono=True,\n dtype=np.float64)\n energy = np.abs(wav)\n silence_threshold = np.percentile(energy, 95)\n offsets = np.where(energy > silence_threshold)[0]\n if training:\n audio_voice_only = wav[offsets[0]:offsets[-1]]\n else:\n #avoid cutting off too abruptly\n audio_voice_only = wav[offsets[0]:offsets[-1] + 4800]\n if training:\n if len(audio_voice_only) >= 160 * NUM_FRAMES:\n start_ = np.random.randint(len(audio_voice_only) - 160 * NUM_FRAMES + 1)\n end_ = start_ + 160 * NUM_FRAMES - 1\n audio_voice_only = audio_voice_only[start_:end_]\n else:\n return [0], [0]\n wav = librosa.util.normalize(audio_voice_only)\n #deep speaker uses preemphasis here, I do not, because I want the model to correctly transform lower\n #frequencies, too. I apply preemphasis to spectrum before putting data into model embedder instead.\n wav = lfilter([1., -PREEMPH], [1.], wav)[1:]\n #f0 extraction (most time consuming operation in this function)\n f0, timeaxis = pyworld.harvest(wav, SAMPLE_RATE, frame_period=FRAME_PERIOD, f0_floor=71.0, f0_ceil=800.0)\n sp = pyworld.cheaptrick(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n ap = pyworld.d4c(wav, f0, timeaxis, SAMPLE_RATE, fft_size=NFFT)\n mfe = sp2mfe(sp)\n lmfe = np.log(mfe)\n mean = np.mean(lmfe)\n std = np.std(lmfe)\n nmfe = (lmfe - mean) / std\n \n if training:\n return nmfe.T, f0\n else:\n out_len = len(f0) // 4 * 4\n# out_len = len(f0)\n return nmfe[:out_len].T, mean, std, sp[:out_len], f0[:out_len], ap[:out_len]", "def _record_wav(stream, N, CHUNK):\n frames = []\n for i in range(N):\n data = stream.read(CHUNK)\n frames.append(data)\n return np.fromstring(b\"\".join(frames), 'Int16')", "def __init__(self, secs, path, concat=True):\n audio = np.empty((1,))\n secs_loaded = 0\n files_loaded = 0\n files = glob.glob(path + \"*.wav\")\n for file in files:\n (sr, samples) = wavfile.read(file)\n audio = np.concatenate((audio, samples))\n\n # Keep track of the duration (in seconds) of our audio clip\n dur = len(samples) / sr\n secs_loaded = secs_loaded + dur\n files_loaded = files_loaded + 1\n if (secs_loaded >= secs):\n break\n if not concat:\n break\n \n # We're assuming that all files use the same sampling frequency.\n # Truncate audio samples so that we end up with the duration specified.\n total_samples = int(round(secs * sr))\n if total_samples > len(audio):\n warnings.warn(\"Found fewer than %.2f seconds of audio. 
\"\n \"Returning %.2f seconds of audio.\" % (secs, len(audio) / sr)) \n audio = audio[0:total_samples]\n\n self.audio = audio\n self.sampling_rate = sr", "def wavread(fname):\n fh = wave.open(fname,'rb')\n (nchannels, sampwidth, framerate, nframes, comptype, compname) = fh.getparams()\n if sampwidth == 2:\n frames = fh.readframes(nframes * nchannels)\n dn = struct.unpack_from('%dh' % nframes*nchannels, frames)\n if nchannels > 1:\n out = np.array([dn[i::nchannels] for i in range(nchannels)])/float(2**15)\n else:\n out = np.array(dn)/float(2**15)\n else:\n print('not a 16 bit wav-file')\n out = [0]\n fh.close()\n return (out,framerate)", "def read(filename):\n\n fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]", "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux", "def read_wave(f):\n # w will be an object of type wave.Wav_read.\n file = file_utils.open_or_fd(f, encoding=None)\n wav = wavio.read(file)\n # see https://github.com/WarrenWeckesser/wavio/blob/master/wavio.py for\n # format of `wav`\n\n # we want data as (num_channels, num_samples).. 
this is the\n # format that seems most compatible with convolutional code and\n # resampling.\n data = wav.data.swapaxes(0, 1)\n if data.dtype == np.int16:\n data = data.astype(np.float32) * (1.0 / 2**15)\n elif data.dtype == np.int24:\n data = data.astype(np.float32) * (1.0 / 2**23)\n else:\n if data.dtype != np.float32:\n raise RuntimeError(\"Array returned from wavio.read had \"\n \"unexpected dtype \".format(data.dtype))\n return (data, float(wav.rate))", "def spectrum_misc(f):\n\n end = False\n while not end:\n try:\n line = f.readline().split()\n wavnew = [float(w) for w in line]\n wav = np.append(wav, wavnew)\n prevwav = wavnew[-1]\n\n except BaseException:\n end = True\n aflux = f.readlines()\n for line in aflux:\n line = re.sub(r\"-10\\d\", \"e-100\", line)\n flux = np.append(flux, line.rstrip().split())\n\n wav, flux = np.array(wav), np.array(flux)\n return wav, flux", "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "def waveReadAsFloat(wavFileIn):\n \n sr, wavdata = scipy.io.wavfile.read(wavFileIn)\n \n if wavdata.dtype is np.dtype(np.int16):\n wavdata = np.array(wavdata, dtype=np.float32) / \\\n np.power(2.0, 16-1)\n elif wavdata.dtype is np.dtype(np.int32):\n wavdata = np.array(wavdata, dtype=np.float32) / \\\n np.power(2.0, 32-1)\n elif wavdata.dtype is np.dtype(np.float32):\n pass\n else:\n print(\"Unknown waveform format %s\" % (wavFileIn))\n sys.exit(1)\n return sr, wavdata", "def read_sound(self, inFile):\n\n # Python can natively only read \"wav\" files. To be flexible, use \"ffmpeg\" for conversion for other formats\n if not os.path.exists(inFile):\n print('{0} does not exist!'.format(inFile))\n raise FileNotFoundError\n \n (root, ext) = os.path.splitext(inFile)\n if ext[1:].lower() != 'wav':\n if self.ffmpeg_info.ffmpeg == None:\n print('Sorry, need FFMPEG for non-WAV files!')\n self.rate = None\n self.data = None\n raise NoFFMPEG_Error\n \n outFile = root + '.wav'\n cmd = [self.ffmpeg_info.ffmpeg, '-i', inFile, outFile, '-y']\n subprocess.run(cmd)\n print('Infile converted from ' + ext + ' to \".wav\"')\n \n inFile = outFile\n self.source = outFile\n\n self.rate, self.data = read(inFile)\n \n # Set the filename\n self.source = inFile\n \n # Make sure that the data are in some integer format\n # Otherwise, e.g. 
Windows has difficulty playing the sound\n # Note that \"self.source\" is set to \"None\", in order to\n # play the correct, converted file with \"play\"\n if not np.issubdtype(self.data.dtype, np.integer):\n self.generate_sound(self.data, self.rate)\n \n self._setInfo()\n print('data read in!')", "def read_wav(wav_file):\n w = wave.open(wav_file)\n n = 60 * 10000\n if w.getnframes() < n * 2:\n raise ValueError('Le fichier est trop court')\n frames = w.readframes(n)\n wav_data1 = struct.unpack('%dh' % n, frames)\n frames = w.readframes(n)\n wav_data2 = struct.unpack('%dh' % n, frames)\n return wav_data1, wav_data2", "def load_wav(file_path):\n sample_rate, data = wavfile.read(file_path)\n return data, sample_rate", "def float2wav(rawData, wavFile, bit=16, samplingRate = 16000):\n rawData = rawData * np.power(2.0, bit-1)\n rawData[rawData >= np.power(2.0, bit-1)] = np.power(2.0, bit-1)-1\n rawData[rawData < -1*np.power(2.0, bit-1)] = -1*np.power(2.0, bit-1)\n \n # write as signed 16bit PCM\n if bit == 16:\n rawData = np.asarray(rawData, dtype=np.int16)\n elif bit == 32:\n rawData = np.asarray(rawData, dtype=np.int32)\n else:\n print(\"Only be able to save wav in int16 and int32 type\")\n print(\"Save to int16\")\n rawData = np.asarray(rawData, dtype=np.int16)\n scipy.io.wavfile.write(wavFile, samplingRate, rawData)\n return", "def readAudioFile(path):\n\n extension = os.path.splitext(path)[1]\n\n try:\n # Commented below, as we don't need this\n # #if extension.lower() == '.wav':\n # #[Fs, x] = wavfile.read(path)\n # if extension.lower() == '.aif' or extension.lower() == '.aiff':\n # s = aifc.open(path, 'r')\n # nframes = s.getnframes()\n # strsig = s.readframes(nframes)\n # x = numpy.fromstring(strsig, numpy.short).byteswap()\n # Fs = s.getframerate()\n if extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg':\n try:\n audiofile = AudioSegment.from_file(path)\n except:\n print(\"Error: file not found or other I/O error. 
\"\n \"(DECODING FAILED)\")\n return -1 ,-1\n\n if audiofile.sample_width == 2:\n data = numpy.fromstring(audiofile._data, numpy.int16)\n elif audiofile.sample_width == 4:\n data = numpy.fromstring(audiofile._data, numpy.int32)\n else:\n return -1, -1\n Fs = audiofile.frame_rate\n x = numpy.array(data[0::audiofile.channels]).T\n else:\n print(\"Error in readAudioFile(): Unknown file type!\")\n return -1, -1\n except IOError:\n print(\"Error: file not found or other I/O error.\")\n return -1, -1\n\n if x.ndim == 2:\n if x.shape[1] == 2:\n x = x.flatten()\n\n return Fs, x", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def test_ulaw(self):\n duration = 1\n num_channels = 1\n sample_rate = 8000\n path = self.get_temp_path(\"data.wav\")\n sox_utils.gen_audio_file(\n path, sample_rate=sample_rate, num_channels=num_channels, bit_depth=8, encoding=\"u-law\", duration=duration\n )\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == 8\n assert info.encoding == \"ULAW\"", "def wavPlayer(data, rate, scale=False, autoplay=False):\r\n #if np.max(abs(data)) > 1 or scale:\r\n # data = data/np.max(abs(data))\r\n #data = (2**13*data).astype(np.int16)\r\n \r\n buffer = BytesIO()\r\n buffer.write(b'RIFF')\r\n buffer.write(b'\\x00\\x00\\x00\\x00')\r\n buffer.write(b'WAVE')\r\n \r\n buffer.write(b'fmt ')\r\n if data.ndim == 1:\r\n noc = 1\r\n else:\r\n noc = data.shape[1]\r\n \r\n bits = data.dtype.itemsize * 8\r\n sbytes = rate*(bits // 8)*noc\r\n ba = noc * (bits // 8)\r\n buffer.write(struct.pack('<ihHIIHH', 16, 1, noc, rate, sbytes, ba, bits))\r\n\r\n # data chunk\r\n buffer.write(b'data')\r\n buffer.write(struct.pack('<i', data.nbytes))\r\n\r\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\r\n data = data.byteswap()\r\n\r\n buffer.write(data.astype(np.int16).tostring())\r\n\r\n # Determine file size and place it in correct position at start of the file.\r\n size = buffer.tell()\r\n buffer.seek(4)\r\n buffer.write(struct.pack('<i', size-8))\r\n \r\n val = buffer.getvalue()\r\n autoplay = \" autoplay=\\\"autoplay\\\"\"*autoplay + \"\"\r\n \r\n src = \"\"\"<audio controls=\"controls\" style=\"width:600px\"{autoplay}>\r\n <source controls src=\"data:audio/wav;base64,{base64}\" type=\"audio/wav\" />\r\n Your browser does not support the audio element.\r\n </audio>\"\"\".format(base64=base64.b64encode(val).decode(\"ascii\"), autoplay=autoplay)\r\n display(HTML(src))", "def downmixWAV(self, wavf: str) -> None:\n # HACK: https://github.com/jiaaro/pydub/issues/129\n # FIXME: a reliable method to get number of wav channels\n multichannel = True\n try:\n w = wave.open(wavf, 'rb')\n if w.getnchannels() < 3:\n multichannel = False\n w.close()\n except Exception:\n pass\n if multichannel:\n newwavf = wavf[:-4] + \"-stereo.wav\"\n FNULL = open(os.devnull, 'w')\n subprocess.call(['ffmpeg', '-y', '-i', wavf, '-c:a', 'pcm_s24le', '-ac', '2', newwavf], stdout=FNULL, stderr=FNULL)\n FNULL.close()\n os.remove(wavf)\n os.rename(newwavf, wavf)", "def 
generate_wavplot(song_name):\n\n filepath = features[features.inferred_name.str.title() == song_name].feature_file.values[0]\n rate, wave = wavfile.read(filepath)\n mono = np.mean(wave, axis=1)\n mono.shape\n plt.figure(figsize=(20,6))\n plt.axis('off')\n plt.plot(mono[::mono.shape[0]//6000], color='white')\n plt.tight_layout;\n friendly_song_name = '_'.join(song_name.split()).lower()\n output_filepath = './static/wavplots/' + friendly_song_name + '.png'\n plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, transparent=True)\n return output_filepath", "def spectre_csv(f):\n \n skip = 0\n while True:\n try: \n wav, flux = np.loadtxt(f, delimiter = ',',\n skiprows = skip, unpack = True)\n \n except ValueError:\n # Si les première lignes ont un en-tête\n skip += 1\n \n else:\n break\n \n return wav,flux", "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def readNextGenSpectrum(fname=''):\n\n print('Reading : ', fname)\n\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n sdum = dum.split()\n teff = float(sdum[0])\n logg = float(sdum[1])\n mph = float(sdum[2])\n dum = rfile.readline()\n nwav = float(dum.split()[0])\n\n bigline = []\n dum = rfile.readline()\n while dum.strip() != '':\n sdum = dum.split()\n for i in range(len(sdum)):\n bigline.append(float(sdum[i]))\n dum = rfile.readline()\n\n bigline = np.array(bigline)\n # Convert wavelength from angstrom to micron\n wav = bigline[:nwav] / 1e4\n inu = bigline[nwav:2 * nwav]\n bnu = bigline[nwav * 2:nwav * 3]\n\n ii = wav.argsort()\n wav = wav[ii]\n inu = inu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n bnu = bnu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n\n #\n # The unit is now erg/s/cm/Hz/ster\n #\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}", "def load_wav_file(file_path: str):\n rate, data = wavfile.read(file_path)\n return rate, data", "def _default_wave(wavemin=None, wavemax=None, dw=0.2):\n from desimodel.io import load_throughput\n\n if wavemin is None:\n wavemin = load_throughput('b').wavemin - 10.0\n if wavemax is None:\n wavemax = load_throughput('z').wavemax + 10.0\n\n return np.arange(round(wavemin, 1), wavemax, dw)", "def read_wave(path):\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def data_to_waves(self, data):\n raise NotImplementedError", "def waveFloatToPCMFile(waveData, wavFile, bit=16, sr=16000):\n \n # recover to 16bit range [-32768, +32767]\n rawData = waveData * np.power(2.0, bit-1)\n rawData[rawData >= np.power(2.0, bit-1)] = np.power(2.0, bit-1)-1\n rawData[rawData < -1*np.power(2.0, bit-1)] = -1*np.power(2.0, bit-1)\n \n # write as signed 16bit PCM\n if bit == 16:\n rawData = np.asarray(rawData, dtype=np.int16)\n elif bit == 32:\n rawData = np.asarray(rawData, dtype=np.int32)\n else:\n print(\"Only be able to save wav in int16 and int32 type\")\n print(\"Save to int16\")\n rawData = np.asarray(rawData, dtype=np.int16)\n scipy.io.wavfile.write(wavFile, sr, rawData)\n return", "def decode_audio(fp, fs=None, mono=False, normalize=False, fastwav=False, measured = False):\n if measured:\n fp = fp.decode('latin').replace(\"clean\", \"measured\")\n\n if 
fastwav:\n # Read with scipy wavread (fast).\n _fs, _wav = wavread(fp)\n if fs is not None and fs != _fs:\n raise NotImplementedError('Fastwav cannot resample audio.')\n if _wav.dtype == np.int16:\n _wav = _wav.astype(np.float32)\n _wav /= 32768.\n elif _wav.dtype == np.float32:\n pass\n else:\n raise NotImplementedError('Fastwav cannot process atypical WAV files.')\n else:\n # TODO: librosa currently optional due to issue with cluster installation\n import librosa\n # Decode with librosa load (slow but supports file formats like mp3).\n _wav, _fs = librosa.core.load(fp, sr=fs, mono=False)\n if _wav.ndim == 2:\n _wav = np.swapaxes(_wav, 0, 1)\n\n assert _wav.dtype == np.float32\n\n # At this point, _wav is np.float32 either [nsamps,] or [nsamps, nch].\n # We want [nsamps, 1, nch] to mimic 2D shape of spectral feats.\n if _wav.ndim == 1:\n nsamps = _wav.shape[0]\n nch = 1\n else:\n nsamps, nch = _wav.shape\n _wav = np.reshape(_wav, [nsamps, 1, nch])\n \n # Average channels if we want monaural audio.\n if mono:\n _wav = np.mean(_wav, 2, keepdims=True)\n\n if normalize:\n _wav /= np.max(np.abs(_wav))\n\n return _wav", "def read_wavetxt(path):\n with open(path) as f:\n for line in f.readlines():\n line = line.strip()\n if 'SampleFrequence' in line:\n freq = int(line[16:])\n elif 'DataInput' in line:\n series = np.array(line[10:].split(',')).astype(np.float64)\n return (freq, series)", "def wave(self):\n return self._wave", "def wav_to_raw(path, log=False):\n rate, data = wavfile.read(path)\n if log:\n m, s = divmod(float(len(data))/rate, 60)\n h, m = divmod(m, 60)\n logging.info(\"Original recording length: %d h %d m %d s\" % (h, m, s))\n try:\n if data.shape[1] == 2:\n # If stereo (2-channel), take the average of the two channels.\n data = 0.5 * (data[:, 0] + data[:, 1])\n if log:\n logging.info('Stereo audio')\n except IndexError:\n if log:\n logging.info('Mono audio')\n return rate, data", "def wav_to_prosodic(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n f0 = pitch.get_value_at_time(time)\n f0_nan = 0\n if np.isnan(f0):\n f0 = 0\n f0_nan = 1\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([f0, f0_nan, int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time", "def spectrogram_from_file(filename, step=10, window=20, max_freq=None,\n eps=1e-14, time_up=12, time_down=2):\n sample_rate, audio = wavfile.read(filename) \n audio = audio / np.sqrt(np.sum(np.square(audio)))\n if audio.ndim >= 2:\n audio = np.mean(audio, 1)\n if max_freq is None:\n max_freq = sample_rate / 2\n if max_freq > sample_rate / 2:\n raise ValueError(\"max_freq must not be greater than half of \"\n \" sample rate\")\n if step > window:\n raise ValueError(\"step size must not be greater than window size\")\n hop_length = int(0.001 * step * sample_rate)\n fft_length = int(0.001 * window * sample_rate)\n pxx, freqs = spectrogram(\n audio, fft_length=fft_length, sample_rate=sample_rate,\n hop_length=hop_length)\n ind = np.where(freqs <= max_freq)[0][-1] + 1\n\n # audio record time limit\n is_saved = False\n sample_time = int(len(audio) / sample_rate * 1000)\n if sample_time <= time_up * 1000 and sample_time >= time_down * 1000:\n is_saved = 
True\n\n return np.transpose(np.log(pxx[:ind, :] + eps)), is_saved", "def fingerprint_wave(file):\n\n\twav = wave.open(file, 'rb')\n\tif wav.getnchannels() == 1:\n\t\tstereo = 0\n\telif wav.getnchannels() == 2:\n\t\tstereo = 1\n\telse:\n\t\twav.close()\n\t\traise Exception(\"Only 1 or 2 channel WAV files supported\")\n\n\twidth = wav.getsampwidth()\n\tif width != 2:\n\t\twav.close()\n\t\traise Exception(\"Only 16-bit sample widths supported\")\n\n\tsrate = wav.getframerate()\t\n\n\tbuffer = wav.readframes(wav.getnframes())\n\twav.close()\n\n\tms = (len(buffer) / 2)/(srate/1000)\n\tif stereo == 1:\n\t\tms = ms / 2\n\t\n\tfprint = libofa.create_print(buffer, libofa.BYTE_ORDER_LE, len(buffer) / 2,\n\t\t\t\t\t\t\t\tsrate, stereo);\n\n\treturn (fprint, ms)", "def load(path, rate= 8000, nfft= 512, hop= 256):\n wav, _ = librosa.load(path, rate)\n frame = librosa.stft(wav, nfft, hop)\n return frame[:-1].T", "def __newSampleFile(self):\n self.__newFileName()\n self.__sampleFile = wav.open(self.__fileName, self.OPEN_MODE)\n self.__sampleFile.setnchannels(NUM_CHANNELS)\n self.__sampleFile.setsampwidth(self.__audio.get_sample_size(self.FORMAT))\n self.__sampleFile.setframerate(FS)", "def set_fname_encoder(self):\n\n fp = open(self.meta_path, 'r')\n wav_names = []\n next(fp)\n for i, line in tqdm(enumerate(fp)):\n audio_name, _, _, _ = line.split()\n wav_name = os.path.basename(audio_name)\n wav_names.append(wav_name)\n self.fname_encoder.fit(wav_names)", "def test_wav(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def reconstruct_waveform(self, mel, n_iter=32):\n amp_mel = self._denormalize(mel)\n S = librosa.feature.inverse.mel_to_stft(\n amp_mel,\n power=1,\n sr=self.config['sampling_rate'],\n n_fft=self.config['n_fft'],\n fmin=self.config['f_min'],\n fmax=self.config['f_max'])\n wav = librosa.core.griffinlim(\n S,\n n_iter=n_iter,\n hop_length=self.config['hop_length'],\n win_length=self.config['win_length'])\n return wav", "def wav2mfcc(file_path, max_len=44, n_mfcc=20):", "def filesample(filename):\n sampling_rate, samples = wavfile.read(filename)\n times = np.arange(len(samples)) / sampling_rate\n return samples, sampling_rate", "def mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def read_wav(wav_file):\n w = wave.open(wav_file)\n n = 60 * 10000\n if w.getnframes() < n * 2:\n raise ValueError('Wave file too short')\n frames = w.readframes(n)\n wav_data1 = struct.unpack('%dh' % n, frames)\n frames = w.readframes(n)\n wav_data2 = struct.unpack('%dh' % n, frames)\n return wav_data1, wav_data2", "def load_wav_dic(wav_dic):\n noisy_path, clean_path = wav_dic[\"noisy\"], wav_dic[\"clean\"]\n noisy, fs = sf.read(noisy_path, dtype=\"float32\")\n clean, fs = sf.read(clean_path, 
dtype=\"float32\")\n return noisy, clean, fs", "def play_wav_on_index(audio_data, stream_object):\n\n stream_object.write(audio_data)", "def wav2mfccDataAugmnetation(file_path):\r\n #Load .wav to array\r\n augmentArray =[]\r\n wave, _ = librosa.load(file_path, mono=Constants.channelMap[Tunable.tunableDict['channels']], sr=Tunable.tunableDict['samplingRate'])\r\n for i in range(Tunable.tunableDict['pitchShiftLower'], Tunable.tunableDict['pitchShiftUpper']):\r\n wave = librosa.effects.pitch_shift(wave, sr=Tunable.tunableDict['samplingRate'], n_steps=i)\r\n wave = np.asfortranarray(wave)\r\n\r\n #Convert to Mel-Frequency Cepstral Coefficients\r\n mfcc = librosa.feature.mfcc(wave, sr=Tunable.tunableDict['samplingRate'], n_mfcc=Tunable.tunableDict['buckets'])\r\n\r\n # If maximum length exceeds mfcc lengths then pad the remaining ones\r\n if Tunable.tunableDict['maxLen'] > mfcc.shape[1]:\r\n pad_width = Tunable.tunableDict['maxLen'] - mfcc.shape[1]\r\n mfcc = np.pad(mfcc, pad_width=((0, 0), (0, pad_width)), mode='minimum')\r\n\r\n # Else cutoff the remaining parts\r\n else:\r\n mfcc = mfcc[:, :Tunable.tunableDict['maxLen']]\r\n augmentArray.append(mfcc)\r\n\r\n return augmentArray", "def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)", "def play(self):\n assert pyaudio is not None, (\"You need to have pyaudio installed to \"\n \"use the play_wav function\")\n filename = os.path.join(tempfile.gettempdir(),\n '6003_wave_%s.wav' % abs(hash(tuple(self.samples))))\n self.save(filename)\n f = wave.open(filename, 'r')\n try:\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(),\n rate=f.getframerate(),\n output=True)\n\n data = f.readframes(10240)\n while data:\n stream.write(data)\n data = f.readframes(10240)\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n finally:\n f.close()\n os.unlink(filename)", "def main():\n\n import os\n import numpy as np\n\n # Re-set FFMPEG\n # ffmpeg = FFMPEG_info()\n # ffmpeg.set()\n\n # Import a file, and play the sound\n # data_dir = r'/home/thomas/Coding/scikit-sound/sksound/tests'\n data_dir = 'tests'\n in_file = 'a1.wav'\n\n full_file = os.path.join(data_dir, in_file)\n try:\n # mySound = Sound(full_file)\n # mySound.play()\n # time.sleep(mySound.duration)\n mySound2 = Sound()\n mySound2.play()\n except NoFFMPEG_Error:\n pass\n\n # Test with self-generated data\n rate = 22050\n dt = 1./rate\n t = np.arange(0,0.5,dt)\n freq = 880\n x = np.sin(2*np.pi*freq*t)\n sounddata = np.int16(x*2**13)\n\n in_sound = Sound(inData=sounddata, inRate=rate)\n in_sound.summary()\n in_sound.play()\n time.sleep(in_sound.duration)\n\n print('hi')\n\n # Test if type conversion works\n in_sound2 = Sound(inData=x, inRate=rate)\n in_sound2.play()\n\n # Test with GUI\n in_sound = Sound()\n in_sound.play()\n print(in_sound.summary())\n out = in_sound.get_info()\n print(out)\n in_sound.write_wav()", "def read(self, filename, normalize=True):\n if self.gcp == False:\n\n\t\t filepath = self.mixed_dir + filename\n\t\t sf, time_signal = wavfile.read(filepath, mmap=True)\n\n else:\n\n blob = list(self.bucket.list_blobs(prefix=filename))[0]\n # download blob as string\n file_as_string = blob.download_as_string()\n sf, time_signal = wavfile.read(io.BytesIO(file_as_string), mmap=True)\n\n\t\tif normalize == True:\n\t\t\t\n # normalization, assuming 2^15 is the highest possible quantization\n\t\t\ttime_signal = 
time_signal/np.power(2,15)\n\n\t\treturn time_signal", "def spectrum_csv(f):\n\n skip = 0\n while True:\n try:\n wav, flux = np.loadtxt(f, delimiter=\",\", skiprows=skip, unpack=True)\n\n except ValueError:\n # If the first lines have a header\n skip += 1\n\n else:\n break\n\n return wav, flux", "def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y", "def decode_wav(raw_data):\n return _kaldi_module.decode_wav(raw_data)", "def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels", "def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: snares.encode('snares.mp3')\n hats 
= self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')", "def load_audio(path, target_fs=None):\n y, fs = sf.read(path)\n if y.ndim>1:\n y = np.mean(y, axis=1)\n if target_fs is not None and fs!=target_fs:\n #print('Resampling %d->%d...' %(fs, target_fs))\n y = librosa.resample(y, orig_sr=fs, target_sr=target_fs)\n fs = target_fs\n return y, fs", "def output_wave_file(predicted_mfccs, filename):\n global eng\n predicted_mfccs_transposed = np.transpose(predicted_mfccs)\n\n\n # MFCC features need to be a numpy array of shape (num_coefficients x num_frames) in order to be passed to the invmelfcc function\n inverted_wav_data = eng.invmelfcc(matlab.double(predicted_mfccs_transposed.tolist()), 16000.0, 25, 100.0, 0.005, 0.005)\n\n inverted_wav_data = np.squeeze(np.array(inverted_wav_data))\n\n # scales the waveform to be between -1 and 1\n maxVec = np.max(inverted_wav_data)\n minVec = np.min(inverted_wav_data)\n inverted_wav_data = ((inverted_wav_data - minVec) / (maxVec - minVec) - 0.5) * 2\n\n wav.write(filename + '.wav', 16000.0, inverted_wav_data)", "def toFourier(self):\n\n\t\tif self.space==\"fourier\":\n\t\t\tpass \n\t\telse:\n\t\t\tself.data = fftengine.rfft2(self.data)\n\t\t\tself.space=\"fourier\"", "def load_wave_np(self):\r\n self.wavenpfileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Single File', 'M:/tnw/ist/do/projects/Neurophotonics/Brinkslab/Data',\"(*.npy)\") \r\n \r\n temp_loaded_container = np.load(self.wavenpfileName, allow_pickle=True)\r\n\r\n try:\r\n self.uiDaq_sample_rate = int(os.path.split(self.wavenpfileName)[1][20:-4])\r\n except:\r\n try:\r\n self.uiDaq_sample_rate = int(float(self.wavenpfileName[self.wavenpfileName.find('sr_')+3:-4])) #Locate sr_ in the file name to get sampling rate.\r\n except:\r\n self.uiDaq_sample_rate = 50000\r\n \r\n if self.uiDaq_sample_rate != int(self.SamplingRateTextbox.value()):\r\n print('ERROR: Sampling rates is different!')\r\n \r\n self.PlotDataItem_dict = {}\r\n self.waveform_data_dict = {}\r\n \r\n for i in 
range(len(temp_loaded_container)):\r\n \r\n channel_keyword = temp_loaded_container[i]['Sepcification']\r\n \r\n if channel_keyword != \"galvos_X_contour\" and channel_keyword != \"galvos_Y_contour\":\r\n self.waveform_data_dict[channel_keyword] = temp_loaded_container[i]['Waveform']\r\n self.generate_graphy(channel_keyword, self.waveform_data_dict[channel_keyword])", "def forward(self, audio, feat_kinds=['sp','mcc','f0','ap','en']):\n device = audio.device\n audio = audio.detach().cpu().numpy()\n feat = dict()\n for feat_kind in feat_kinds:\n feat[feat_kind] = list()\n\n for x in audio:\n # Preprocess\n x = x * MAX_WAV_VALUE\n x = self.low_cut_filter(x, cutoff=self.cutoff_freq)\n # Extract f0\n f0, time_axis = pyworld.harvest(x, self.fs, f0_floor=self.minf0, f0_ceil=self.maxf0, frame_period=self.shiftms)\n\n # Extract sp \n sp = pyworld.cheaptrick(x, f0, time_axis, self.fs, fft_size=self.fft_size)\n if 'sp' in feat_kinds:\n feat['sp'].append(torch.from_numpy(sp).float().t())\n\n # Extract ap\n if 'ap' in feat_kinds:\n ap = pyworld.d4c(x, f0, time_axis, self.fs, fft_size=self.fft_size)\n feat['ap'].append(torch.from_numpy(ap).float().t())\n\n # Extract mcc\n if 'mcc' in feat_kinds:\n mcc = pysptk.sp2mc(sp, self.mcc_dim, self.mcc_alpha)\n feat['mcc'].append(torch.from_numpy(mcc).float().t())\n\n # Extract energy\n if 'en' in feat_kinds:\n mcc = pysptk.sp2mc(sp, self.mcc_dim, self.mcc_alpha)\n en = pysptk.mc2e(mcc, alpha=self.mcc_alpha, irlen=256)\n # en = np.clip(en, 1e-10, None)\n feat['en'].append(torch.from_numpy(en).float().view(-1)) \n\n # Fix f0\n if 'f0' in feat_kinds:\n f0[f0 < 0] = 0\n feat['f0'].append(torch.from_numpy(f0).float().view(-1))\n\n for key, val_list in feat.items():\n feat[key] = torch.cat([val.unsqueeze(0) for val in val_list],dim=0).to(device)\n\n return feat", "def limit(filename,threshold,makeup,wout=True,plot=False):\n start=time.time()\n n, data, data_dB,sr,ch=inputwav(filename)\n dataL,dataL_bit=compress(filename,threshold,1000.0,makeup,1.0,500.0,wout=False,plot=plot)\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_limit.wav',dataL_bit,44100,'PCM_16')\n end=time.time()\n elapsed=int(1000*(end-start))\n print('Done!')\n print('...............................')\n print('Completed in '+str(elapsed)+' milliseconds.') \n return dataL,dataL_bit", "def load(cls, path):\n assert os.path.exists(path), \"No such file: %r\" % path\n\n (folder, filename) = os.path.split(path)\n (name, extension) = os.path.splitext(filename)\n\n wave = Waveform(None)\n wave._path = path\n return wave", "def wav_wav(orig, dest, **_kwargs):\n\n # options = kwargs.get(\"tree\").cmd_options.get(\"options\", [])\n\n # first demux it to 16 bit 48khz\n dest_list = []\n for index, orig_elem in enumerate(tools.get_iter(orig)):\n tmp_dest = os.path.join(\n os.path.dirname(dest),\n \"{0}_{1}\".format(index, os.path.basename(dest)))\n cmd = \"ffmpeg -i {orig} -acodec pcm_s16le -ar 48000 {dest}\".format(\n dest=tmp_dest,\n orig=orig_elem)\n logger.debug(cmd)\n try:\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n continue\n dest_list.append(tmp_dest)\n\n if len(dest_list) > 1:\n cmd = \"sox {orig} {dest}\".format(\n orig=\" \".join(orig),\n dest=dest)\n logger.debug(cmd)\n try:\n subprocess.check_call(cmd, shell=True)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n 
logger.error(tools.to_unicode(error.output))\n else:\n os.rename(dest_list[0], dest)\n return dest", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def samp_from_freq(n_samples):\n datapath = os.path.normpath(os.getcwd()+os.sep+os.pardir+os.sep+\"Dataset\"+os.sep+\"All_channels_500hz.npy\")\n data = np.load(datapath)\n fourier = np.fft.rfft(data,axis=0)\n fourier_mean = np.mean(fourier,axis=1)\n print(fourier.shape)\n print(fourier_mean.shape)\n signal = np.fft.irfft(fourier_mean)[1:]\n plt.plot(fourier_mean[1:])\n plt.show()\n plt.plot(signal)\n plt.show()\n #Generating frequency spectrum\n \"\"\"\n x = np.linspace(0,100,251)\n x2 = np.linspace(0,5,251)\n spectrum = 50*np.exp(-(x-30)**2/2)\n spectrum += 60*np.sin(np.random.randn(251)*2*np.pi)*np.exp(-x2)\n #spectrum += np.random.randn(251)\n plt.plot(spectrum)\n plt.show()\n signal = np.fft.irfft(spectrum)\n plt.plot(signal)\n plt.show()\n \"\"\"\n signal = None\n return signal", "def spectre_tsv3(f):\n \n skip = 0\n while True:\n try: \n wav, flux, dflux = np.loadtxt(f, skiprows = skip, unpack = True)\n \n except ValueError:\n # Si les première lignes ont un en-tête\n skip += 1\n \n else:\n break\n \n return wav,flux" ]
[ "0.6820377", "0.6551853", "0.65371555", "0.649495", "0.6372757", "0.6339702", "0.63110816", "0.6309993", "0.63011295", "0.6290294", "0.62898475", "0.62364256", "0.62239075", "0.62043417", "0.61933094", "0.61776847", "0.61596906", "0.6157661", "0.6120229", "0.60863763", "0.6062033", "0.6056519", "0.6051152", "0.603562", "0.6019296", "0.59930617", "0.597846", "0.59663963", "0.59542644", "0.5947514", "0.59432864", "0.59305096", "0.592349", "0.5920889", "0.59154445", "0.5915224", "0.5912544", "0.59107625", "0.58904004", "0.58855313", "0.5882491", "0.58746153", "0.5873905", "0.5867924", "0.5854024", "0.5850759", "0.58285856", "0.5824555", "0.5824555", "0.5818585", "0.5812261", "0.58037853", "0.57981986", "0.5791362", "0.5789505", "0.578824", "0.57852846", "0.57790816", "0.5775277", "0.57698137", "0.57665753", "0.5764156", "0.5763399", "0.5757024", "0.57530695", "0.575185", "0.57481974", "0.574343", "0.5742754", "0.5739397", "0.5738794", "0.5728289", "0.572696", "0.5720248", "0.571973", "0.5719529", "0.5692468", "0.56902426", "0.56788826", "0.5677307", "0.5667307", "0.5645231", "0.5643188", "0.5637357", "0.5635391", "0.5632094", "0.56280476", "0.5623272", "0.561502", "0.56117463", "0.5603915", "0.56014967", "0.55723923", "0.5566094", "0.5556776", "0.55536443", "0.5548341", "0.5546341", "0.5545787", "0.55455273" ]
document_score: 0.6188984
document_rank: 15
Limit arrays of frequency and features by maximum frequency and bottom frequency.
def limit_by_freq(freq, features, upper_limit, lower_limit=None):
    # Copy into arrays, in order to apply mask
    freq = np.array(freq, dtype=float)
    features = np.array(features, dtype=float)

    # Mask for bottom limit
    if lower_limit is not None:
        bottom_mask = freq >= lower_limit
        features = features[bottom_mask]
        freq = freq[bottom_mask]

    # Mask for upper limit
    upper_mask = freq <= upper_limit
    features = features[upper_mask]
    freq = freq[upper_mask]
    return freq, features
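A short, self-contained illustration of limit_by_freq on a synthetic spectrum; it assumes the function above is in scope, and the numbers are made up purely for demonstration.

import numpy as np

# Synthetic spectrum: 0 Hz to 10 kHz in 1 kHz steps, with dummy magnitudes.
freq = np.linspace(0.0, 10000.0, 11)
features = np.arange(11, dtype=float)

# Keep only the 1 kHz to 6 kHz band.
freq_band, features_band = limit_by_freq(freq, features,
                                          upper_limit=6000.0, lower_limit=1000.0)
print(freq_band)      # [1000. 2000. 3000. 4000. 5000. 6000.]
print(features_band)  # [1. 2. 3. 4. 5. 6.]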
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findMaximal(freqSet):", "def __restrict_features_freq(self, min_count=1):\n col_idx = self.X.tocsc().nonzero()[1]\n counter = np.bincount(col_idx)\n print(\"Counter:\", len(counter))\n include_cols = np.where(counter > min_count)[0]\n return include_cols", "def fit_features(data, max_features):\n ndata = []\n for rec in data:\n rec = list(rec)\n if len(rec) > max_features:\n rec = rec[:max_features]\n elif len(rec) < max_features:\n rec = rec + (max_features - len(rec)) * [0.0]\n ndata.append(rec)\n return np.array(ndata)", "def calcUpperFrequencyLimit(fls, noct, max_idx):\n # floats required due to integer division in Python 2.7\n f_upper = fls[0:max_idx] * (2.0 ** (1.0 / (2.0 * noct)))\n step_size = fls[1] - fls[0]\n approx_idx = f_upper / float(step_size)\n f_upper = np.round(approx_idx).astype(int)\n return f_upper", "def _cutoff(xdata, ydata, btype, fs, ff):\r\n try:\r\n# print ff\r\n if ff != None:\r\n nPts = int(1./(((xdata.max()-xdata.min())/xdata.shape[0])*(ff/10.)))\r\n else:\r\n nPts = 0\r\n if nPts%2 == 0:\r\n nPts = nPts + 1\r\n if nPts < xdata.shape[0]:\r\n nPts = xdata.shape[0]\r\n# print nPts\r\n window = np.hanning(ydata.shape[0])\r\n freq = FourierFrequency(xdata, nPts)\r\n index = np.argsort(freq)\r\n tdf = FourierTransform(ydata*window, nPts)\r\n tdf = abs(tdf)\r\n pp = _maxima(tdf[index], freq[index], lookahead = 1)\r\n# mm = _minima(tdf[index], freq[index], lookahead=1)\r\n pp, hh = np.array(np.array(pp).T[0]), np.array(np.array(pp).T[1])\r\n# mm = np.array(np.array(mm).T[0])#, np.array(np.array(mm).T[1])\r\n ind = np.where(pp == min(abs(pp)))[0][0]\r\n ind2 = np.where(hh == max(hh[(ind+1):]))[0][0]\r\n for u, i in enumerate(freq):\r\n if i > abs(pp[ind2])*1.5 or i < -abs(pp[ind2])*1.5 or (i < abs(pp[ind2])/2. and i > -abs(pp[ind2])/2.) 
or (tdf[u] > hh[ind2]*1.05): #(abs(i) < abs(mm[indmin])) or \r\n tdf[u] = 0.\r\n def lor2(x, A0, x0, gamma0):\r\n return A0*(1/np.pi)*(gamma0/2)/((x-x0)**2+(gamma0/2)**2)+A0*(1/np.pi)*(gamma0/2)/((x+x0)**2+(gamma0/2)**2)\r\n lmod2 = lmf.Model(lor2)\r\n lmod2.make_params()\r\n lmod2.set_param_hint('A0', value=max(tdf), min=max(tdf)/1000.)\r\n lmod2.set_param_hint('x0', value=abs(pp[ind2]), min=0.)\r\n lmod2.set_param_hint('gamma0', value=1., min=0.)\r\n result2 = lmod2.fit(tdf[index], x=freq[index])\r\n# print result2.values.get('x0'), result2.values.get('gamma0')\r\n if btype=='high':\r\n if result2.values.get('x0')-result2.values.get('gamma0') > 0.:\r\n# print \"frequency: \", result2.values.get('x0')-result2.values.get('gamma0')\r\n if hh[ind2] != max(hh[(ind+1):]):\r\n print \"False\", \" maximum\", \"\\n\", \"\\n\", \"\\n\"\r\n return result2.values.get('x0')-result2.values.get('gamma0')\r\n else:\r\n# print \"failed: 0\"\r\n return 0.\r\n elif btype=='low':\r\n return result2.values.get('x0')+result2.values.get('gamma0')\r\n except Exception:\r\n pass\r\n finally:\r\n pass", "def max_frequency(sig, FS):\n\n f, fs = plotfft(sig, FS)\n t = np.cumsum(fs)\n\n try:\n ind_mag = np.where(t > t[-1]*0.95)[0][0]\n except:\n ind_mag = np.argmax(t)\n f_max = f[ind_mag]\n\n return f_max", "def filtermax(f, maxfiltsize=10):\n # Maximum filter to ignore deeper fluxes of absorption lines\n f_maxfilt = maximum_filter1d(f, size=maxfiltsize)\n # Find points selected by maximum filter\n idxmax = np.array([i for i in range(len(f)) if f[i]-f_maxfilt[i] == 0.])\n\n return f_maxfilt, idxmax", "def FoldChangeFilterBasedOnMaxFC(X, data_headers, cutoff=0.5):\n XX = Linear(X.copy(), data_headers)\n X_ToMin = XX[data_headers] / XX[data_headers].min(axis=0)\n Xidx = np.any(X_ToMin.values >= X_ToMin.max().values * cutoff, axis=1)\n return X.iloc[Xidx, :]", "def spec_to_peaks(data, value, fp = iterate_structure(generate_binary_structure(rank = 2, connectivity=2), 10)):\n\n max_arr = maximum_filter(data, footprint = fp)\n return (data == max_arr) & (data > value)", "def prune(self, min_freq):\n new_forward = {}\n new_backward = [\"OOV\"]\n new_freq = [0]\n j = 1\n for i in xrange(1,len(self.backward)):\n f = self.backward[i]\n if self.freq[i] >= min_freq:\n new_forward[f] = j\n new_backward.append(f)\n new_freq.append(self.freq[i])\n j += 1\n self.forward = new_forward\n self.backward = new_backward\n self.freq = new_freq\n self.counter = j", "def rough_frequency_samples(m1, m2, flow, fmax, df_min):\n kmin = int(flow / df_min)\n kmax = int(fmax / df_min)\n k = kmin\n ksamples = []\n while k < kmax:\n ksamples.append(k)\n k += int(1.0 / rough_time_estimate(m1, m2, k * df_min) / df_min)\n ksamples.append(kmax)\n return numpy.array(ksamples)", "def _compare_indices(self, frequency_array):\n frequency_array = np.absolute(frequency_array)\n max_frequency = np.max(frequency_array)\n return max_frequency", "def mode(x: List[float]) -> List[float]:\n counts = Counter(x)\n max_count = max(counts.values())\n return [x_i for (x_i, count) in counts.items() if count == max_count]", "def maximums(self):\n # The maximums are at the peaks position but might be swallowed by \n # other peaks\n maximums = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n val = func(pos, pos, height, width)\n if val >= self.__call__(pos, count=False)[0]:\n maximums.append((val, pos))\n return sorted(maximums, reverse=True)", "def max_counts(self):\n\n return 
np.nanmax(self.pre_proc_data)", "def set_maximum(freq, rg = None):\n if isinstance(rg, int):\n rg = [rg]\n elif rg is None:\n rg = _cpu.get_online_cpus()\n\n for core in rg:\n try:\n _cpu.set_max_frequencies(freq, core)\n if _verbose:\n print(f\"CPU {core} maximum frequency set to {int(freq/1000)} MHz.\")\n except Exception as e:\n print(f\"ERROR: An exception occurred for CPU {core}.\")\n print(e)", "def last_high(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int32)\n max_val = values[0]\n counter = 0\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n counter = i\n arr[i] = counter\n return arr", "def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask", "def temporal_ideal_filter(tensor,low,high,fps,axis=0): \n fft=fftpack.fft(tensor,axis=axis)\n frequencies = fftpack.fftfreq(tensor.shape[0], d=1.0 / fps)\n bound_low = (np.abs(frequencies - low)).argmin()\n bound_high = (np.abs(frequencies - high)).argmin()\n if (bound_low==bound_high) and (bound_high<len(fft)-1):\n bound_high+=1\n fft[:bound_low] = 0\n fft[bound_high:-bound_high] = 0\n fft[-bound_low:] = 0\n iff=fftpack.ifft(fft, axis=axis)\n \n return np.abs(iff)", "def peak(data, fft_data=None):\n return np.max(np.abs(data))", "def remove_low_info(X, max_frequency=0.99):\n selector = UniqueThreshold(max_frequency=max_frequency)\n return selector.fit_transform(X)", "def exclude_largest(self):\n mask = np.copy(self.array)\n vols = [np.sum(p) for p in self]\n ilarge = np.argmax(vols)+1 # pore types are 1-indexed\n mask[self.labels == ilarge] = 0\n self.set_array(mask, structure=self._structure)", "def apply_freq_filter(self, min_freq):\n self._apply_filter(lambda ng, freq: freq < min_freq)", "def setUpperFrequencyBound(self, new_bound: int) -> None:\n self.upper_frequency_bound = new_bound", "def remove_exceeding_samples(states_accumulator,\n policy_accumulator,\n value_prior_accumulator,\n max_samples_per_result_to_train):\n for ires in range(len(states_accumulator)):\n if len(states_accumulator[ires]) > \\\n max_samples_per_result_to_train:\n diff = len(states_accumulator[ires]) - \\\n max_samples_per_result_to_train\n states_accumulator[ires] = \\\n states_accumulator[ires][diff:]\n policy_accumulator[ires] = \\\n policy_accumulator[ires][diff:]\n value_prior_accumulator[ires] = \\\n value_prior_accumulator[ires][diff:]\n return states_accumulator, policy_accumulator, value_prior_accumulator", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def max_map(freq_map):\n\n max_val = max(freq_map.values())\n return max_val", "def peak_enhance(signal, peaks, window: int = 0.08, fs: int = processing.FS):\n window = int(fs * window)\n if not window % 2 == 0:\n window += 1\n enhanced_peaks = np.zeros(len(peaks), dtype=int)\n signal = np.abs(signal)\n for i, peak in enumerate(peaks):\n if peak < window // 2:\n enhanced_peaks[i] = np.argmax(signal[0:peak + window // 2 + 1])\n elif peak + window // 2 + 1 > signal.shape[0]:\n enhanced_peaks[i] = np.argmax(signal[peak - window // 2:]) + peak - window // 2\n else:\n # Because of one-side lag -> window: p - w * 0.25% : p + w * 75%\n 
enhanced_peaks[i] = np.argmax(signal[peak - window // 4:peak + 3 * window // 4]) + peak - window // 4\n\n return enhanced_peaks", "def clean(data, N_peaks, f_interval=None, f_resolution=None, sampling=None, w_column=None):\n print('-------------------------- clean')\n \n # Avoid overwritting data:\n data0 = data.copy()\n\n # Standard frequency resolution:\n T = data0[-1,0]-data[0,0]\n if f_resolution==None:\n f_resolution = 1/T\n \n # Avoid 0 as input as not peaks are found:\n if f_interval[0]==0:\n f_interval = [f_resolution, f_interval[1]]\n \n # Constants:\n SAMPLING = 1\n f_RES = 0.1*f_resolution # Standard frequency resolution\n picon = 2*np.pi*data0[:,0] # Optimization constant\n f_peaks = np.zeros(N_peaks)\n A_peaks = np.zeros(N_peaks)\n \n for i in range(N_peaks):\n k = i+1\n print '%s. Peak' %k\n\n # 1. Iteration - start finding largest peak:\n Pf_power, _, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1]) # Smaller f_int (Tuple instead of array for optimization)\n\n # Testing that the frequency resolution > sigma_f to continue:\n A_peak = P[j]\n A_av = np.mean(np.sqrt(P))\n sigma_a = 0.8*A_av\n sigma_phi = sigma_a/A_peak\n sigma_f = np.sqrt(3)*sigma_phi/(np.pi*T)\n if f_RES>sigma_f: \n \n # 2. Iteration: uses now f_res and so on..\n Pf_power, _, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n f_int = (f[j-1], f[j+1])\n \n # 3. Iteration: last\n Pf_power, P_comp, _, _, = tt.power(data0, f_int, f_RES, SAMPLING, w_column)\n f = Pf_power[:,0]; P = Pf_power[:,1]; j = np.nanargmax(P)\n fpicon = picon*f[j] # Optimization constant\n alpha = P_comp[:,0]; beta = P_comp[:,1]\n alpha0 = alpha[j]*np.sin(fpicon)\n beta0 = beta[j]* np.cos(fpicon)\n data0[:,1] = data0[:,1] - alpha0 - beta0\n f_peaks[i] = f[j]\n A_peaks[i] = np.sqrt(P[j])\n\n # Output:\n St_clean = data0\n print f_peaks, A_peaks\n return St_clean, f_peaks, A_peaks", "def lowpass_max_frequency(st, fn_fac=0.75, lp_max=40.0, config=None):\n if not st.passed:\n return st\n\n def _cap_lowpass(fc):\n freq_dict = tr.getParameter(\"corner_frequencies\")\n if freq_dict[\"lowpass\"] > fc:\n freq_dict[\"lowpass\"] = fc\n tr.setParameter(\"corner_frequencies\", freq_dict)\n\n for tr in st:\n if tr.passed:\n if tr.hasParameter(\"review\"):\n rdict = tr.getParameter(\"review\")\n if \"corner_frequencies\" in rdict:\n rev_fc_dict = rdict[\"corner_frequencies\"]\n if \"lowpass\" in rev_fc_dict:\n logging.warning(\n f\"Not applying lowpass_max_frequency for {tr} because the \"\n \"lowpass filter corner was set by manual review.\"\n )\n continue\n\n fn = 0.5 * tr.stats.sampling_rate\n lp_max_fn = fn * fn_fac\n _cap_lowpass(lp_max_fn)\n _cap_lowpass(lp_max)\n\n return st", "def max_mag_sig(df, thresh=3.0, save=True):\n ### Filter features below the threshold (set to zero)\n df[abs(df) < thresh] = 0\n ### magnitude of the absolute values of the feature vectors\n df['magnitude'] = df.abs().sum(axis=1)\n df['max_group'] = df.groupby(df.index, sort=False)['magnitude'].transform(max)\n ### select only signatures with maximal magnitude\n df = df[df['magnitude'] == df['max_group']]\n ### clean up after myself\n df.drop(df.columns[-2:], axis=1, inplace=True)\n df.columns = np.arange(1, df.shape[1]+1)\n if save:\n df.to_csv(join(FILE_PATH, \"l1000_scala_features.txt\"), delimeter='\\t')\n return df", "def getUpperFrequencyBound(self) -> int:\n return self.upper_frequency_bound", 
"def calculate_max_frequencies(self):\n\n # Fourier transform each frame of the file\n frame_ffts = []\n\n # The first 1 and last 2 frames are omitted since they are\n # frequently of different lengths than the rest of the file\n start_frame, end_frame = (1, len(self.frames) - 2)\n for i in range(start_frame, end_frame):\n # Decode the frame (stored as a byte array)\n # into a numpy int16 array\n # (NOTE: this assumes a 16-bit encoding, which was true\n # for the files tested, but won't necessarily always be true\n arr = np.frombuffer(self.frames[i], dtype=np.int16)\n\n # Take just the first channel, so that we only need\n # to work with one time series\n arr = arr[::self.channels]\n\n # Perform the Fourier transform\n frame_fft = np.abs(np.fft.rfft(arr))\n frame_ffts.append(frame_fft)\n\n # Convert the list of ffts to a numpy.ndarray (easier to work with)\n fft_2d = np.stack(frame_ffts)\n\n # Get frequency information\n # (Should be identical for each frame, except sometimes\n # the first and last frames, which we omitted)\n frame_freq = np.fft.rfftfreq(len(arr))\n\n # Clip the data to a smaller range of frequencies. For the files\n # tested, this range corresponded to a \"fingerprint\" region\n # where the actual melody resides.\n clip_start, clip_end = (1, 25)\n frame_freq_sub = frame_freq[clip_start:clip_end]\n fft_2d_sub = fft_2d[:, clip_start:clip_end]\n\n # Mask out low-amplitude frequencies so that we don't match to noise\n # (this is done on a proportional threshold\n # since absolute magnitudes vary)\n fft_2d_denoise = np.ma.masked_where(\n (fft_2d_sub.T < fft_2d_sub.max() * 0.25),\n fft_2d_sub.T, 0)\n\n # Finally, get the dominant frequency for each frame\n # (and mask it to omit any points where the dominant frequency is\n # just the baseline frequency)\n max_freq = frame_freq_sub[np.argmax(fft_2d_denoise, axis=0)]\n self.max_freq = np.ma.masked_where(\n max_freq == frame_freq_sub[0], max_freq)", "def maximumAbove(requestContext, seriesList, n):\n results = []\n for series in seriesList:\n if max(series) > n:\n results.append(series)\n return results", "def maximumBelow(requestContext, seriesList, n):\n\n result = []\n for series in seriesList:\n if max(series) <= n:\n result.append(series)\n return result", "def limit_weight(self, weight_max):\n # remove items with low values\n if self.total_weight > weight_max:\n items_sorted_by_fitness = sorted(self.items, key=lambda item: item.fitness, reverse=False)\n while items_sorted_by_fitness and self.total_weight > weight_max:\n least_fit_item = items_sorted_by_fitness.pop(0)\n if self.item_stats[least_fit_item.id] == 1:\n self.item_stats[least_fit_item.id] = 0\n self.update_values() # have to update each time an item is change to recompute weight", "def non_maximum_suppression_slow(boxes, confs, iou_threshold, top_k):\n idxs = np.argsort(-confs)\n selected = []\n for idx in idxs:\n if np.any(iou(boxes[idx], boxes[selected]) >= iou_threshold):\n continue\n selected.append(idx)\n if len(selected) >= top_k:\n break\n return selected", "def maglim(arr: np.ndarray, limit: float) -> np.ndarray:\n norm = np.linalg.norm(arr, axis=0)\n return arr / np.where(norm>limit, norm/limit, 1)", "def most_frequent(vector):\n\n top_vector = vector.drop(vector.columns[args.top:], axis=1)\n return top_vector", "def spectral_maxpeaks(sign, FS):\n f, ff = plotfft(sign, FS)\n diff_sig = np.diff(ff)\n\n return np.sum([1 for nd in range(len(diff_sig[:-1])) if (diff_sig[nd+1]<0 and diff_sig[nd]>0)])", "def peak_height(self):\n return 
np.array([max(self.waveform[ch]) for ch in range(self.nchannels)])", "def process_value(self, fft_value, peak_values):\n if fft_value < self.trigger_threshold:\n peak_values.append(0)\n else:\n if self.output_binary is True:\n peak_values.append(1)\n else:\n scaled_max = self.trigger_threshold + self.trigger_offset\n\n if fft_value < scaled_max:\n scaled_value = (scaled_max - fft_value) / scaled_max\n scaled_value *= self.scaled_max_value\n peak_values.append(math.ceil(scaled_value))\n else:\n peak_values.append(self.scaled_max_value)", "def set_filter_fq_pagb(self, threshold, take_max):\n with np.errstate(invalid='ignore'):\n prob_a_given_b = np.nan_to_num(self.existence_array / (self.existence_array + self.b_not_a_array))\n prob_b_given_a = np.nan_to_num(self.existence_array / (self.existence_array + self.a_not_b_array))\n self.filter = self._matrix_wise_max_or_min(prob_a_given_b, prob_b_given_a, take_max) > threshold", "def max():\n valid=result_alpha.F>0\n src_data.F[valid]=np.maximum( src_data.F[valid],result_data.F[valid] )", "def filter_toofew_toolong(df, min_each_group, max_length):\n df = df[~(df.question.apply(lambda x : len(x)) > max_length)]\n\n counts = df[\"index\"].value_counts()\n idxs = np.array(counts.index)\n \n # index numbers of groups with count >= mineachgroup\n list_idx = [i for i, c in zip(idxs, counts) if c >= min_each_group]\n\n # filter out data with \"index\" in list_idx \n df = df[df[\"index\"].isin(list_idx)]\n return df", "def calcLowFrequencyLimit(fls, noct, max_idx):\n # floats required due to integer division in Python 2.7\n f_lower = fls[0:max_idx] / (2.0 ** (1 / (2.0 * noct)))\n step_size = fls[1] - fls[0]\n approx_idx = f_lower / (1.0 * step_size)\n f_lower = np.round(approx_idx).astype(int)\n return f_lower", "def window_argmaxes(windows, data):\n data = np.array(data)\n argmaxes = []\n\n for window in windows:\n data_segment = data[window]\n argmaxes.append(window[np.argmax(data_segment)])\n\n return np.array(argmaxes)", "def _find_limits(hist, treated, untreated):\n # Treated Sample\n # Set the lowest frequency observed in the treated subsample\n # as the default for the lower limit of the common support\n lower_limit = np.min(treated)\n\n # The following algorithm checks for any empty histogram bins\n # (starting from 0 going up to 0.5).\n # If an empty histogram bin is found, the lower_limit is set to\n # the corresponding P(Z) value of the next bin above.\n for low in range(len(hist[0][0])):\n\n # Only consider values in the interval [0, 0.5)\n if hist[1][low] > 0.5:\n break\n\n # If the algorithm starts below the sample minimum,\n # move on to the next bin\n elif hist[1][low] < np.min(treated):\n continue\n\n else:\n # If the current bin is non-empty, we have still continuous\n # support and the sample minimum remains our lower limit\n if hist[0][0][low] > 0:\n pass\n\n # If an empty bin is found, set the lower limit to the next bin above\n # and move on to the next bin until P(Z) = 0.5 is reached\n else:\n lower_limit = hist[1][low + 1]\n\n # Untreated Sample\n # Set the highest frequency observed in the untreated subsample\n # as the default for the upper limit of the common support\n upper_limit = np.max(untreated)\n\n # The following algorithm checks for any empty histogram bins\n # (starting from 1 going down to 0.5).\n # If an empty histogram bin is found, the upper_limit is set to the\n # current next bin.\n for up in reversed(range(len(hist[0][1]))):\n\n # Only consider values in the interval (0.5, 1]\n if hist[1][up] < 0.5:\n break\n\n 
# If the algorithm starts above the sample maximum, move on to the next bin\n elif hist[1][up] > np.max(untreated):\n continue\n\n else:\n # If the current bin is non-empty, we have still continuous support and\n # the sample maximum remains our upper limit\n if hist[0][1][up] > 0:\n pass\n\n # If an empty bin is found, set the upper limit to the next bin below\n # and move on to the next bin until P(Z) = 0.5 is reached\n else:\n upper_limit = hist[1][up]\n\n return lower_limit, upper_limit", "def _peakdet(ts, threshold_ratio=.1):\n THRESH = threshold_ratio * (max(ts)-min(ts))\n maxima = []\n minima = []\n extrema = []\n looking_for_maximum = True\n last = 0\n for i in range(1, len(ts)):\n if looking_for_maximum:\n if ts[i] > ts[last]:\n last = i\n elif ts[i] + THRESH < ts[last]:\n maxima.append(last)\n extrema.append(last)\n looking_for_maximum = False\n else: #looking for minimum\n if ts[i] < ts[last]:\n last = i\n elif ts[i] - THRESH > ts[last]:\n minima.append(last)\n extrema.append(last)\n looking_for_maximum = True\n \n return extrema", "def set_frequencies(self, array):\n self.set_array('frequencies', array)", "def lmode(inlist):\r\n\r\n scores = pstats.unique(inlist)\r\n scores.sort()\r\n freq = []\r\n for item in scores:\r\n freq.append(inlist.count(item))\r\n maxfreq = max(freq)\r\n mode = []\r\n stillmore = 1\r\n while stillmore:\r\n try:\r\n indx = freq.index(maxfreq)\r\n mode.append(scores[indx])\r\n del freq[indx]\r\n del scores[indx]\r\n except ValueError:\r\n stillmore=0\r\n return maxfreq, mode", "def get_frequency_array(self):\n\t\treturn np.logspace(np.log10(self.converted_range[0]), np.log10(\n\t\t\tself.converted_range[1]), num=129)[:self.maximum_frequency]", "def process_frequencies(df_corpus, wdir, min_MFF, max_MFF, mode, names_MFF):\n # Normalization of the frequencies by the sum of the text\n df_corpus = df_corpus.loc[:].div(df_corpus.sum(axis='columns'), axis=\"index\")\n if mode == \"train\":\n # If we are doing a training corpus, it is easier\n \n # The dataframe gets a new summatory column that we use to order the df \n df_corpus = df_corpus.T\n df_corpus[\"sum\"]=df_corpus.sum(axis=\"columns\")\n df_corpus = df_corpus.sort_values(by=\"sum\", ascending=False)\n \n # Only a given amount of words is taken\n df_corpus = df_corpus[min_MFF:max_MFF]\n # Summatory column is deleted and the df goes back to its normal format\n del df_corpus['sum']\n df_corpus = df_corpus.T\n print(mode, \" last feature: \", df_corpus.columns[-1])\n \n elif mode == \"eval\" or mode == \"test\":\n # If we create the evaluation or the test corpus, we have to check first the features of the train corpus because the 5000 MFW of the train corpus are NOT the 5000 MFW of the test corpus.\n # TODO: I don't know if that is the best way to do it. 
Maybe we should calculate the total amount of features in the different corpora, get the list of the n MFF and then fill the diferent matrixs with this features.\n df_corpus = df_corpus.reindex_axis(names_MFF, axis=1)\n # Only a given amount of words is taken\n df_corpus = df_corpus.T\n df_corpus = df_corpus[min_MFF:max_MFF]\n df_corpus = df_corpus.T\n print(mode, \" last feature: \", df_corpus.columns[-1])\n\n df_corpus = df_corpus.fillna(0)\n \n # The table is saved as csv\n df_corpus.to_csv(wdir+\"freq_table.csv\", sep='\\t', encoding='utf-8', index=True)\n\n return df_corpus", "def filter_list_freq(lst, min_freq):\r\n arr = np.array(lst)\r\n items, count = np.unique(np.array(arr), return_counts=True)\r\n rem_items = items[count < min_freq]\r\n return [i for i in arr[~np.in1d(np.array(arr), rem_items)]]", "def reduced_frequency(cutoff):\n print 'reduced frequency method'\n global global_word_list\n global global_reduced_freqs\n\n doc_length = len(global_word_list)\n print 'number of words in files: {}'.format(doc_length)\n count = 0\n freq_list = count_words(global_word_list) # Calls count_words()\n\n for (w, freq) in freq_list.items():\n # a count for testing\n count += 1\n # if count % 100 == 0:\n # print '.',\n # if count % 10000 == 0:\n # print '\\n{}'.format(count)\n # end of count\n global_reduced_freqs[w] = 0\n interval = doc_length / freq\n if interval != doc_length and freq > cutoff:\n for i in range(0, doc_length, interval):\n # Checking if a word is in interval\n if w in global_word_list[i: interval + i]:\n global_reduced_freqs[w] += 1", "def _filter_features(in_recs, max_size):\n for rec in in_recs:\n final = []\n for f in rec.features:\n if len(f.location) < max_size:\n final.append(f)\n else:\n for sub in f.sub_features:\n if len(sub.location) < max_size:\n final.append(sub)\n rec.annotations = {}\n rec.features = final\n yield rec", "def max_value_each_subarray(numbers, size):\n # deque to store indices of number for each subarray in descending order by value\n index_deque = deque()\n\n for i in range(len(numbers)):\n # remove old indices in deque that is outside the current window (subarray)\n while index_deque and index_deque[0] <= i-size:\n index_deque.popleft()\n\n # make sure deque is stored descendingly\n while index_deque and numbers[i] >= numbers[index_deque[-1]]:\n index_deque.pop()\n index_deque.append(i)\n\n if i >= size - 1:\n print(numbers[index_deque[0]], end=\" \")", "def mode(self):\r\n\t\t_set\t= set(self.sample)\r\n\t\t_list\t= [self.sample.count(i) for i in _set]\r\n\t\treturn list(_set)[_list.index(max(_list))]", "def filter_max_length(x, y, max_length=MAX_LENGTH):\n return tf.logical_and(tf.size(x) <= max_length,\n tf.size(y) <= max_length)", "def findClosed(freqSet, freqSup):", "def supportDistribution(self, max): ### *** hack! 
remove!\n dist = dict(zip(*numpy.unique(self.supportArray, return_counts=True)))\n del(dist[0])\n for x in dist.keys():\n if x > max:\n del(dist[x])\n return dist", "def reduce_peaks(self,peaks,odf_min):\n if len(peaks)==0:\n return -1 \n if odf_min<self.iso_thr*peaks[0]:\n #remove small peaks\n ismallp=np.where(peaks<self.peak_thr*peaks[0])\n if len(ismallp[0])>0:\n l=ismallp[0][0]\n else:\n l=len(peaks)\n else:\n return -1\n return l", "def read_file_simple(self,filename):\n\n freqlim = config.cutoff*self.cutoff\n exceed_freqlim = False\n freqfile = open(filename)\n freqfile.readline() # skip head\n mode_temp = []\n for line in freqfile:\n line = line.strip()\n columns = line.split()\n n = int(columns[1])\n freq = utilities.to_float(columns[2])\n # remove frequencies above AIMS_configure.cutoff*nu_{cut-off}\n if (freq > freqlim):\n exceed_freqlim = True\n continue\n if (config.npositive and (n < 0)): continue # remove g-modes if need be\n mode_temp.append((n,int(columns[0]),freq,utilities.to_float(columns[4])))\n freqfile.close()\n self.modes = np.array(mode_temp,dtype=modetype)\n\n return exceed_freqlim", "def softmax(self):\n return self.feats", "def computeFreq(self):\n for x in self.data:\n i = 0\n for interval in self.classesInterval:\n if interval[0] <= x <= interval[1]:\n self.frequencies[i] += 1\n break\n i += 1\n\n self.minFreq = self.frequencies[0]\n self.maxFreq = self.frequencies[0]\n for f in self.frequencies:\n if f < self.minFreq:\n self.minFreq = f\n elif f > self.maxFreq:\n self.maxFreq = f", "def __cut_arrays(data_array, maximum_time, arrays_to_cut):\n\n try:\n begin_time = data_array[arrays_to_cut[0]][0][0]\n end_time = data_array[arrays_to_cut[0]][0][-1]\n delta_time = (\n data_array[arrays_to_cut[0]][0][1]\n - data_array[arrays_to_cut[0]][0][0]\n )\n total_time = end_time - begin_time\n if total_time > maximum_time:\n over_time = total_time - maximum_time\n array_elm_to_drop = int(over_time / delta_time)\n for arrays in arrays_to_cut:\n data_array[arrays][0] = data_array[arrays][0][\n array_elm_to_drop:\n ]\n data_array[arrays][1] = data_array[arrays][1][\n array_elm_to_drop:\n ]\n except:\n pass", "def peak(self):\n pass", "def get_gaussian_ff_top(self, filenames):\n amber_ffs = []\n for fname in filenames:\n amber_ffs.append(self._get_gaussian_ff_top_single(filename=fname))\n return amber_ffs", "def filterHighs( adata, bandlimit = 5000) :\n\n # TODO: compute Fourier representation of data\n\n fig = plt.figure()\n fig.add_subplot(2, 1, 1)\n plt.plot( np.real(fdata))\n plt.xlim( [0,adata.size])\n\n # TODO: filter data\n\n fig.add_subplot(2, 1, 2)\n plt.plot( np.real(fdata))\n plt.xlim( [0,adata.size])\n plt.show()\n\n # TODO: restore time domain representation of data\n\n return adata_filtered", "def majorityVoteSilence(y_Raw, amps, silenceClassNum):\n y_raw = y_Raw.copy()\n silenceThreshold = 1000\n majVotWindowLength = 2.0 #in seconds\n windowLength = 0.032\n frameLengthFloat = math.ceil(majVotWindowLength/windowLength)\n\n frameLength = int(frameLengthFloat)\n\n resArray = np.empty(y_raw.shape)\n\n n_frames = int(math.ceil(y_raw.shape[0]/frameLengthFloat))\n\n for i in range(n_frames):\n\n if ((i+1) * frameLength) < y_raw.shape[0]:\n\n tmpAmps = amps[(i * frameLength):(((i+1) * frameLength))]\n \n if tmpAmps.max() >= silenceThreshold:\n #if True:\n tmpArray = y_raw[(i * frameLength):(((i+1) * frameLength))]\n \n \"\"\" Get most frequent number in that frames: \"\"\"\n count = np.bincount(tmpArray)\n tmpMostFrequent = np.argmax(count)\n\n \"\"\" Fill all elements 
with most frequent number: \"\"\"\n tmpArray.fill(tmpMostFrequent)\n\n \"\"\" Write it into our result array: \"\"\"\n resArray[(i * frameLength):(((i+1) * frameLength))] = tmpArray\n \n else:\n \"\"\"If all amplitudes are below threshold, the \n sample is considered silent:\"\"\" \n resArray[(i * frameLength):(((i+1) * frameLength))] = silenceClassNum\n else:\n\n tmpAmps = amps[(i * frameLength):y_raw.shape[0]]\n\n\n if tmpAmps.max() >= silenceThreshold: \n #if True:\n tmpArray = y_raw[(i * frameLength):y_raw.shape[0]]\n \"\"\" Get most frequent number in that frames and fill \n all elements in the frame with it: \"\"\"\n count = np.bincount(tmpArray)\n tmpMostFrequent = np.argmax(count)\n\n \"\"\" Fill all elements with most frequent number: \"\"\"\n tmpArray.fill(tmpMostFrequent)\n\n \"\"\" Write it into our result array: \"\"\"\n resArray[(i * frameLength):y_raw.shape[0]] = tmpArray\n \n else:\n \"\"\"If all amplitudes are below threshold, the \n sample is considered silent:\"\"\" \n resArray[(i * frameLength):y_raw.shape[0]] = silenceClassNum\n\n return resArray", "def atmax(a,upperlimit,dimension=None,inclusive=1):\r\n if inclusive: upperfcn = N.less\r\n else: upperfcn = N.less_equal\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if upperlimit == None:\r\n upperlimit = N.maximum.reduce(N.ravel(a))+1\r\n smallest = N.minimum.reduce(N.ravel(a))\r\n ta = N.where(upperfcn(a,upperlimit),a,smallest)\r\n return N.maximum.reduce(ta,dimension)", "def max_level(data: np.ndarray) -> int:\n shape = data.shape[1:] # exclude channel dimension\n return min(shape).bit_length() - 1", "def clicked_checkbox_upper_limit(self):\n spectral_model, proxy_index, index = self._get_selected_model(True)\n spectral_model.metadata[\"is_upper_limit\"] \\\n = self.checkbox_upper_limit.isChecked()\n self.measurement_view.update_row(proxy_index.row())\n self.summarize_current_table()\n self.refresh_plots()\n return None", "def downsample_fluorescence(F, thres=20, verbose=1):\n diff_F = np.diff(F, axis=1)\n sum_F = np.sum(diff_F, axis=0)\n F = F[:,:-1]\n if verbose > 0:\n print(\n 'Downsampling fluorescence data to {} frames using threshold {}'\n .format(np.sum(np.greater(sum_F, thres))))\n \n return F[:, np.greater(sum_F, thres)]", "def _max_periods(self):\n return self.data.shape[0]", "def test_find_max_allele_frequency(self):\n \n # check for var without recorded MAF\n self.assertIsNone(self.var.find_max_allele_frequency())\n \n # check for single population\n self.var.info[\"MAX_AF\"] = \"0.005\"\n self.assertEqual(self.var.find_max_allele_frequency(), 0.005)\n \n # check for two populations\n self.var.info[\"AFR_AF\"] = \"0.01\"\n self.assertEqual(self.var.find_max_allele_frequency(), 0.01)\n \n # check for all populations\n pops = set([\"AFR_AF\", \"AMR_AF\", \"ASN_AF\", \"DDD_AF\", \"EAS_AF\", \\\n \"ESP_AF\", \"EUR_AF\", \"MAX_AF\", \"SAS_AF\", \"UK10K_cohort_AF\"])\n for pop in pops:\n self.var.info[pop] = \"0.05\"\n self.assertEqual(self.var.find_max_allele_frequency(), 0.05)", "def init(self, target):\n # Finds positive and negative peaks\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n positive_peak_ixs, peak_props = find_peaks(np.clip(target, 0, None), width=0, prominence=0, height=0)\n negative_peak_ixs, dip_props = find_peaks(np.clip(-target, 0, None), width=0, prominence=0, height=0)\n\n # Indexes for minimum and maximum center frequency\n min_fc_ix = np.argmin(np.abs(self.f - self.min_fc))\n max_fc_ix = np.argmin(np.abs(self.f - self.max_fc))\n\n # All peak 
indexes together\n peak_ixs = np.concatenate([positive_peak_ixs, negative_peak_ixs])\n # Exclude peak indexes which are outside of minimum and maximum center frequency\n mask = np.logical_and(peak_ixs >= min_fc_ix, peak_ixs <= max_fc_ix)\n\n if (len(positive_peak_ixs) == 0 and len(negative_peak_ixs) == 0) or np.sum(mask) == 0:\n # No peaks found\n params = []\n if self.optimize_fc:\n self.fc = self.f[(min_fc_ix + max_fc_ix) // 2]\n params.append(np.log10(self.fc))\n if self.optimize_q:\n self.q = np.sqrt(2)\n params.append(self.q)\n if self.optimize_gain:\n self.gain = 0.0\n params.append(self.gain)\n return params\n\n peak_ixs = peak_ixs[mask]\n # Properties of included peaks together\n widths = np.concatenate([peak_props['widths'], dip_props['widths']])[mask]\n heights = np.concatenate([peak_props['peak_heights'], dip_props['peak_heights']])[mask]\n # Find the biggest peak, by height AND width\n sizes = widths * heights # Size of each peak for ranking\n ixs_ix = np.argmax(sizes) # Index to indexes array which point to the biggest peak\n ix = peak_ixs[ixs_ix] # Index to f and target\n\n params = []\n if self.optimize_fc:\n self.fc = np.clip(self.f[ix], self.min_fc, self.max_fc)\n params.append(np.log10(self.fc)) # Convert to logarithmic scale for optimizer\n if self.optimize_q:\n width = widths[ixs_ix]\n # Find bandwidth which matches the peak width\n f_step = np.log2(self.f[1] / self.f[0])\n bw = np.log2((2 ** f_step) ** width)\n # Calculate quality with bandwidth\n self.q = np.sqrt(2 ** bw) / (2 ** bw - 1)\n self.q = np.clip(self.q, self.min_q, self.max_q)\n params.append(self.q)\n if self.optimize_gain:\n # Target value at center frequency\n self.gain = heights[ixs_ix] if target[ix] > 0 else -heights[ixs_ix]\n self.gain = np.clip(self.gain, self.min_gain, self.max_gain)\n params.append(self.gain)\n return params", "def margin_sampling(predictions, number):\n maxes = []\n maxesBis = []\n tmp = []\n for i in range(0, predictions.shape[0]):\n maxes.append(np.max(predictions[i]))\n tmp.append(np.delete(predictions[i], np.where(predictions[i] == np.max(predictions[i]))[0]))\n maxesBis.append(np.max(tmp[i]))\n\n val = np.array(maxes) - np.array(maxesBis)\n\n return __get_min_indexes(val, number)", "def test_2d_freq_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/2d_pipe/test.ft2\")\n assert data.shape == (2048, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1],2) == 1601.83\n assert round(data[10,22],2) == 3079.44\n lowmem_write_readback(dic,data)\n check_ppm_limits(dic,data,0,[174.84, 65.21])\n check_ppm_limits(dic,data,1,[253.90, -143.80])", "def high_count(values):\n length = len(values)\n arr = np.zeros(length, dtype=np.int16)\n count = 0\n max_val = values[0]\n for i in np.arange(1, length):\n if values[i] > max_val:\n max_val = values[i]\n count += 1\n arr[i] = count\n return arr", "def three_array_max(array_list: List[np.ndarray]) -> np.ndarray:\n temp = np.maximum(array_list[0], array_list[1])\n all_maxs = np.maximum(temp, array_list[2])\n\n return all_maxs", "def cut(self, max_lenght):\n self.V_estimates = self.V_estimates[:max_lenght]\n super().cut(max_lenght)", "def filterbank(min_freq, max_freq, number, srate, N):\n points = numpy.linspace(M(min_freq), M(max_freq), number + 2)\n freqs = Mi(points)\n bins = freq2bin(freqs, srate, N)\n\n filters = numpy.zeros((number, N/2 +1))\n\n for i in xrange(0, number):\n bot = int(math.floor(bins[i]))\n mid = int(round(bins[i+1]))\n top = int(math.ceil(bins[i+2]))\n\n filters[i][bot:mid] = numpy.linspace(0, 1, mid - bot 
+1)[:-1]\n filters[i][mid:top+1] = numpy.linspace(1, 0, top - mid +1)\n\n return filters", "def get_mostFrequent(self, n=5):\r\n pass", "def get_mostFrequent(self, n=5):\r\n pass", "def set_filter_fq_pab(self, threshold):\n frequency_table = self._get_existence_frequency()\n self.filter = frequency_table > threshold", "def remove_low_freq_tokens(freq_list, tokens):\n return [t for t in tokens if freq_list[t] > 1]", "def __set_frequency_data(self, fdata):\n assert fdata.shape[-1] == self._nf\n self._in_freq = fdata\n self._in_time = None", "def filtered_families(seq_fam, minimum_count = 500, draw_histogram=False):\n \n families = Counter(fam for seq, fam in seq_fam.items())\n print('Number of families before filter: {}'.format(len(families)))\n \n filtered_fam = {fam : count for fam, count in families.items() if count >= minimum_count }\n ff_counts = np.array([*filtered_fam.values()])\n\n if draw_histogram:\n # Draw histogram\n fig, ax = plt.subplots()\n ax.hist(ff_counts)\n ax.set_ylabel('Count')\n ax.set_xlabel('Examples / Family')\n\n print('Num Examples: {} | Families: {} \\n'\n 'Mean: {:.2f} | Variance: {:.2f} \\n'\n 'Min: {} | Max: {}'.format(\n np.sum(ff_counts),\n len(filtered_fam),\n np.mean(ff_counts),\n np.var(ff_counts),\n np.min(ff_counts),\n np.max(ff_counts)))\n \n return filtered_fam", "def frfplot(freq, H, freq_min=0, freq_max=None, type=1, legend=[]):\n FLAG = type # Plot type, should libe renamed throughout.\n freq = freq.reshape(1, -1)\n lenF = freq.shape[1]\n if len(H.shape) is 1:\n H = H.reshape(1, -1)\n\n if H.shape[0] > H.shape[1]:\n H = H.T\n\n if freq_max is None:\n freq_max = np.max(freq)\n\n if freq_min is None:\n freq_min = np.min(freq)\n\n if freq_min < np.min(freq):\n freq_min = np.min(freq)\n\n if freq_min > freq_max:\n raise ValueError('freq_min must be less than freq_max.')\n\n # print(str(np.amin(freq)))\n inlow = int(lenF * (freq_min - np.amin(freq)\n ) // (np.amax(freq) - np.amin(freq)))\n\n inhigh = int(lenF * (freq_max - np.amin(freq)\n ) // (np.amax(freq) - np.amin(freq)) - 1)\n # if inlow<1,inlow=1;end\n # if inhigh>lenF,inhigh=lenF;end\n \"\"\"print('freq shape: {}'.format(freq.shape))\n print('H shape: {}'.format(H.shape))\n print('Index of low frequency: {}'.format(inlow))\n print('Index of high frequency: {}'.format(inhigh))\"\"\"\n H = H[:, inlow:inhigh]\n # print(H.shape)\n freq = freq[:, inlow:inhigh]\n mag = 20 * np.log10(np.abs(H))\n # print(mag)\n # print(mag.shape)\n minmag = np.min(mag)\n maxmag = np.max(mag)\n phase = np.unwrap(np.angle(H)) * 180 / np.pi\n # phmin_max=[min(phase)//45)*45 ceil(max(max(phase))/45)*45];\n phmin = np.amin(phase) // 45 * 45.0\n phmax = (np.amax(phase) // 45 + 1) * 45\n \"\"\"minreal = np.amin(np.real(H))\n maxreal = np.amax(np.real(H))\n minimag = np.amin(np.imag(H))\n maximag = np.amax(np.imag(H))\"\"\"\n\n if FLAG is 1:\n fig, (ax1, ax2) = plt.subplots(2, 1)\n ax1.plot(freq.T, mag.T)\n ax1.set_xlabel('Frequency (Hz)')\n ax1.set_ylabel('Mag (dB)')\n ax1.grid()\n ax1.set_xlim(xmax=freq_max, xmin=freq_min)\n ax1.set_ylim(ymax=maxmag, ymin=minmag)\n\n ax2.plot(freq.T, phase.T)\n ax2.set_xlabel('Frequency (Hz)')\n ax2.set_ylabel('Phase (deg)')\n ax2.grid()\n ax2.set_xlim(xmax=freq_max, xmin=freq_min)\n ax2.set_ylim(ymax=phmax, ymin=phmin)\n ax2.set_yticks(np.arange(phmin, (phmax + 45), 45))\n fig.tight_layout()\n\n if len(legend) > 0:\n plt.legend(legend)\n ax = (ax1, ax2)\n else:\n print(\"Sorry, that option isn't supported yet\")\n return ax\n\n \"\"\"# elif FLAG==2:\n # subplot(2,1,1)\n # semilogx(F,mag)\n # 
xlabel('Frequency (Hz)')\n # ylabel('Mag (dB)')\n # grid on\n # % Fmin,Fmax,min(mag),max(mag)\n # axis([Fmin Fmax minmag maxmag])\n\n # subplot(2,1,2)\n # semilogx(F,phase)\n # xlabel('Frequency (Hz)')\n # ylabel('Phase (deg)')\n # grid on\n # axis([Fmin Fmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n\n # elif FLAG==3:\n # subplot(2,1,1)\n # mag=20*log10(abs(Xfer));\n # semilogx(F*2*pi,mag)\n # xlabel('Frequency (Rad/s)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Wmin Wmax minmag maxmag])\n # zoom on\n # subplot(2,1,2)\n # semilogx(F*2*pi,phase)\n # xlabel('Frequency (Rad/s)')\n # ylabel('Phase (deg)')\n # grid on\n # axis([Wmin Wmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n\n # elseif FLAG==4\n # subplot(2,1,1)\n # plot(F,real(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Real')\n # grid on\n # axis([Fmin Fmax minreal maxreal])\n # zoom on\n # subplot(2,1,2)\n # plot(F,imag(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Imaginary')\n # grid on\n # axis([Fmin Fmax minimag maximag])\n # zoom on\n # elseif FLAG==5\n # subplot(1,1,1)\n # imax=round(length(F)*Fmax/max(F));\n # imin=round(length(F)*Fmin/max(F))+1;\n # plot(real(Xfer(imin:imax)),imag(Xfer(imin:imax)))\n # xlabel('Real')\n # ylabel('Imaginary')\n # grid on\n # zoom on\n # elseif FLAG==6\n # subplot(1,1,1)\n # mag=20*log10(abs(Xfer));\n # plot(F,mag)\n # xlabel('Frequency (Hz)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Fmin Fmax minmag maxmag])\n # zoom on\n # elseif FLAG==7\n # subplot(1,1,1)\n # plot(F,phase)\n # xlabel('Frequency (Hz)')\n # ylabel('Phase (deg)')\n # grid on\n # phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];\n # axis([Fmin Fmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n # zoom on\n # elseif FLAG==8\n # subplot(1,1,1)\n # plot(F,real(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Real')\n # grid on\n # axis([Fmin Fmax minreal maxreal])\n # zoom on\n # elseif FLAG==9\n # subplot(1,1,1)\n # plot(F,imag(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Imaginary')\n # grid on\n # axis([Fmin Fmax minimag maximag])\n # zoom on\n # elseif FLAG==10\n # subplot(1,1,1)\n # mag=20*log10(abs(Xfer));\n # semilogx(F,mag)\n # xlabel('Frequency (Hz)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Fmin Fmax minmag maxmag])\n # zoom on\n # elseif FLAG==11\n # subplot(1,1,1)\n # semilogx(F,phase)\n # xlabel('Frequency (Hz)')\n # ylabel('Phase (deg)')\n # grid on\n # phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];\n # axis([Fmin Fmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n # zoom on\n # elseif FLAG==12\n # subplot(1,1,1)\n # semilogx(F,real(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Real')\n # grid on\n # axis([Fmin Fmax minreal maxreal])\n # zoom on\n # elseif FLAG==13\n # subplot(1,1,1)\n # semilogx(F,imag(Xfer))\n # xlabel('Frequency (Hz)')\n # ylabel('Imaginary')\n # grid on\n # axis([Fmin Fmax minimag maximag])\n # zoom on\n # elseif FLAG==14\n # subplot(1,1,1)\n # mag=20*log10(abs(Xfer));\n # semilogx(F*2*pi,mag)\n # xlabel('Frequency (Rad/s)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Wmin Wmax minmag maxmag])\n # zoom on\n # elseif FLAG==15\n # subplot(1,1,1)\n # semilogx(F*2*pi,phase)\n # xlabel('Frequency (Rad/s)')\n # ylabel('Phase (deg)')\n # grid on\n # 
axis([Wmin Wmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n # zoom on\n # else\n # subplot(2,1,1)\n # mag=20*log10(abs(Xfer));\n # plot(F,mag)\n # xlabel('Frequency (Hz)')\n # ylabel('Mag (dB)')\n # grid on\n # axis([Fmin Fmax minmag maxmag])\n # zoom on\n # subplot(2,1,2)\n # plot(F,phase)\n # xlabel('Frequency (Hz)')\n # ylabel('Phase (deg)')\n # grid on\n # phmin_max=[floor(min(phase)/45)*45 ceil(max(phase)/45)*45];\n # axis([Fmin Fmax phmin_max(1) phmin_max(2)])\n # gridmin_max=round(phmin_max/90)*90;\n # set(gca,'YTick',gridmin_max(1):90:gridmin_max(2))\n # zoom on\n \"\"\"", "def _truncate_data(self):\n trunc_idx = np.argsort(self.energies)\n trunc_intensities = self.intensities[trunc_idx]\n norm_by = np.amax(trunc_intensities)\n return norm_by", "def best_crossover(feature_set, population):\n new = []\n pop = [x for y in population for x in y]\n most = pop[0]\n max = float(\"-inf\")\n all = list(set(pop))\n for j in range(0, len(feature_set)):\n for i in all:\n x = pop.count(i)\n if x > max:\n max = x\n most = i\n new.append(most)\n pop = filter(lambda a: a != most, pop)\n max = float(\"-inf\")\n\n return set(new).union(feature_set)", "def optimal_nffts(arr):\n\n return int(8 * 2 ** np.ceil(np.log2(len(arr))))", "def global_peak(apsp, sfield, peaks, n_size=5):\n\n peak_map = {p: None for p in peaks}\n corr_map = {p: None for p in peaks}\n\n for p in peaks:\n\n idx = (apsp[p, :]<=n_size)\n peak_map[p] = sfield[idx].mean()\n corr_map[p] = sfield[p]\n\n maxima = max(peak_map, key=peak_map.get)\n\n return [maxima, peak_map]", "def cut_by_count(self, min_count=1, max_count=None):\n word_count = list()\n for word, count in iteritems(self.word_count):\n word_count.append((word, count))\n\n self.clear_dictionary(keep_special=True)\n\n for word, count in word_count:\n if min_count is not None and count < min_count:\n continue\n if max_count is not None and count > max_count:\n continue\n self.add(word, count=count)\n\n print(\"After cut, Dictionary Size is %d\" % len(self))", "def peak_shaving_max_min(\n loadfactor_yd_cy_improved,\n average_yd,\n fuel_yh,\n mode_constrained\n ):\n # ------------------------------------------\n # Calculate new maximum demand for every day\n # and fueltype with help of newly adaped load factor\n # ------------------------------------------\n allowed_demand_max_d = average_yd / loadfactor_yd_cy_improved\n allowed_demand_max_d[np.isnan(allowed_demand_max_d)] = 0\n\n if mode_constrained:\n average_yd = average_yd[:, np.newaxis]\n allowed_demand_max_d = allowed_demand_max_d[:, np.newaxis]\n else:\n average_yd = average_yd[:, :, np.newaxis]\n allowed_demand_max_d = allowed_demand_max_d[:, :, np.newaxis]\n\n # ------------------------------------------\n # Calculate difference to daily mean for every hour\n # for every fueltype (hourly value - daily mean)\n # ------------------------------------------\n diff_to_mean = fuel_yh - average_yd\n\n # ------------------------\n # Calculate areas of lp below average for every day\n # all lp higher than average are set to zero\n # ------------------------\n diff_to_mean[diff_to_mean > 0] = 0\n diff_to_mean = np.abs(diff_to_mean)\n\n # Sum along all fueltypes the total fuels which are lp below average\n # Calculate percentage of total shiftable from above average to\n # below average for all hours which can take on fuel\n if mode_constrained:\n tot_area_below_mean = np.sum(diff_to_mean, axis=1) #one fueltype\n tot_area_below_mean = 
tot_area_below_mean[:, np.newaxis]\n else:\n tot_area_below_mean = np.sum(diff_to_mean, axis=2) #multiple fueltypes\n tot_area_below_mean = tot_area_below_mean[:, :, np.newaxis]\n\n area_below_mean_p = diff_to_mean / tot_area_below_mean\n area_below_mean_p[np.isnan(area_below_mean_p)] = 0\n\n # Calculate diff to newmax for every hour\n diff_to_max_demand_d = fuel_yh - allowed_demand_max_d\n diff_to_max_demand_d[diff_to_max_demand_d < 0] = 0\n\n # -----------------------------------------\n # Start with largest deviation to mean\n # and shift to all hours below average\n # -----------------------------------------\n # Calculate total demand which is to be shifted\n if mode_constrained:\n tot_demand_to_shift = np.sum(diff_to_max_demand_d, axis=1) # one fueltype\n tot_demand_to_shift = tot_demand_to_shift[:, np.newaxis]\n else:\n tot_demand_to_shift = np.sum(diff_to_max_demand_d, axis=2) # multiple fueltypes\n tot_demand_to_shift = tot_demand_to_shift[:, :, np.newaxis]\n\n # Add fuel below average:\n # Distribute shiftable demand to all hours which are below average\n # according to percentage contributing to lf which is below average\n shifted_fuel_yh = fuel_yh + (area_below_mean_p * tot_demand_to_shift)\n\n # Set all fuel hours whih are above max to max (substract diff)\n shifted_fuel_yh = shifted_fuel_yh - diff_to_max_demand_d\n\n return shifted_fuel_yh", "def filter_by_freq(self, low=0.5, high=40):\n self.epochs.load_data()\n self.epochs.filter(l_freq=low, h_freq=high, picks = 'all')\n return self.epochs", "def get_trimmed_features(words, num_recordings, base_path=\"\", energy_threshold=0.001):\n\n features_by_word = []\n for i in range(len(words)):\n indexes = []\n feature_array = []\n for j in range(1, num_recordings[i] + 1):\n # Determine the path\n path = base_path + words[i] + str(j) + \".wav\"\n (rate, data) = get_sig(path)\n # features is all the audio features for a given file\n features = get_st_features(data, rate)[0]\n # features[1] is total frame energies\n # energy threshold of 0.001 is arbitrary\n indexes.append(relevant_indexes(features[1], energy_threshold))\n # Add features for this specific audio file to the feature array for this word\n feature_array.append(features)\n # Finds the minimum index of all start indexes\n min_index = sorted(indexes, key=lambda x: x[0])[0][0]\n # Finds the max index of all end indexes\n max_index = sorted(indexes, key=lambda x: x[1])[::-1][0][1]\n # Debug print statements commented out\n # print(\"min, max index for word\", words[i])\n # print(min_index, max_index)\n # Only take the frames between min index and max index for each sample word\n # Note: Potential for a bug; if maxIndex is outside the length of its frame array\n # To fix, need to pad the shorter recordings with extra data\n features_by_word.append([x[0:34, min_index:max_index].transpose() for x in feature_array])\n # print(numpy.shape(features_by_word[i]))\n # features_by_word is an array of len(words) cells\n # Each cell has num_recordings[i] elements corresponding to the number of recordings of each word words[i]\n # Each recording has the same number of frames for a given word, as determined by minIndex and maxIndex\n # for a given word.\n # Finally, each frame contains the 34 features from that frame's raw data samples\n return features_by_word", "def filter_features(Y, N):\n most_varying_feature_idx = np.argsort(np.std(Y, axis=0))[-N:]\n filt_Y = Y[:, most_varying_feature_idx]\n return filt_Y, most_varying_feature_idx", "def _multiple_values_max(self, maps, threshold):\r\n 
max_val = np.zeros((maps.shape[0], maps.shape[1]), dtype=np.float)\r\n for i in range(maps.shape[1]):\r\n cmin = np.min(maps[:,i])\r\n cmax = np.max(maps[:,i])\r\n limit = cmax - (cmax - cmin) * threshold[i]\r\n min_mask = maps[:,i] <= limit\r\n max_mask = maps[:,i] > limit\r\n # for an abundance map the delta is around [-1..1],\r\n # but it can be outside this interval, it's something\r\n # to test\r\n # a guard with a -10 value maybe ok.\r\n rmin = min_mask * -10\r\n max_val[:,i] = max_mask * maps[:,i] + rmin\r\n max_vec = np.max(max_val, axis=1)\r\n max_mask = max_vec > -10\r\n argmax = np.argmax(max_val, axis=1)\r\n return (argmax + 1) * max_mask" ]
[ "0.6321469", "0.5905518", "0.5899971", "0.5845656", "0.5827781", "0.5711425", "0.55240804", "0.5505938", "0.54641736", "0.5451525", "0.5440867", "0.5408901", "0.5367295", "0.53634137", "0.5352045", "0.5324692", "0.53187746", "0.53122324", "0.5303038", "0.52815557", "0.5275447", "0.52573454", "0.52492553", "0.5232467", "0.52100176", "0.51902336", "0.5171352", "0.51594007", "0.51539475", "0.51530427", "0.51486945", "0.5144887", "0.5141171", "0.5131185", "0.5130988", "0.51292", "0.5127953", "0.51264715", "0.5123691", "0.5099836", "0.5098857", "0.50985545", "0.50959015", "0.5076074", "0.50756997", "0.50752085", "0.507497", "0.5065315", "0.5056968", "0.5048483", "0.5041968", "0.5039816", "0.503399", "0.50193906", "0.50135505", "0.50093895", "0.50029224", "0.49991444", "0.4996492", "0.49887022", "0.49884048", "0.498249", "0.4978405", "0.49744567", "0.4965099", "0.49625435", "0.4960726", "0.4950878", "0.4944241", "0.49404126", "0.4930877", "0.49258068", "0.4912584", "0.49060616", "0.490079", "0.48990592", "0.48981413", "0.4892387", "0.48814243", "0.48793334", "0.4876874", "0.48761243", "0.48738834", "0.4862069", "0.4862069", "0.4859519", "0.48508808", "0.48325917", "0.48274648", "0.48187566", "0.4818402", "0.48175654", "0.48171455", "0.48089412", "0.48017672", "0.47913823", "0.47889286", "0.47865447", "0.47842005", "0.47801107" ]
0.72728205
0
Check if song is already transformed into temp.
def check_wav(song, source_folder, temp_folder, encoder='mpg123'):
    # Name of files
    song_name, extension = os.path.splitext(song)
    mp3_file = os.path.join(source_folder, song)
    if '.wav' != extension:
        wav_file = os.path.join(temp_folder, song_name + '.wav')
        try:
            if not os.path.isfile(wav_file):
                mp3_to_wav(
                    mp3_file=mp3_file,
                    wav_file=wav_file,
                    encoder=encoder)
            else:
                pass
        except MemoryError:
            logger.error('MemoryError: %s MP3 couldn\'t be transformed into WAV',
                         song_name)
    else:
        # Already a wav file
        copyfile(mp3_file, os.path.join(temp_folder, song_name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isTemp(self,object):\n return (object in self.tempObjects)", "def test_transform_track_album_based_on_album_title_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 3')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def song_check(song):\n msg = choose_song(song)\n return msg != ERROR", "def test_transform_track_album_based_on_album_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 2',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, False)", "def test_apply_transform_single_album_no_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)", "def test_transform_track_title_based_on_album_title_no_match_album(self):\n track = Track(artist='Artist', album='Album 2', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_single_track_with_transform(self):\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.app.load_data()\n\n self.add_mp3()\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Artist 2', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Artist 2')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 2)\n self.assertEqual(album.totaltracks, 1)\n self.assertEqual(album.last_transform, tf_pk)", "def test_apply_transform_single_track_no_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, 
to_artist='Bar')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)", "def test_conversion():\n file = 'Sherlock OST/SHERlocked.mp3'\n new_name = cr.SingleSong_conversion(file)\n assert new_name[0].split('/')[-1] in os.listdir(os.path.split(file)[0])", "def test_repair_file(self):\n\n audio_path = self.converter.audio\n self.assertTrue(audio_path.endswith('.wav'))\n # Make sure it can be loaded in moviepy\n clip = AudioFileClip(audio_path)", "def test_no_transform_track_with_already_applied_transform(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 1\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)", "def replace(self):\n if self.success is False:\n raise TaskError('not ready')\n try:\n temp_src = '/tmp/' + str(random.randint(10000, 99999)) + '.mp3'\n os.move(self.source, temp_src)\n os.move(self.target, self.source)\n os.unlink(temp_src)\n except OSError as e:\n print(e)", "def test_transform_track_album_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 3', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 3')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_no_transform_track_with_song_with_transform_id_greater(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 1\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.transformed, False)", "def test_no_transform_album_with_already_applied_transform(self):\n album = Album(artist='Artist', album='Album', last_transform=1)\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.transformed, False)\n\n tflist.apply_album(album)\n\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.transformed, False)", "def cut_and_eq(song_name):\r\n print(\"[{}] STATUS: Loading...\".format(song_name))\r\n sound_file = AudioSegment.from_mp3(song_name)\r\n print(\"[{}] STATUS: Loaded, now 
processing...\".format(song_name))\r\n sound_file = match_target_amplitude(sound_file, TARGET_VOLUME) # Amplify beforehand to prevent over-zealous cutting\r\n chunks = split_on_silence(sound_file, SILENCE_CUTOFF, THRESHOLD, keep_silence=ACCEPTABLE_SILENCE)\r\n\r\n if len(chunks) > 1:\r\n print(\"[{}] ERROR: Too many chunks ({}) cannot export\".format(song_name, len(chunks)))\r\n return song_name\r\n else:\r\n output = AudioSegment.empty()\r\n for chunk in chunks:\r\n output += chunk\r\n\r\n new_name = song_name.split(\".\")[0]\r\n print(\"[{}] STATUS: Processed, now exporting...\".format(song_name))\r\n metadata = mediainfo(song_name).get('TAG',{})\r\n output.export(OUTPUT_NAME_FORMAT.format(new_name), format=OUTPUT_FORMAT, tags=metadata)\r\n print(\"[{}] STATUS: Exported to {} - cleaned.{}\".format(song_name, new_name, OUTPUT_FORMAT))\r\n return None", "def check_already_extracted(video_parts):\n filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join(output_dir,\n filename_no_ext + '-0030.jpg')))", "def test_transform_track_title_based_on_album_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 3',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 3')\n self.assertEqual(track.transformed, False)", "def _do_move(self, artist, album, song):\n try:\n move_to = \"{0}{1}/{2}/\".format(self.dupe_dir, \n artist, album)\n if not os.path.exists(move_to):\n os.makedirs(move_to)\n \n shutil.move(song['path'], move_to)\n self.moved.append(song)\n return 1\n except:\n self.logger.error(\"Could not move file: {0}\".format(str(song['path'])))\n return 0", "def test_transform_track_title_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 2', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, pattern_album = 'Album',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_track_album_based_on_artist_album_no_match_artist(self):\n track = Track(artist='Artist 2', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_track_artist_based_on_artist_album_no_match_album(self):\n track = Track(artist='Artist', album='Album 2', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_album=True, pattern_album='Album',\n change_artist=True, 
to_artist='Artist 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def remix(self):\n self.log(\"Looking up track...\", 5)\n self.getTag()\n self.processArt()\n\n self.log(\"Listening to %s...\" % ('\"%s\"' % self.tag['title'] if 'title' in self.tag else 'song'), 5)\n self.original = audio.LocalAudioFile(self.infile, False)\n if not 'title' in self.tag:\n self.detectSong(self.original)\n self.st = FastModify()\n \n self.log(\"Choosing key and tempo...\", 10)\n self.tonic = self.original.analysis.key['value']\n self.tempo = self.original.analysis.tempo['value']\n self.bars = self.original.analysis.bars\n self.beats = self.original.analysis.beats\n self.sections = self.original.analysis.sections\n self.tag['key'] = self.keys[self.tonic] if self.tonic >= 0 and self.tonic < 12 else '?'\n self.tag['tempo'] = self.template['tempo']\n\n self.log(\"Arranging intro...\", 40.0/(len(self.sections) + 1))\n self.partialEncode(self.compileIntro())\n\n past_progress = 0\n hats = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n\n i = 0 # Required if there are no sections\n for i, section in enumerate(self.sections):\n self.log(\"Arranging section %s of %s...\" % (i+1, len(self.sections)), 40.0/(len(self.sections) + 1))\n a, b = self.compileSection(i, section, hats)\n self.partialEncode(a)\n self.partialEncode(b)\n del a, b\n del hats\n self.original.unload()\n\n self.log(\"Adding ending...\", 5)\n self.partialEncode(\n audio.AudioData(\n self.sample_path + self.template['splash_ends'][(i + 1) % len(self.template['splash_ends'])],\n sampleRate=44100,\n numChannels=2,\n verbose=False\n )\n )\n \n self.log(\"Mixing...\", 5)\n self.mixwav(self.tempfile)\n\n if self.deleteOriginal:\n try:\n unlink(self.infile)\n except:\n pass # File could have been deleted by an eager cleanup script\n\n self.log(\"Mastering...\", 5)\n self.lame(self.tempfile, self.outfile)\n unlink(self.tempfile)\n \n self.log(\"Adding artwork...\", 20)\n self.updateTags(titleSuffix = \" (Wub Machine Remix)\")\n \n return self.outfile", "def test_apply_transform_single_track_match(self):\n track = Track(artist='Artist', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='New Artist')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'New Artist')", "def test_transform_album_no_changes(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1, cond_artist=True, change_artist=True,\n pattern_artist='Foo', to_artist='Bar')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)", "def test_get_all_need_transform_one_track_another_already_applied(self):\n track = Track(artist='Artist', 
album='Album', title='Title', last_transform=1)\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n track = Track(artist='Artist', album='Album', title='Title')\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 2)\n\n tracks = Track.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(tracks), 1)\n self.assertEqual(tracks[0].pk, pk)", "def _alreadyProcessed(self, tiltseriesdata):\n\t\tseriesname = \"series%3d\" % (tiltseriesdata['number'])\n\t\tself._reloadDoneDict()\n\t\tif seriesname in self.donedict:\n\t\t\tif not self.stats['lastseries_skipped']:\n\t\t\t\tsys.stderr.write(\"skipping series\\n\")\n\t\t\telif self.stats['skipcount'] % 80 == 0:\n\t\t\t\tsys.stderr.write(\".\\n\")\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\".\")\n\t\t\tself.stats['lastseries_skipped'] = True\n\t\t\tself.stats['skipcount'] += 1\n\t\t\tself.stats['count'] += 1\n\t\t\treturn True\n\t\telse:\n\t\t\tself.stats['waittime'] = 0\n\t\t\tif self.stats['lastseries_skipped']:\n\t\t\t\tapDisplay.printMsg(\"\\nskipped\"+str(self.stats['skipcount'])+\" series so far\")\n\t\t\tself.stats['lastseries_skipped']=False\n\t\t\treturn False\n\t\treturn False", "async def async_is_playing_new_track(self):\n if self._playing_mediabrowser and self._media_source_uri is not None:\n # don't trigger new track flag for local mediabrowser files\n return False\n \n if self._icecast_name != None:\n import unicodedata\n artmed = unicodedata.normalize('NFKD', str(self._media_artist) + str(self._media_title)).lower()\n artmedd = u\"\".join([c for c in artmed if not unicodedata.combining(c)])\n if artmedd.find(self._icecast_name.lower()) != -1 or artmedd.find(self._source.lower()) != -1:\n # don't trigger new track flag for icecast streams where track name contains station name or source name; save some energy by not quering last.fm with this\n self._media_image_url = None\n return False\n\n if self._media_artist != self._media_prev_artist or self._media_title != self._media_prev_title:\n return True\n else:\n return False", "def test_transform_track_title_based_on_artist_album_no_match_artist(self):\n track = Track(artist='Artist 2', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, pattern_album = 'Album',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_album_album_based_on_artist_album_no_match_album(self):\n album = Album(artist='Artist', album='Album 3',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album 3')\n self.assertEqual(album.transformed, False)", "def test_transform_track_artist_based_on_artist_album_no_match_artist(self):\n track = Track(artist='Artist 3', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 
'Artist',\n cond_album=True, pattern_album='Album',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 3')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def _move_temporary(self, url: str) -> bool:\n if self._file_path.exists():\n info('File already exists')\n return True\n # If download complete, make file permanent\n move(self._temp_path, self._file_path)\n info(\"DOWNLOADED: %s TO %s\" % (url, self._file_path))\n return True", "def test_transform_track_album_based_on_album_title_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_album=True, to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, True)", "def test_apply_transform_single_album_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='New Artist')\n self.assertNotEqual(tf_pk, 0)\n self.app.load_data()\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['alartist'], 'New Artist')", "def test_transform_album_artist_based_on_artist_album_no_match_album(self):\n album = Album(artist='Artist', album='Album 2',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_album=True, pattern_album='Album',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album 2')\n self.assertEqual(album.transformed, False)", "def test_transform_track_no_changes(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer',\n tracknum=1, seconds=60)\n transform = Transform(1, cond_artist=True, change_artist=True,\n pattern_artist='Foo', to_artist='Bar')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.ensemble, 'Ensemble')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.transformed, False)", "def test_transform_track_title_based_on_album_title_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_album=True, pattern_album = 'Album',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n 
self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)", "def test_transform_album_album_based_on_artist_album_no_match_artist(self):\n album = Album(artist='Artist 2', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist 2')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)", "def compare_to_tape(self, track_number, word):\n track_cache = self.cache[track_number]\n it_word = iter(word)\n\n # check letters in cache\n if any(letter_on_track != next(it_word)\n for letter_on_track in track_cache):\n return False\n\n # check letters not already cached\n for letter_in_word in it_word:\n successful, letter_on_track = self.read(track_number)\n if not successful:\n return False\n if letter_in_word != letter_on_track:\n return False\n return True", "def check_already_extracted(video_parts):\n train_or_test, classname, filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join(\"/data/niteshku001/Ravdess\", train_or_test, classname,\n filename_no_ext + '-0001.jpg')))", "def test_adding_existing_album_with_dependant_transform(self):\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n album_id = self.add_album(artist='Artist 2', album='Album')\n self.app.load_data()\n self.assertEqual(self.get_album_count(), 1)\n\n self.add_mp3()\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('Would update to', status)\n self.assertEqual(self.get_album_count(), 1)", "def artist_song_first_pass(self):\n log.debug(\"Called artist_song_first_pass for %s.\" % self.name)\n self.success = False\n song_potentials = []\n potential_count = 0\n _min = 20\n\n def generate_potentials(count):\n results = self.sp.search(q= 'artist: ' + self.artist + ' track: ' + self.song, type='track', limit=2)\n if results['tracks']['total'] >= 1:\n for items in results['tracks']['items']:\n song_potentials.append([items['name'], items['uri']])\n for artist in items['artists']:\n song_potentials[count].append(artist['name'])\n song_potentials[count].append(artist['uri'])\n count += 1\n\n for splitter in splitters:\n if self.name_clean.count(splitter) == 1:\n self.artist, self.song = self.name_clean.split(splitter)\n generate_potentials(potential_count)\n elif self.name_clean.count(splitter) > 1:\n for x in range(0, self.name_clean.count(splitter)):\n self.artist, self.song = split(self.name_clean, splitter, x)\n generate_potentials(potential_count)\n\n cutoff = matching(self.name_clean)\n log.debug(\"%s potential matches found for %d\" % (len(song_potentials), id(self)))\n log.debug(\"Potentials: %s\" % song_potentials)\n for potential in song_potentials:\n log.debug(potential)\n log.debug(self.name_clean)\n log.debug(str(potential[2]) + \" \" + str(potential[0]))\n lev = levenshtein(self.name_clean, str.lower(str(potential[2])) + \" \" + str.lower(str(potential[0])))\n log.debug(lev)\n if lev < _min:\n _min = lev\n self.artist = 
potential[2]\n self.artist_uri = potential[3]\n self.song = potential[0]\n self.song_uri = potential[1]\n\n if self.artist_uri and self.song_uri is not None:\n log.debug(\"Cutoff point for %s : %d\" % (id(self), cutoff))\n log.debug(\"Current Min: {}\".format(_min))\n log.debug(\"Levenshtein distance between {} and {} : {}\"\n .format(self.name_clean, self.artist + self.song,\n levenshtein(self.name, self.artist + \" \" + self.song)))\n if int(_min) > cutoff:\n log.debug(\"Method artist_song_first_pass failed for %s.\" % self.name)\n self.success = False\n self.artist = None\n self.song = None\n else:\n log.debug(\"Method artist_song_first_pass succeeded for %s.\" % self.name)\n self.success = True\n else:\n log.debug(\"Method artist_song_first_pass failed for %s.\" % self.name)\n self.success = False\n self.artist = None\n self.song = None", "def test_transform_track_artist_based_on_artist_title_no_match_artist(self):\n track = Track(artist='Artist 3', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_title=True, pattern_title='Title',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 3')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_track_artist_based_on_artist_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 2',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_title=True, pattern_title='Title',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, False)", "def test_transform_album_artist_based_on_artist_album_no_match_artist(self):\n album = Album(artist='Artist 3', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_album=True, pattern_album='Album',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist 3')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)", "def compare_to_tape(self, track_number, word):\n track_cache = self.cache[track_number]\n it_word = iter(word)\n\n # process letters in cache\n for _ in track_cache:\n next(it_word)\n\n # check letters not already cached\n for letter_in_word in it_word:\n successful, letter_on_track = self.read(track_number)\n if not successful:\n return False\n return True", "def test_log_track_with_transform_and_nonmatching_album(self):\n\n album_id = self.add_album(artist='Artist', album='Album')\n self.assertNotEqual(album_id, 0)\n\n tf_id = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.assertNotEqual(tf_id, 0)\n self.app.load_data()\n\n track = self.app.log_track(self.track_obj('silence.mp3'))\n self.assertEqual(self.get_track_count(), 1)\n track_row = self.get_track_by_id(track.pk)\n 
self.assertNotEqual(track_row, None)\n self.assertEqual(track_row['lasttransform'], tf_id)\n self.assertEqual(track_row['artist'], 'Artist 2')\n self.assertEqual(track_row['album'], 'Album')\n self.assertEqual(track_row['title'], 'Track')\n self.assertEqual(track_row['source'], 'xmms')\n self.assertEqual(track_row['album_id'], 0)", "def lyrics_note_is_same_as_original():\n pass", "def test_transform_track_title_based_on_artist_title_no_match_artist(self):\n track = Track(artist='Artist 2', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def transform(self, source, target):\n return False", "def test_transform_track_album_based_on_artist_album_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, True)", "def test_transform_track_title_based_on_artist_title_no_match_title(self):\n track = Track(artist='Artist', album='Album', title='Title 3',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 3')\n self.assertEqual(track.transformed, False)", "def test_transform_track_title_based_on_artist_album_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, pattern_album = 'Album',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)", "def test_transform_album_empty_transform(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n change_artist=True, pattern_artist='Artist', to_artist='Artist 2',\n change_album=True, pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)", "def dict_is_song(info_dict):\n if \"full album\" in info_dict[\"title\"].lower():\n return False\n if 
int(info_dict[\"duration\"]) > 7200:\n return False\n return True", "def test_transform_track_change_album(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1, cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.transformed, True)", "def test_transform_track_empty_transform(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer',\n tracknum=1, seconds=60)\n transform = Transform(1,\n change_artist=True, pattern_artist='Artist', to_artist='Artist 2',\n change_album=True, pattern_album='Album', to_album='Album 2',\n change_title=True, pattern_title='Title', to_title='Title 2',\n change_ensemble=True, pattern_ensemble='Ensemble', to_ensemble='Ensemble 2',\n change_composer=True, pattern_composer='Composer', to_composer='Composer 2',\n change_conductor=True, pattern_conductor='Conductor', to_conductor='Conductor 2',\n )\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.ensemble, 'Ensemble')\n self.assertEqual(track.conductor, 'Conductor')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.transformed, False)", "def TurnCheck(field, dir):\n global sym\n sym = 1\n temp_list = field[:]\n field_copy = field[:]\n if temp_list == Transform(field_copy, dir):\n sym = 0\n return False\n else:\n sym = 0\n return True", "def move(self):\n for artist in self.audio_dict:\n for album in self.audio_dict[artist]:\n for songlist in self.audio_dict[artist][album]:\n if len(self.audio_dict[artist][album][songlist]) > 1:\n \n # track the song that wont be deleted\n song_to_keep = {}\n # track bitrate through songlist\n highest_bitrate = 0\n # find the highest bitrate\n for song in self.audio_dict[artist][album][songlist]:\n if song['bitrate'] > highest_bitrate:\n highest_bitrate = song['bitrate']\n song_to_keep = song\n # flag files for deletion \n for song in self.audio_dict[artist][album][songlist]:\n if song != song_to_keep:\n self._do_move(artist, album, song)\n \n return self", "def test_get_all_need_transform_no_albums_matched(self):\n orig_album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120, last_transform=1)\n pk = orig_album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 1)\n\n self.assertEqual(Album.get_all_need_transform(self.app.curs, 1), [])", "def _check_orig(self):\n if self.is_dir():\n self._orig = False\n return\n\n parts = self._path.split('.')\n try:\n if parts[-1] == 'tgz':\n self._orig = True\n elif parts[-2] == 'tar':\n if (parts[-1] in Compressor.Opts or\n parts[-1] in Compressor.Aliases):\n self._orig = True\n except IndexError:\n self._orig = False", "def test_get_all_need_transform_no_tracks_matched(self):\n track = Track(artist='Artist', album='Album', title='Title', last_transform=1)\n pk = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n self.assertEqual(self.get_track_count(), 1)\n\n tracks = Track.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(tracks), 0)", "def 
test_two_tracks_mismatched_album(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_album=True, album='Album 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, False)\n self.assertIn('changed to', status)\n self.assertEqual(self.get_album_count(), 0)", "def test_transform_track_artist_based_on_artist_album_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_album=True, pattern_album='Album',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, True)", "def checkHash(song):\n\tsql = \"Select path, filename, hash from songs where hash = '\" + song.hash + \"';\"\n\tc, conn = connect()\n\tc.execute(sql)\n\tnotexists = True\n\tfor (path, filename, hash) in c:\n\t\tif hash == song.hash:\n\t\t\tnotexists = False\n\t\telse:\n\t\t\tnotexists = True\n\treturn notexists", "def _soundhelper(self):\n self._click()\n if self._last is None and self._touch is not None:\n if self._soundImage.contains(self._touch.x, self._touch.y):\n self._sound = not self._sound\n if self._soundImage.source == 'whitevolumeon.png':\n self._soundImage.source = 'whitevolumenull.png'\n else:\n self._soundImage.source = 'whitevolumeon.png'", "def check_music(self):\n\t\tif self.menu_music_played == 0:\n\t\t\tif self.initial_menu_music_element == self.next_menu_music_element:\n\t\t\t\tself.ingame_music.extend(self.menu_music)\n\t\t\t\tself.music = self.ingame_music\n\t\t\t\tself.music_rand_element = random.randint(0, len(self.ingame_music) - 1)\n\t\t\t\tself.menu_music_played = 1\n\t\t\telse:\n\t\t\t\tself.music = self.menu_music\n\n\t\tif hasattr(self, '_bgsound_old_byte_pos') and hasattr(self, '_bgsound_old_sample_pos'):\n\t\t\tif self._bgsound_old_byte_pos == self.emitter['bgsound'].getCursor(fife.SD_BYTE_POS) and self._bgsound_old_sample_pos == self.emitter['bgsound'].getCursor(fife.SD_SAMPLE_POS):\n\t\t\t\tself.music_rand_element = self.music_rand_element + 1 if \\\n\t\t\t\t\t self.music_rand_element + 1 < len(self.music) else 0\n\t\t\t\tself.play_sound('bgsound', self.music[self.music_rand_element])\n\t\t\t\tif self.menu_music_played == 0:\n\t\t\t\t\tself.next_menu_music_element = self.music_rand_element\n\n\t\tself._bgsound_old_byte_pos, self._bgsound_old_sample_pos = \\\n\t\t\t self.emitter['bgsound'].getCursor(fife.SD_BYTE_POS), \\\n\t\t\t self.emitter['bgsound'].getCursor(fife.SD_SAMPLE_POS)", "def has_moved(self):\n return bool(self.rename_phases)", "def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"", "def is_same_song(a, b):\n\n for k in 'album', 'title', 'artists':\n if a[k] != b[k]:\n return False\n\n return True", "def test_transform_album_full_transform(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, change_artist=True, pattern_artist='Artist', to_artist='Artist 2',\n cond_album=True, change_album=True, pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist 2')\n self.assertEqual(album.album, 
'Album 2')\n self.assertEqual(album.transformed, True)", "def check_already_extracted(video_parts):\n train_or_test, filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join('demo_frames',\n filename_no_ext + '-0001.jpg')))", "def process(self):\n try:\n existing_movie = os.listdir(self.movie_root_path)\n except:\n os.makedirs(self.movie_root_path)\n existing_movie = os.listdir(self.movie_root_path)\n\n if not [movie for movie in existing_movie if self.title.lower() in movie.lower()]:\n if self.has_video_extension:\n self.move_media()\n else:\n self.extract_media()\n else:\n Logger.log('[-] %s already exists. Skipping...' % self.title)", "def _process_audio(self, root: str, id: str) -> bool:\n path = os.path.join(root, id + \".flac\")\n si, _ = torchaudio.info(path)\n duration = (si.length / si.channels) / si.rate\n if self.max_duration is not None and duration > self.max_duration:\n return True\n self.paths.append(path)\n self.durations.append(duration)\n return False", "def test_transform_track_artist_based_on_artist_title_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_title=True, pattern_title='Title',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, True)", "def test_two_transforms_track_with_one_already_applied(self):\n track = Track(artist='Artist', title='Title')\n track.last_transform = 1\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n tflist.add_transform(Transform(2,\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2',\n ))\n\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)\n\n tflist.apply_track(track)\n\n self.assertEqual(track.last_transform, 2)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)", "def test_apply_transform_two_tracks_one_matches(self):\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.assertNotEqual(tf_pk, 0)\n\n self.app.load_data()\n track = Track(artist='Artist', title='Title', last_transform=tf_pk)\n pk_first = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n track = Track(artist='Artist', title='Title')\n pk_second = track.insert(self.app.db,\n self.app.curs,\n 'xmms',\n datetime.datetime.now())\n\n row = self.get_track_by_id(pk_first)\n self.assertEqual(row['lasttransform'], tf_pk)\n row = self.get_track_by_id(pk_second)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_track_by_id(pk_first)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'Artist')\n\n row = self.get_track_by_id(pk_second)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['artist'], 'Artist 2')", "def test_transform_track_composer_based_on_artist_composer_no_match_composer(self):\n track = 
Track(artist='Artist', album='Album', title='Title',\n composer='Composer 3',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_composer=True, change_composer=True,\n pattern_composer='Composer', to_composer='Composer 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.composer, 'Composer 3')\n self.assertEqual(track.transformed, False)", "def test_transform_track_composer_based_on_artist_composer_no_match_artist(self):\n track = Track(artist='Artist 2', album='Album', title='Title',\n composer='Composer',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_composer=True, change_composer=True,\n pattern_composer='Composer', to_composer='Composer 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.composer, 'Composer')\n self.assertEqual(track.title, 'Title')\n self.assertEqual(track.transformed, False)", "def test_transform_track_title_based_on_artist_title_match(self):\n track = Track(artist='Artist', album='Album', title='Title',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_title=True, pattern_title='Title',\n change_title=True, to_title='Title 2')\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist')\n self.assertEqual(track.album, 'Album')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.transformed, True)", "def __already_there(self, pad_file, strip, transfomation_class, T, V0, skip=False):\n\n if skip:\n return False # If you want to skip this check and force the table to move\n\n pos = self.get_current_position() # table position\n pad_pos = [\n float(x) for x in pad_file[\"data\"][str(strip)]\n ] # where it should be in sensor system\n table_pos = transfomation_class.vector_trans(\n pad_pos, T, V0\n ) # Transforms the vektor to the right basis\n deltapos = [abs(x1 - x2) for (x1, x2) in zip(pos, table_pos)]\n\n for delta in deltapos:\n if delta >= 0.5: # Checks if the position is correct in a range of 0.5 um\n return False\n return True", "def song_already_exists(song, playlist_id):\n print('Song {title} already in playlist {playlist_id}, adding has been skipped.'\n .format(title=song.title,\n playlist_id=playlist_id))\n pass", "def _save_results(self, src, dst):\n\t\tlog = self.log\n\n\t\tif not os.path.isfile(src):\n\t\t\tlog.error(\"The folder \\\"%s\\\" doesn't exist.\" % src)\n\t\t\treturn False\n\n\t\tif not os.path.exists(dst):\n\t\t\ttry:\n\t\t\t\tos.makedirs(dst)\n\t\t\texcept (IOError, os.error), why:\n\t\t\t\tlog.error(\"Unable to create directory \\\"%s\\\": %s\" % (dst, why))\n\t\t\t\treturn False\n\t\telse:\n\t\t\tlog.error(\"The folder \\\"%s\\\" already exists. It should be used \" \\\n\t\t\t\t\t \"for storing results of task with ID %s. 
\" \\\n\t\t\t\t\t \"Have you deleted Cuckoo's database?\"\n\t\t\t\t\t % (dst, self.task[\"id\"]))\n\t\t\treturn False\n\t\ttry:\n\t\t\ttar = tarfile.open(src, \"r:gz\")\n\t\t\ttar.extractall(path = dst)\n\t\t\ttotal = len(tar.getmembers())\n\t\t\tlog.debug(\"Extracted %d elements\" % total)\n\t\texcept:\n\t\t\tlog.error(\"Trouble extracting '%s'\" % src)\n\t\t\treturn False\n\t\treturn True", "def test_apply_transform_two_albums_one_matches(self):\n tf_pk = self.add_transform(cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2')\n self.assertNotEqual(tf_pk, 0)\n\n self.app.load_data()\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120, last_transform=tf_pk)\n pk_first = album.insert(self.app.db, self.app.curs)\n album = Album(artist='Artist', album='Album 2',\n totaltracks=1, totalseconds=120)\n pk_second = album.insert(self.app.db, self.app.curs)\n\n row = self.get_album_by_id(pk_first)\n self.assertEqual(row['lasttransform'], tf_pk)\n row = self.get_album_by_id(pk_second)\n self.assertEqual(row['lasttransform'], 0)\n\n for line in self.app.apply_transforms():\n pass\n\n row = self.get_album_by_id(pk_first)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['alartist'], 'Artist')\n\n row = self.get_album_by_id(pk_second)\n self.assertEqual(row['lasttransform'], tf_pk)\n self.assertEqual(row['alartist'], 'Artist 2')", "def test_retrieve_original_to_temp(self):\n # put file\n self.prepare_uploads()\n backend = BackendS3(**self.config)\n src = os.path.join(self.upload_path, 'demo-test.tar.gz')\n id = utils.generate_id('demo-test.tar.gz')\n backend.put(src, id)\n\n # retrieve file\n result = backend.retrieve_original(id, self.tmp_path)\n expected_dst = os.path.join(self.tmp_path, id, 'demo-test.tar.gz')\n self.assertEquals(expected_dst, result)\n self.assertTrue(os.path.exists(expected_dst))", "def precheck(self):\n if (not dfs.exists(self.outputpath)):\n logger.debug(\"precheck(%s): outputpath %s does not exist, ready to run.\" \n % (self, self.outputpath))\n return 'ready'\n inTSs = [dfs.modtime(file) for file in self.inputpaths]\n outTS = dfs.modtime(self.outputpath)\n newer = reduce(lambda x,y: x or y, [(inTS>outTS) for inTS in inTSs])\n logger.debug(\"Input timestamps: %s\" % inTSs)\n logger.debug(\"Output timestamp: %s\" % outTS)\n if newer:\n logger.debug(\"At least one input file is newer than outputfile, ready to run.\")\n dfs.delete(self.outputpath)\n return 'ready'\n else:\n logger.debug(\"All input files are newer than outputfile, skipping.\")\n return 'skip'", "def test_transform_album_album_based_on_artist_album_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist='Artist',\n cond_album=True, change_album=True,\n pattern_album='Album', to_album='Album 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album 2')\n self.assertEqual(album.transformed, True)", "def handle_track_conversion_exception(track_path, track_idx, error):\n global active_track_idx\n track_name = os.path.basename(track_path)\n\n if ((error == \"Unrecognized audio format\" \n or error == \"Position not implemented for music type\")\n and tkinter.messagebox.askyesno(\"Convert file\", \"Can't play this audio format, convert to .ogg?\")):\n converted_track, converted_track_path = 
convert_track(track_path)\n if converted_track is not None:\n track_name = os.path.basename(converted_track_path)\n playlist_box.delete(track_idx)\n playlist_box.insert(track_idx, track_name)\n playlist[track_idx] = converted_track_path\n playlist_box.selection_set(track_idx)\n play_pause(track_idx=track_idx)\n else:\n tkinter.messagebox.showwarning(\n title=\"Warning!\", message=f\"Audio file incorrect : {track_name}, Please chose another file!\")\n # Uselect the incorrect track\n playlist_box.selection_clear(track_idx)\n # Select the previous active track\n playlist_box.selection_set(active_track_idx)\n playlist_box.see(active_track_idx)\n else:\n tkinter.messagebox.showwarning(\n title=\"Warning!\", message=f\"Audio file incorrect : {track_name}, Please chose another file!\")\n # Uselect the incorrect track\n playlist_box.selection_clear(track_idx)\n # Select the previous active track\n playlist_box.selection_set(active_track_idx)\n playlist_box.see(active_track_idx)", "def has_album_cover(audio) -> bool:\r\n if type(audio) == str: audio: File = File(audio)\r\n try:\r\n fix_cover(audio)\r\n if 'APIC:' in audio:\r\n apic: mutagen.id3.APIC = audio['APIC:']\r\n if apic.encoding != Encoding.LATIN1:\r\n apic.encoding = Encoding.LATIN1\r\n audio['APIC:'] = apic\r\n audio.save()\r\n return True\r\n except KeyError: audio.add_tags()\r\n return False", "def song_just_finished(self):\n uart_return_code = self.mp3Player.uart.readline()\n _debug(\"uart_return_code\", uart_return_code)\n # uart_return_code == b'~\\xff\\x06=\\x00\\x00\\x12\\xfe\\xac\\xef~\\xff\\x06=\\x00\\x00\\x12\\xfe\\xac\\xef'\n return uart_return_code and b'\\x06=' in uart_return_code", "def test_output_exists():\n assert song_decoder(\"WUWUBUBWUBUWUB\") is not None", "def is_correct(self, ans):\n \n seq = self.player_seq.copy()\n seq.append(ans)\n return seq[-1] == self.sequence[len(seq) - 1]", "def update_temperature(self):\n if self.T < self.Tmin:\n return False\n self.T -= self.alpha\n\n return True", "def test_transform_track_full_transform(self):\n track = Track(artist='Artist', album='Album', title='Title',\n ensemble='Ensemble', conductor='Conductor', composer='Composer',\n tracknum=1, seconds=60)\n transform = Transform(1,\n cond_artist=True, change_artist=True, pattern_artist='Artist', to_artist='Artist 2',\n cond_album=True, change_album=True, pattern_album='Album', to_album='Album 2',\n cond_title=True, change_title=True, pattern_title='Title', to_title='Title 2',\n cond_ensemble=True, change_ensemble=True, pattern_ensemble='Ensemble', to_ensemble='Ensemble 2',\n cond_composer=True, change_composer=True, pattern_composer='Composer', to_composer='Composer 2',\n cond_conductor=True, change_conductor=True, pattern_conductor='Conductor', to_conductor='Conductor 2',\n )\n\n self.assertEqual(track.last_transform, 0)\n transform.apply_track(track)\n self.assertEqual(track.last_transform, 1)\n self.assertEqual(track.artist, 'Artist 2')\n self.assertEqual(track.album, 'Album 2')\n self.assertEqual(track.title, 'Title 2')\n self.assertEqual(track.ensemble, 'Ensemble 2')\n self.assertEqual(track.conductor, 'Conductor 2')\n self.assertEqual(track.composer, 'Composer 2')\n self.assertEqual(track.transformed, True)", "def _check_duplicate_trans(self):\n transactions_set = set(self._transactions)\n return len(transactions_set) == len(self._transactions)", "def test_get_all_need_transform_one_album_another_already_applied(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=120, last_transform=1)\n pk 
= album.insert(self.app.db, self.app.curs)\n album = Album(artist='Artist', album='Album 2',\n totaltracks=1, totalseconds=120)\n pk = album.insert(self.app.db, self.app.curs)\n self.assertEqual(self.get_album_count(), 2)\n\n albums = Album.get_all_need_transform(self.app.curs, 1)\n self.assertEqual(len(albums), 1)\n self.assertEqual(albums[0].pk, pk)", "def _media_playback_trackable(self) -> bool:\n if (\n self.coordinator.data.nowplaying[self.zone.SourceID].CurrSong.Duration\n is None\n ):\n return False\n\n return (\n self.coordinator.data.nowplaying[self.zone.SourceID].CurrSong.Duration > 0\n )", "def test_two_transforms_album_with_one_already_applied(self):\n album = Album(artist='Artist', album='Album', last_transform=1)\n tflist = TransformList()\n tflist.add_transform(Transform(1,\n cond_artist=True, pattern_artist='Artist',\n change_artist=True, to_artist='Artist 2',\n ))\n tflist.add_transform(Transform(2,\n cond_album=True, pattern_album='Album',\n change_album=True, to_album='Album 2',\n ))\n\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, False)\n\n tflist.apply_album(album)\n\n self.assertEqual(album.last_transform, 2)\n self.assertEqual(album.artist, 'Artist')\n self.assertEqual(album.album, 'Album 2')\n self.assertEqual(album.transformed, True)", "def test_transform_album_artist_based_on_artist_album_match(self):\n album = Album(artist='Artist', album='Album',\n totaltracks=1, totalseconds=60)\n transform = Transform(1,\n cond_artist=True, pattern_artist = 'Artist',\n cond_album=True, pattern_album='Album',\n change_artist=True, to_artist='Artist 2')\n\n self.assertEqual(album.last_transform, 0)\n transform.apply_album(album)\n self.assertEqual(album.last_transform, 1)\n self.assertEqual(album.artist, 'Artist 2')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.transformed, True)", "def is_templated(self) -> bool:\n # We check two things:\n # * Source slice not empty: If it's empty, this means it doesn't appear\n # in the source, e.g. because it is new code generated by a lint fix.\n # Return False for these.\n # * It's not a literal slice. If it's a literal and has size then it's\n # not templated.\n assert self.pos_marker\n return (\n self.pos_marker.source_slice.start != self.pos_marker.source_slice.stop\n and not self.pos_marker.is_literal()\n )" ]
[ "0.56140214", "0.54819727", "0.542196", "0.5414857", "0.5397536", "0.5393968", "0.5390524", "0.53878826", "0.53857505", "0.5385733", "0.5346858", "0.5340154", "0.53382695", "0.5315191", "0.5310992", "0.53011787", "0.52986276", "0.5296976", "0.5272015", "0.5269272", "0.5257539", "0.524134", "0.52334416", "0.5214529", "0.5213078", "0.5204291", "0.5201751", "0.5199436", "0.5190387", "0.5173247", "0.5163136", "0.5151035", "0.5130697", "0.5121556", "0.5096637", "0.50943345", "0.5082233", "0.5080146", "0.50487316", "0.50477475", "0.50392365", "0.5035174", "0.5014872", "0.50142586", "0.49894363", "0.49886468", "0.4980671", "0.49675208", "0.49637604", "0.4961595", "0.49520838", "0.49507785", "0.49445918", "0.49408334", "0.4940346", "0.49397984", "0.49166894", "0.49107316", "0.49092755", "0.49081558", "0.48978126", "0.48955667", "0.48874027", "0.48810762", "0.48802632", "0.4876078", "0.48749265", "0.48670486", "0.48637903", "0.48501468", "0.4818607", "0.47857872", "0.47829023", "0.4770718", "0.47546858", "0.47464556", "0.47393838", "0.47374648", "0.47328094", "0.47311017", "0.47242764", "0.47241095", "0.47193706", "0.47183797", "0.47170103", "0.4715653", "0.4711828", "0.47106758", "0.4710624", "0.47000402", "0.46957868", "0.46922484", "0.46815807", "0.46662733", "0.46619847", "0.46546325", "0.46461472", "0.46428725", "0.46395856", "0.46264744" ]
0.5619942
0
Return a dictionary with memory information.
def get_mem_info():
    MemInfoEntry = namedtuple('MemInfoEntry', ['value', 'unit'])
    mem_info = {}
    with open('/proc/meminfo') as file:
        for line in file:
            key, value, *unit = line.strip().split()
            mem_info[key.rstrip(':')] = MemInfoEntry(value, unit)
    return mem_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MemoryInfo(cls):\n\t\tres = {}\n\t\tfor line in cat(\"/proc/meminfo\").split(\"\\n\")[:-1]:\n\t\t\tline = RE_SPACES.sub(\" \", line).strip().split(\" \")\n\t\t\tname, value = line[:2]\n\t\t\tres[name.replace(\"(\", \"_\").replace(\")\", \"_\").replace(\":\", \"\")] = int(value)\n\t\treturn res", "def get_mem():\n return {\n 'MEM': string_chopped_to_float(psutil.virtual_memory(), 'percent=', ', used'),\n }", "def get_memory() -> dict:\n import os\n\n import psutil\n\n proc = psutil.Process(os.getpid())\n return proc.memory_info()", "def _api_memory_info() -> Dict[str, Any]:\n process = psutil.Process(os.getpid())\n return {k: size(v) for k, v in process.memory_info()._asdict().items()}", "def get_mem_info():\n import psutil\n vm = psutil.virtual_memory()\n return {\n \"memtotal\": vm.total,\n \"memavailable\": vm.available,\n }", "def memory():\n\twith open('/proc/meminfo','r') as mem:\n\t\tret = {}\n\t\ttmp = 0\n\t\tfor i in mem:\n\t\t\tsline = i.split()\n\t\t\tif str(sline[0])=='MemTotal:':\n\t\t\t\tret['total'] = int(sline[1]*1.0e-6)\n\treturn ret", "def _memory(self):\n memory = {}\n memory_used = cpmCPUMemoryUsed\n varbinds = self._snmp_connection.bulk_walk(memory_used)\n for varbind in varbinds:\n # grab the last element of the index to use as the memory_id\n if self._cisco_model in self._n3k_models:\n memory_id = self._process_mib_indices_table[int(varbind.index.split('.')[-1])]\n else:\n memory_id = int(varbind.index.split('.')[-1])\n memory[memory_id] = {u'memory_used': int(varbind.value)}\n\n memory_free = cpmCPUMemoryFree\n varbinds = self._snmp_connection.bulk_walk(memory_free)\n for varbind in varbinds:\n # grab the last element of the index to use as the memory_id\n if self._cisco_model in self._n3k_models:\n memory_id = self._process_mib_indices_table[int(varbind.index.split('.')[-1])]\n else:\n memory_id = int(varbind.index.split('.')[-1])\n memory[memory_id][u'memory_free'] = int(varbind.value)\n memory[memory_id][u'memory_total'] = memory[memory_id][u'memory_used'] + int(varbind.value)\n\n for memory_id in list(memory.keys()):\n if memory_id in self._module_numbers:\n if int(self._module_numbers[memory_id]) in self._entity_physical_names:\n memory[memory_id][u'memory_type'] = u\"Module {} ({})\".format(self._module_numbers[memory_id],\n self._entity_physical_names[\n int(self._module_numbers[memory_id])])\n else:\n memory[memory_id][u'memory_type'] = u\"Module {}\".format(self._module_numbers[memory_id])\n\n if not len(memory):\n self._logger.warn(\n u'Failed to get memory enrichments on device \"%s\" with model \"%s\"' %\n (self._device_fqdn, self._cisco_model))\n\n return memory", "def get_meminfo():\n\n mem_info = {}\n re_keyval = re.compile(r'^\\s*(\\S+)\\s*[=:]\\s*(\\d+)')\n try:\n with open(MEMINFO, 'r') as mem_file:\n for line in mem_file:\n match = re_keyval.search(line)\n if match:\n keyfile = match.group(1)\n val = match.group(2)\n mem_info[keyfile] = int(val)\n except IOError as err:\n LOG.error('%s: Cannot read meminfo, error=%s',\n 'platform memory usage', err)\n return mem_info\n\n return mem_info", "def getMemDetail(self):\n mem = {}\n if self.type in ['E', 'T', 'S', 'K', 'A', 'AX', 'W']:\n m = \"The percentage of CP memory utilization:\\s*([\\d\\.]+)%\\s+DP memory utilization:\\s*([\\d\\.]+)%\"\n rt = re.search(m, self.dut.cli(\"show memory detail\"))\n if rt:\n mem = {\"cp\": float(rt.groups()[0]), \"dp\": float(rt.groups()[1])}\n return mem", "def mem_info():\n meminfo = OrderedDict()\n with open('/proc/meminfo') as f:\n for line in f:\n 
meminfo[line.split(':')[0]] = line.split(':')[1].strip()\n return meminfo", "def get_memory_info():\n return psutil.virtual_memory()", "def get_mem_info(vars = {}, log = sys.stderr):\n\n try:\n meminfo_file= file(PROC_MEMINFO_PATH,\"r\")\n except IOError, e:\n return\n\n mem_info = {}\n\n for line in meminfo_file:\n\n try:\n (fieldname,value)= string.split(line,\":\")\n except ValueError, e:\n # this will happen for lines that don't have two values\n # (like the first line on 2.4 kernels)\n continue\n\n fieldname= string.strip(fieldname)\n value= string.strip(value)\n\n if fieldname == 'MemTotal' or fieldname == 'MemFree' or fieldname == 'Active' or fieldname == 'Inactive':\n mem_info.update(mem_to_dict(fieldname, value))\n\n\n meminfo_file.close()\n return mem_info", "def get_meminfo():\r\n info = {}\r\n with open('/proc/meminfo') as f:\r\n for line in f:\r\n m = _MEMINFO_RE.match(line)\r\n if m:\r\n if m.group(2):\r\n name = m.group(1) + '_' + m.group(2)[1:-1]\r\n else:\r\n name = m.group(1)\r\n info[name] = int(m.group(3))\r\n return collections.namedtuple('MemInfo', list(info.keys()))(**info)", "def memory():\n\n mem_info = {}\n\n if platform.linux_distribution()[0]:\n with open('/proc/meminfo') as file:\n c = 0\n for line in file:\n lst = line.split()\n if str(lst[0]) == 'MemTotal:':\n mem_info['total'] = int(lst[1])\n elif str(lst[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n c += int(lst[1])\n mem_info['free'] = c\n mem_info['used'] = (mem_info['total']) - c\n elif platform.mac_ver()[0]:\n ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0]\n vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]\n\n # Iterate processes\n process_lines = ps.split('\\n')\n sep = re.compile('[\\s]+')\n rss_total = 0 # kB\n for row in range(1, len(process_lines)):\n row_text = process_lines[row].strip()\n row_elements = sep.split(row_text)\n try:\n rss = float(row_elements[0]) * 1024\n except:\n rss = 0 # ignore...\n rss_total += rss\n\n # Process vm_stat\n vm_lines = vm.split('\\n')\n sep = re.compile(':[\\s]+')\n vm_stats = {}\n for row in range(1, len(vm_lines) - 2):\n row_text = vm_lines[row].strip()\n row_elements = sep.split(row_text)\n vm_stats[(row_elements[0])] = int(row_elements[1].strip('\\.')) * 4096\n\n mem_info['total'] = rss_total\n mem_info['used'] = vm_stats[\"Pages active\"]\n mem_info['free'] = vm_stats[\"Pages free\"]\n else:\n raise('Unsupported Operating System.\\n')\n exit(1)\n\n return mem_info", "def get_memory_map(self):\n return self._memory_map", "def get_memory_usage(cls):\n\n mem_stats = psutil.virtual_memory()\n\n mem_stats_dict = { StatsKeys.MEMORY :\n {\n StatsKeys.TOTAL : mem_stats.total,\n StatsKeys.AVAILABLE : mem_stats.available,\n StatsKeys.USED : mem_stats.used\n }\n }\n logger.debug(\"Memory stats: {}\".format(mem_stats_dict))\n\n return mem_stats_dict", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def get_memory_info(dut):\n command = \"top -n 1 b | grep 'KiB Mem' \"\n output = st.show(dut, command)\n include_keys = ['total', 'used', 'free', 'buff_cache']\n rv = {each_key: ast.literal_eval(output[0][each_key]) for each_key in output[0] if each_key in 
include_keys}\n return rv", "def memory(self):\r\n return self._memory", "def getMemory(self):\n return self.memory", "def get_memory(self, mem_type='usedMemory'):\n pass", "def memory(self) -> Optional[Any]:\n return pulumi.get(self, \"memory\")", "def memory(self) -> Optional[Any]:\n return pulumi.get(self, \"memory\")", "def _build_memorymap(self):\n\t\tmemorymap = {}\n\t\ttotalsize = 0\n\t\tbaserva = self.liststream64.DirectoryData.BaseRva\n\t\tmmdscrptr64 = self.liststream64.DirectoryData.MINIDUMP_MEMORY_DESCRIPTOR64\n\t\tnumberofmemoryranges = self.liststream64.DirectoryData.NumberOfMemoryRanges\n\t\tfor i in range(numberofmemoryranges):\n\t\t\tmemorymap[mmdscrptr64[i].StartOfMemoryRange] = ((baserva + totalsize),mmdscrptr64[i].DataSize)\n\t\t\ttotalsize += mmdscrptr64[i].DataSize\n\t\treturn memorymap", "def get_memory_usage():\n\n memory_usage = {'total' : 0, 'used' : 0}\n meminfo = subprocess.Popen(['free', '-m'], shell=False, stdout=subprocess.PIPE)\n meminfo.stdout.readline()\n total_used = meminfo.stdout.readline()\n memory_usage['total'] = total_used.split()[1]\n memory_usage['used'] = total_used.split()[2]\n return memory_usage", "def stats():\n global CACHE, STATS_MISSES, STATS_HITS, STATS_KEYS_COUNT\n memory_address = \"0x\" + str(\"%X\" % id( CACHE )).zfill(16)\n return {'cache_memory_address': memory_address,\n 'hits': STATS_HITS,\n 'misses': STATS_MISSES ,\n 'keys_count': STATS_KEYS_COUNT,\n }", "def memory():\n sin = psutil.virtual_memory()\n return round((sin.total / sin.used) / 100, 3)", "def get_memory_visit_lookup(self) -> Dict[str, int]:\n return self.memory_visit_lookup", "def get_cpu_memory_info(process_name):\n info_dict = dict()\n try:\n process_list = get_process_info(process_name)\n for process in process_list:\n cmdline = process.cmdline()\n name = os.path.basename(cmdline[2]) if len(cmdline) > 3 else process_name + \"_\" + str(process.pid)\n name = process_name + \"_\" + str(process.pid) if not name else name\n cpu_info = process.cpu_percent(3)\n memory_info = process.memory_full_info()\n info_dict.update({name: {\"cpu\": cpu_info, \"memory\": memory_info}})\n except Exception as e:\n logger.error(\"Fetch the process %s of cpu and memory info err: %s\" % (process_name, e), html=True)\n\n return info_dict", "def get_mem(self) -> list:\n return self.__mem", "def get_gpu_memory_map():\n\tresult = subprocess.check_output(\n\t\t[\n\t\t\t'nvidia-smi', '--query-gpu=memory.used',\n\t\t\t'--format=csv,nounits,noheader'\n\t\t])\n\tresult = result.decode('utf-8')\n\t# Convert lines into a dictionary\n\tgpu_memory = [int(x) for x in result.strip().split('\\n')]\n\tgpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n\treturn gpu_memory_map", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ])#, encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n return gpu_memory_map", "def get_gpu_memory_map():\r\n result = subprocess.check_output(\r\n [\r\n 'nvidia-smi', '--query-gpu=memory.free',\r\n '--format=csv,nounits,noheader'\r\n ], encoding='utf-8')\r\n # Convert lines into a dictionary\r\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\r\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\r\n return gpu_memory_map", "def get_gpu_memory_map():\n result = subprocess.check_output(\n [\n 'nvidia-smi', 
'--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n print(\"Current usage: %i of 11178\" % gpu_memory_map[1])", "def available_memory(kind=None):\n values = [v.split() for v in subprocess.check_output([\"/usr/bin/free\", \"-m\"]).split('\\n')[1:] if v.split()]\n \n d = dict([(k[0][0:-1], dict(zip(('total', 'used', 'free'), [int(s) for s in k[1:]]))) \n for k \n in [v[0:4] \n for v \n in values \n if v[0] in ('Mem:', 'Swap:')]])\n if kind in ('Mem', 'Swap'):\n return d[kind]\n else:\n return d", "def get_gpu_memory_map():\n\tresult = subprocess.check_output(\n\t\t[\n\t\t\t'nvidia-smi', '--query-gpu=memory.free',\n\t\t\t'--format=csv,nounits,noheader'\n\t\t])\n\t# Convert lines into a dictionary\n\tresult=result.decode('utf-8')\n\tprint(result)\n\tgpu_memory = [int(x) for x in result.strip().split('\\n')]\n\tgpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n\treturn gpu_memory_map", "def gpu_memory_mb() -> Dict[int, int]:\n # pylint: disable=bare-except\n try:\n result = subprocess.check_output(['nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'],\n encoding='utf-8')\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n return {gpu: memory for gpu, memory in enumerate(gpu_memory)}\n except FileNotFoundError:\n # `nvidia-smi` doesn't exist, assume that means no GPU.\n return {}\n except:\n # Catch *all* exceptions, because this memory check is a nice-to-have\n # and we'd never want a training run to fail because of it.\n logger.exception(\"unable to check gpu_memory_mb(), continuing\")\n return {}", "def gpu_memory_info(device_id=0):\n free = ctypes.c_uint64()\n total = ctypes.c_uint64()\n dev_id = ctypes.c_int(device_id)\n check_call(_LIB.MXGetGPUMemoryInformation64(dev_id, ctypes.byref(free), ctypes.byref(total)))\n return (free.value, total.value)", "def get_mem_usage(**kwargs):\n try:\n con_mem_data_list = kwargs[\"con\"]._client.get_memory(\n session=kwargs[\"con\"]._session, memory_level=kwargs[\"mem_type\"]\n )\n usedram = 0\n freeram = 0\n for con_mem_data in con_mem_data_list:\n page_size = con_mem_data.page_size\n node_memory_data_list = con_mem_data.node_memory_data\n for node_memory_data in node_memory_data_list:\n ram = node_memory_data.num_pages * page_size\n is_free = node_memory_data.is_free\n if is_free:\n freeram += ram\n else:\n usedram += ram\n totalallocated = usedram + freeram\n if totalallocated > 0:\n totalallocated = round(totalallocated / 1024 / 1024, 1)\n usedram = round(usedram / 1024 / 1024, 1)\n freeram = round(freeram / 1024 / 1024, 1)\n ramusage = {}\n ramusage[\"usedram\"] = usedram\n ramusage[\"freeram\"] = freeram\n ramusage[\"totalallocated\"] = totalallocated\n ramusage[\"errormessage\"] = \"\"\n except Exception as e:\n errormessage = \"Get memory failed with error: \" + str(e)\n logging.error(errormessage)\n ramusage[\"errormessage\"] = errormessage\n return ramusage", "def get_mem_usage():\n \n with open('/proc/meminfo') as f:\n for line in f:\n if line.startswith('MemTotal:'):\n mem_total = int(line.split()[1])\n elif line.startswith('MemFree:'):\n mem_free = int(line.split()[1])\n elif line.startswith('VmallocTotal:'):\n vm_total = int(line.split()[1])\n elif line.startswith('Cached:'):\n mem_cached = int(line.split()[1])\n \n return {\n 'total': mem_total,\n 'res': mem_total - mem_free,\n 'virt': vm_total,\n 
'cached': mem_cached\n }", "def get_memory(self):\n return (self.K.get_value(), self.V.get_value(), self.A.get_value())", "def display_memory(self) -> None:\n return self.__memory", "def memory(self):\n # Run 'free -m' command and make a list from output.\n mem_data = self.execCMD('free', '-m').split()\n total_mem = int(mem_data[7]) / 1024.\n used_mem = int(mem_data[15]) / 1024.\n # Caculate percentage\n used_mem_percent = int(used_mem / (total_mem / 100))\n\n # Results are in kilobyte.\n return total_mem, used_mem, used_mem_percent", "def get_gpu_memory_map():\n # https://stackoverflow.com/questions/49595663/find-a-gpu-with-enough-memory\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ])\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n return gpu_memory_map", "def get_memory():\n with open('/proc/meminfo', 'r') as mem:\n free_memory = 0\n for i in mem:\n sline = i.split()\n if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):\n free_memory += int(sline[1])\n print(\"____________________ \" + str(free_memory) + \"____________________\")\n return free_memory", "def get(self):\n ret_dict = {}\n\n ret_dict[\"autoignore_rules\"] = self.shared_memory_manager_dict[\n \"autoignore_rules\"\n ]\n\n ret_dict[\"config_timestamp\"] = self.shared_memory_manager_dict[\n \"config_timestamp\"\n ]\n\n self.write(ret_dict)", "def stats(self):\n if self.__cache:\n return {\n \"size\": self.__cache.currsize,\n \"maxsize\": self.__cache.maxsize,\n \"hits\": self._hits._value.get(),\n \"miss\": self._misses._value.get(),\n }\n else:\n return super(MemoryCache, self).stats()", "def get_swap_usage(cls):\n\n swap_stats = psutil.swap_memory()\n swap_stats_dict = { StatsKeys.SWAP :\n {\n StatsKeys.FREE : swap_stats.free,\n StatsKeys.USED : swap_stats.used\n }\n }\n logger.debug(\"Swap stats: {}\".format(swap_stats_dict))\n\n return swap_stats_dict", "def _get_resident_memory_in_bytes():\n\n # Convert Kb to bytes\n k = 2**10\n\n if os.name == 'posix':\n # In Linux and MaxOS\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\n # In Linux, the output of the command is in Kb. 
Convert to Bytes.\n if sys.platform == 'linux':\n mem *= k\n\n else:\n # In windows\n pid = os.getpid()\n command = ['tasklist', '/fi', '\"pid eq %d\"' % pid]\n\n try:\n pid = os.getpid()\n command = ['tasklist', '/fi', 'pid eq %d' % pid]\n process = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stdout, stderr = process.communicate()\n error_code = process.poll()\n if error_code != 0:\n mem = 'n/a'\n return mem\n\n # Parse output\n last_line = stdout.strip().decode().split(\"\\n\")[-1]\n\n # Check last line of output has any number in it\n is_digit = [char.isdigit() for char in last_line]\n if not any(is_digit):\n mem = 'n/a'\n return mem\n\n # Get memory as string and its unit\n mem_string = last_line.split(' ')[-2].replace(',', '')\n mem = int(mem_string)\n mem_unit = last_line.split(' ')[-1]\n\n # Convert bytes based on the unit\n if mem_unit == 'K':\n exponent = 1\n if mem_unit == 'M':\n exponent = 2\n if mem_unit == 'G':\n exponent = 3\n if mem_unit == 'T':\n exponent = 4\n\n # Memory in bytes\n mem = mem * (k**exponent)\n\n except FileNotFoundError:\n mem = 'n/a'\n\n return mem", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def info_cache():\n return [custom_hit, custom_miss, len(custom_memory), total_custom_memory]", "def virtual_memory():\n mem = cext.virtual_mem()\n totphys, availphys, totsys, availsys = mem\n #\n total = totphys\n avail = availphys\n free = availphys\n used = total - avail\n percent = usage_percent((total - avail), total, round_=1)\n return svmem(total, avail, percent, used, free)", "def get_info(self):\n hits, misses, cacheSizeBytes, cacheSize = (\n self.hits,\n self.misses,\n self.__get_cache_size(),\n len(self.__recentAccessed),\n )\n filled = cacheSizeBytes / self.__maxSize\n\n return {\n \"hits\": hits,\n \"misses\": misses,\n \"cacheSize\": {\"bytes\": cacheSizeBytes, \"items\": cacheSize},\n \"filled\": filled,\n }", "def allocatememory(self):\n pass", "def allocated_memory(self):\n return self._allocated_memory", "def get_memory(self):\n return self.loss_memory", "def get_memory(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Memory Usage Statistics\",\n \"/statistics/systems/memory.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)", "def mem(self) -> List[MemorySlot]:\n return self._mem_slots", "def get_ram_info():\n ram_cent = psutil.virtual_memory()[2]\n return str(ram_cent)", "def _dump_info(resolution, block_size, pwidth):\n V, H = resolution\n M, N = block_size\n bytes = int(ceil(pwidth / 8))\n mem_bytes = 2 * M * H * bytes\n print(\"Memory requirements:\")\n print(\" {:d} bytes for double buffer\".format(mem_bytes))\n\n return bytes, mem_bytes", "def in_memory_data(self):\n return self._in_memory_data", "def get_memory_maps(self):\r\n f = None\r\n try:\r\n f = open(\"/proc/%s/smaps\" % self.pid)\r\n first_line = f.readline()\r\n current_block = [first_line]\r\n\r\n def get_blocks():\r\n data = {}\r\n for line in f:\r\n fields = line.split(None, 5)\r\n if len(fields) >= 5:\r\n yield (current_block.pop(), data)\r\n current_block.append(line)\r\n else:\r\n data[fields[0]] = int(fields[1]) * 1024\r\n yield (current_block.pop(), data)\r\n\r\n if first_line: # smaps file can be empty\r\n for header, data in 
get_blocks():\r\n hfields = header.split(None, 5)\r\n try:\r\n addr, perms, offset, dev, inode, path = hfields\r\n except ValueError:\r\n addr, perms, offset, dev, inode, path = hfields + ['']\r\n if not path:\r\n path = '[anon]'\r\n else:\r\n path = path.strip()\r\n yield (addr, perms, path,\r\n data['Rss:'],\r\n data['Size:'],\r\n data.get('Pss:', 0),\r\n data['Shared_Clean:'], data['Shared_Clean:'],\r\n data['Private_Clean:'], data['Private_Dirty:'],\r\n data['Referenced:'],\r\n data['Anonymous:'],\r\n data['Swap:'])\r\n f.close()\r\n except EnvironmentError:\r\n # XXX - Can't use wrap_exceptions decorator as we're\r\n # returning a generator; this probably needs some\r\n # refactoring in order to avoid this code duplication.\r\n if f is not None:\r\n f.close()\r\n err = sys.exc_info()[1]\r\n if err.errno in (errno.ENOENT, errno.ESRCH):\r\n raise NoSuchProcess(self.pid, self._process_name)\r\n if err.errno in (errno.EPERM, errno.EACCES):\r\n raise AccessDenied(self.pid, self._process_name)\r\n raise\r\n except:\r\n if f is not None:\r\n f.close()\r\n raise", "def MemValues():\n for line in open('/proc/meminfo').readlines():\n if line.startswith('MemTotal:'):\n memTotal = line.split()[1]\n if line.startswith('MemFree:'):\n memFree = line.split()[1]\n if line.startswith('Cached:'):\n memCached = line.split()[1]\n # :fixme: fails if one of these lines is missing in /proc/meminfo\n return memTotal, memCached, memFree", "def memory_get_usage():\n raise NotImplementedError()", "def dump_memory_maps(pid: str = \"self\") -> list[dict[str, Any]]:\n filename = os.path.join(\"/proc\", pid, \"smaps\")\n if not os.path.exists(filename):\n return []\n with open(filename, encoding=\"utf-8\") as input_:\n cur_dict: dict[str, int] = defaultdict(int)\n sizes: dict[str, Any] = {}\n for line in input_:\n line = line.rstrip(\"\\n\")\n matcher = SMAPS_LOCATION_RE.match(line)\n if matcher:\n cur_dict = sizes.setdefault(matcher.group(1), defaultdict(int))\n else:\n matcher = SMAPS_ENTRY_RE.match(line)\n if matcher:\n name = matcher.group(1)\n if name in (\"Size\", \"Rss\", \"Pss\"):\n cur_dict[name.lower() + \"_kb\"] += int(matcher.group(2))\n elif (\n not line.startswith(\"VmFlags:\")\n and not line.startswith(\"ProtectionKey:\")\n and not line.startswith(\"THPeligible:\")\n ):\n LOG.debug(\"Don't know how to parse /proc/%s/smaps line: %s\", pid, line)\n return [{\"name\": name, **value} for name, value in sizes.items() if value.get(\"pss_kb\", 0) > 0]", "def GetMemoryRegions(process_handle: int) -> Generator[dict, None, None]:\n mem_region_begin = system_information.lpMinimumApplicationAddress\n mem_region_end = system_information.lpMaximumApplicationAddress\n\n current_address = mem_region_begin\n\n while current_address < mem_region_end:\n region = MEMORY_BASIC_INFORMATION()\n kernel32.VirtualQueryEx(process_handle, current_address, ctypes.byref(region), ctypes.sizeof(region))\n\n yield {\"address\": current_address, \"size\": region.RegionSize, \"struct\": region}\n\n current_address += region.RegionSize", "def subcmd_getmemory_main(args, parameter_info):\n \n from get_memory_inventory import get_memory_inventory\n result = get_memory_inventory(parameter_info['ip'], parameter_info['user'], parameter_info['passwd'], parameter_info['sysid'], None)\n \n if result['ret'] is True:\n del result['ret']\n sys.stdout.write(json.dumps(result['entries'], sort_keys=True, indent=2))\n else:\n sys.stderr.write(result['msg'])", "def process_memory():\n process = psutil.Process()\n return 
int(convert.bytetomb(process.memory_info().rss))", "def mem_per_proc(self):\n return self._mem_per_proc", "def deviceMemory(self):\n return 1", "def get(self):\n\t\treturn {\n\t\t\t'system': self.get_system_information(),\n\t\t\t'cpu': self.get_cpu_stats(),\n\t\t\t'gpu': self.get_gpu_stats(),\n\t\t\t'ram': self.get_ram_stats(),\n\t\t\t'storage': self.get_storage_stats(),\n\t\t\t'battery': self.get_battery_stats(),\n\t\t\t'temps': self.get_temperatures()\n\t\t}", "def print_memory_stats(location_tag=\"undef\"):\n try:\n import psutil\n p = psutil.Process(os.getpid())\n rm, vm = p.get_memory_info()\n print \"MEM_STAT (%s) rm=%s, vm=%s\" % (location_tag, rm, vm)\n except ImportError:\n print \"psutil module not available\"", "def info(dump_alloc_table: bytes, /) -> None:", "def _get_mem_info(self):\n memory_usage_pct = None\n try:\n memory_usage = self._get_cgroups_current_memory_usage()\n if self._max_memory_usage and memory_usage:\n memory_usage_pct = round((memory_usage / self._max_memory_usage) * 100, 1)\n except BaseException:\n self._log.warning(f'Unable to determine memory usage', exc_info=True)\n return memory_usage_pct", "def get_memory(self, os: str, line: List, value: str, key: str):\n\n if args.memtype == \"gb\":\n memgb = value.split()\n used = float(memgb[1].replace(\"MiB\", \"\"))\n total = float(memgb[3].replace(\"MiB\", \"\"))\n\n line.append(\n \" \".join(\n [\n str(round(used / 1024, 2)),\n \"GiB /\",\n str(round(total / 1024, 2)),\n \"GiB\",\n ]\n )\n )\n else:\n line.append(value[value.find(key) + len(key) + 1 :])", "def record_memory_map(self):\n memory_map = self.get_memory_map()\n self._memory_map_records.append(memory_map)", "def get_space():\n return {}", "def __init__(self):\n self.memory = {}\n self.sum_ = {}", "def info(self):\n return dict(\n name=self.name,\n offset=self.offset,\n length=self.length,\n width=self.width,\n height=self.height,\n )", "def get_memory_usage(conn):\n get_all_mem = conn.getInfo()[1] * 1048576\n get_freemem = conn.getMemoryStats(-1, 0)\n if type(get_freemem) == dict:\n free = (list(get_freemem.values())[0] +\n list(get_freemem.values())[2] +\n list(get_freemem.values())[3]) * 1024\n percent = (100 - ((free * 100) / get_all_mem))\n usage = (get_all_mem - free)\n mem_usage = {'usage': usage, 'percent': percent}\n else:\n mem_usage = {'usage': None, 'percent': None}\n return mem_usage", "def allocatememory(self):\n\n for key, value in self._dentsvertsdata.items():\n value.allocatememory()", "def set_memory_map(self):\n sorted_list_tuple = sorted([(key, value) for key, value in \n self._page_map.iteritems() if value[0] >= 0], \n key=lambda pair: pair[1])\n self._memory_map = [pair[0] for pair in sorted_list_tuple]", "def get_resident_memory(human_readable=False):\n\n mem = Memory._get_resident_memory_in_bytes()\n\n # Convert from bytes to the closets unit\n if human_readable:\n mem, unit = Memory._human_readable_memory(mem)\n else:\n mem = mem\n unit = 'b'\n\n return mem, unit", "def read_memory(self, address):\n return self.memory[Vm.filter_mem_address(address)]", "def collect():\n\n command = \"cat /proc/meminfo |grep MemTotal|awk -F' ' '{print $2}'\"\n memTotal_f = round(float(os.popen(command).read())/1024/1000,0)\n memTotal = int(memTotal_f)\n cmd = 'df -h |grep \"/dev/s\"'\n metric_disk = os.popen(cmd).readlines()\n hardNum=[]\n for i in metric_disk:\n hard_space = float((i.strip().split()[1])[:-1])\n hardNum.append(hard_space)\n\n disk_info = sum(hardNum)\n disk_use = {}\n metric_disks=os.popen('df -x tmpfs -x devtmpfs | grep -Eo \" 
/\\S*$\" ').readlines()\n for disk in metric_disks:\n cmd = 'df|grep -E \"%s$\"' % disk.strip()\n disks = os.popen(cmd).readlines()[0]\n disk_list = disks.split()\n disk_use[disk_list[5]]=disk_list[4]\n hard = {\n \"disk_used\" : disk_use,\n \"disk_total\":disk_info,\n \"mem_total\":memTotal\n }\n\n return hard", "def memory(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"memory\")", "def memory(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"memory\")", "def memory(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"memory\")", "def read_from_meminfo(key):\r\n meminfo = utils.system_output('grep %s /proc/meminfo' % key)\r\n return int(re.search(r'\\d+', meminfo).group(0))", "def set_memory_map(self):\n sorted_list_tuple = sorted([(key, value) for key, value in \n self._page_map.iteritems() if value >= 0], \n key=lambda pair: pair[1])\n self._memory_map = [pair[0] for pair in sorted_list_tuple]", "def mem(self) -> List[float]:\n return list(map(attrgetter(\"mem\"), self.stats))", "def total_mem(self):\n return Memory(self.mem_per_proc * self.mpi_procs + self.master_mem_overhead, \"Mb\")", "def mem_from_proc_meminfo():\n with open('/proc/meminfo') as f:\n for line in f:\n if line.startswith('MemTotal:'):\n parts = line.strip().split()\n val, unit = parts[1:3]\n unit_factor = unit2factor[unit[0].lower()]\n return int(val) * unit_factor\n raise RuntimeError('Could not get MemTotal from /proc/meminfo')", "def statistics(self):\n \n u_self = resource.getrusage(resource.RUSAGE_SELF)\n\tu_children = resource.getrusage(resource.RUSAGE_CHILDREN)\n\t\n\tpath = os.getenv('TMPDIR')\n\tif not path:\n\t path = os.getcwd()\n\t \n\tdisk = 0 \n\tfor root, dirs, files in os.walk(path): \n\t for d in dirs+files:\n\t disk += os.stat(os.path.join(root, d)).st_size\n\n return dict(\n\t cpu = u_self[0]+u_self[1]+u_children[0]+u_children[1],\n\t memory = (u_self[2]+u_children[2])*resource.getpagesize(),\n\t disk = disk,\n\t time = self.elapsed_time(),\n\t signal = self.signal\n\t)" ]
[ "0.83256704", "0.8320725", "0.8266024", "0.8175442", "0.8164575", "0.800875", "0.79796195", "0.7837153", "0.778341", "0.77577746", "0.7723628", "0.76374567", "0.7615092", "0.75794065", "0.75041574", "0.7460927", "0.7370262", "0.7370262", "0.7370262", "0.7370262", "0.7370262", "0.7370262", "0.7370262", "0.7342351", "0.72449344", "0.7105379", "0.6994703", "0.69471985", "0.69471985", "0.6943392", "0.6937576", "0.6926306", "0.6920062", "0.6889709", "0.68738246", "0.68588954", "0.6840742", "0.6831121", "0.68232393", "0.67749625", "0.6769016", "0.676402", "0.6748436", "0.6737532", "0.6710872", "0.67093104", "0.6700362", "0.66965973", "0.6639386", "0.6631755", "0.65358216", "0.65303177", "0.6495658", "0.6478304", "0.6441226", "0.643524", "0.63853794", "0.6383189", "0.63681173", "0.63569677", "0.6343911", "0.6343194", "0.6316054", "0.6315454", "0.6305151", "0.6282885", "0.6243941", "0.62332195", "0.62220657", "0.6218074", "0.6208139", "0.6191546", "0.61675143", "0.61562455", "0.6145494", "0.6112668", "0.61017287", "0.6091687", "0.6081973", "0.6078345", "0.6074015", "0.6065505", "0.60526216", "0.60494584", "0.6049306", "0.5999465", "0.59978", "0.599158", "0.59847075", "0.594854", "0.5943049", "0.5940645", "0.5940645", "0.5940645", "0.593991", "0.5937845", "0.5930852", "0.59231603", "0.59128064", "0.5912018" ]
0.79375446
7
Return the free space in Gigabytes.
def get_free_gb():
    mem_info = get_mem_info()
    free_gb = float(mem_info['MemAvailable'].value) / 10**6
    return free_gb
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cgts_vg_free_space():\n\n try:\n # Determine space in cgts-vg in GiB\n vg_free_str = subprocess.check_output( # pylint: disable=not-callable\n ['vgdisplay', '-C', '--noheadings', '--nosuffix',\n '-o', 'vg_free', '--units', 'g', 'cgts-vg'],\n close_fds=True, universal_newlines=True).rstrip()\n cgts_vg_free = int(float(vg_free_str))\n except subprocess.CalledProcessError:\n LOG.error(\"Command vgdisplay failed\")\n raise Exception(\"Command vgdisplay failed\")\n\n return cgts_vg_free", "def _get_free_capacity(self):\n\n capacity = np.ones(len(self.grid.T)) * len(self.grid)\n capacity -= np.count_nonzero(self.grid, axis=0)\n return capacity", "def get_space_used():\n fs.get_space_used()", "def get_available_space(self):\n return self.maxsize - len(self)", "def gb(self):\n return self.data.gb", "def free_ram():\n return int(convert.bytetomb(psutil.virtual_memory().available))", "def _free_space() -> int:\n return disk_usage(realpath('/')).free", "def used_ram():\n return total_ram() - free_ram()", "def get_free(self):\n return int(self.free_cores)", "def freespace(self):\n self.log.info(\"freespace\")\n freebytes = shutil.disk_usage(self.s3_dir).free\n self.log.info(\"returning:\" + str(freebytes))\n return freebytes", "def get_free_mem(self):\n return self.free_mem", "def get_free(self):\r\n\t\treturn len(self.free_objects)", "def fs_total_reserved_space(self):\n return self._fs_total_reserved_space", "def account_space(access_token):\n client = dropbox.client.DropboxClient(access_token)\n account_info = client.account_info()\n quota_info = account_info['quota_info']\n total = quota_info['quota']\n used = quota_info['normal'] + quota_info['shared']\n return total - used", "def __get_free_system_gid(self):\n\n gid_min, gid_max = self.__get_system_group_gid_range()\n\n busy_gids = [x.gr_gid for x in grp.getgrall() if gid_min <= x.gr_gid <= gid_max]\n\n # find free system gid\n for gid in range(gid_min, gid_max + 1):\n if gid not in busy_gids:\n return gid", "def get_free_space(dirname):\n return psutil.disk_usage(dirname).free", "def count_free_gpus():\n return len(get_free_gpus())", "def calc_free_g(energies, temperatures):\n pass", "def capacity_gb(self) -> str:\n return pulumi.get(self, \"capacity_gb\")", "def _available_space( self, pool_name ):\n\t\ttry:\n\t\t\treturn self.storage_pools[pool_name].available\n\t\texcept KeyError:\n\t\t\treturn -1", "def get_free_ram():\n try:\n output = subprocess.check_output(['free', '-b']).decode(\"utf-8\")\n lines = output.splitlines()\n m = re.match(r'\\w+:' + '(\\s+(\\d+))'*6, lines[1])\n if m:\n return int(m.group(6))\n except OSError:\n pass\n sys.stderr.write(\"Warning: Unable to determine free RAM, using 1GB\\n\")\n return 10**9", "def getSpaceUsage(path):\n st = os.statvfs(path)\n \n flash = { \"free\" : st.f_bavail * st.f_frsize, \"used\":(st.f_blocks - st.f_bfree) * st.f_frsize }\n \n #free = st.f_bavail * st.f_frsize\n #total = st.f_blocks * st.f_frsize\n #used = (st.f_blocks - st.f_bfree) * st.f_frsize\n return flash", "def get_free_slots(self):\n # pon_ports = keys(self.uncfg_onu)\n pass # TODO", "def available_space(self):\r\n space = dict()\r\n for path in self._mounts.keys():\r\n space.update({path:self.available_space_for_path(path)})\r\n return space", "def gagged(self):\r\n return self._gag", "def available_space(self):\n # From http://stackoverflow.com/a/787832/732596\n s = os.statvfs(self.path)\n return (s.f_bavail * s.f_frsize) / 1024**2", "def get_free_space(directory):\r\n if sys.platform in [\"win32\", \"cygwin\"]:\r\n 
free_bytes = ctypes.c_ulonglong(0)\r\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(directory),\r\n None, None, ctypes.pointer(free_bytes))\r\n space = free_bytes.value\r\n else:\r\n space = os.statvfs(directory).f_bfree * os.statvfs(directory).f_frsize\r\n\r\n return format_size(space)", "def memory(self):\n mem_size_list = []\n gig_size = self.random.randint(1,32)\n size = gig_size * 1073741824\n suffixes=['B','KB','MB','GB','TB']\n suffixIndex = 0\n while size > 1024 and suffixIndex < 4:\n suffixIndex += 1 #increment the index of the suffix\n size = size/1024.0 #apply the division\n mem_size_list.append(f\"{size:.2f} {suffixes[suffixIndex]}\")\n return mem_size_list", "def available_capacity(self):\r\n return self.capacity - len(self.passengers)", "def getSpace(self):\n return self.space", "def memory(self):\n # Run 'free -m' command and make a list from output.\n mem_data = self.execCMD('free', '-m').split()\n total_mem = int(mem_data[7]) / 1024.\n used_mem = int(mem_data[15]) / 1024.\n # Caculate percentage\n used_mem_percent = int(used_mem / (total_mem / 100))\n\n # Results are in kilobyte.\n return total_mem, used_mem, used_mem_percent", "def check_available_memory(self,unit='B'):\n free = psutil.virtual_memory().available\n\n if unit == 'MB':\n\n return free/10**6\n\n elif unit == 'GB':\n\n return free/10**9\n\n else:\n\n return free", "def get_free_space(folder, format=\"MB\"):\n fConstants = {\"GB\": 1073741824,\n \"MB\": 1048576,\n \"KB\": 1024,\n \"B\": 1\n }\n if platform.system() == 'Windows':\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))\n return (int(free_bytes.value / fConstants[format.upper()]), format)\n else:\n return (int(os.statvfs(folder).f_bfree * os.statvfs(folder).f_bsize / fConstants[format.upper()]), format)", "def sample_free_space(self):\n # Choose a random cell from this array\n index = np.random.choice(len(self.free_cells))\n coords = self.free_cells[index]\n m, n = coords[0], coords[1]\n \n # Convert to x, y location.\n cart_arr = self.cell_to_cartesian(coords)\n \n return cart_arr #np.array([x, y]) # Can alter to x, y, theta if necessary", "def capacity_used(self):\n raise NotImplementedError()", "def real_space(self):\n return self._real_space", "def real_space(self):\n return self._real_space", "def real_space(self):\n return self._real_space", "def memUsedGpu(self):\n return None # amount not known", "def get_free_ram_size(self):\n\t\treturn call_sdk_function('PrlStat_GetFreeRamSize', self.handle)", "def get_space(self):\n return self.space", "def gb(gigabytes): # pylint: disable=C0103\r\n return FormattedItem(int(float(gigabytes)) * 1024,\r\n \"%dG\" % int(float(gigabytes)))", "def memory_free(self) -> int:\r\n return self._memory_free", "def get_free_space(self, folder):\n if os_version == 'Windows':\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))\n return free_bytes.value\n else:\n st = os.statvfs(folder)\n return st.f_bavail * st.f_frsize", "def get_free_space(folder):\n if platform.system() == 'Windows':\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(\n ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))\n return free_bytes.value/1024/1024/1024\n else:\n st = os.statvfs(folder)\n return st.f_bavail * st.f_frsize/1024/1024/1024.", "def current_capacity_range(self):\n done, data = 
self._request('GC')\n if done:\n return int(data[0]), int(data[1])\n\n raise EvseError", "def get_capacity():\n fs.get_capacity()", "def search_space_size(self):", "def get_free_gpu():\n\tos.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp')\n\tif os.path.exists('tmp'):\n\t\tmemory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()]\n\t\tos.remove('tmp')\n\t\treturn np.argmax(memory_available)\n\treturn 0", "def get_space(): \n space = {\n 'timesteps_per_batch': hp.choice('timesteps_per_batch', [512, 1024, 2048, 4096, 8192]),\n 'vf_stepsize': hp.loguniform('vf_stepsize', -5, -2),\n 'max_kl' : hp.loguniform('max_kl', -2.5, -0.5),\n 'gamma': hp.uniform('gamma', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))), #4:T. Remember to change this if code is altered. -1:T/tau. tau=0.04=dt\n 'lam': hp.uniform('lam', (1-(1/((10**(-1))*4))), (1-(1/((10**(1.5))*4)))) #4:T. Remember to change this if code is altered. -1:T/tau. tau=0.04=dt\n }\n return space", "def freespace(p):\n s = os.statvfs(p)\n return (s.f_bsize * s.f_bavail) /1024", "def min_memory_gib(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"min_memory_gib\")", "def get_reserved_space(self):\n reserved_space_ratio = .45\n max_reserved_space = 8\n _, height = click.get_terminal_size()\n return min(int(round(height * reserved_space_ratio)), max_reserved_space)", "def get_used_capacity(self,tot=\"50\"):\n data=self.at_cmd(\"CPMS?\")\n index=data[1].find(tot)-1\n if data[1][index-1]==',':\n return data[index]\n else:\n return data[1][index-1:index]", "def get_free_gpus(gpu_num = None, bool=False, verbose=False):\n if gpu_num != None:\n check_num(gpu_num)\n return (get_gpu_utilization(gpu_num, verbose)<(1,1)).min()\n res = (get_gpu_utilization(gpu_num, verbose)<(1,1)).min(axis=1)\n if bool:\n return res\n return res.nonzero()[-1]", "def get_available_gib_in_disk(host, device_path, dbapi):\n available_gib = 0\n disks = dbapi.idisk_get_by_ihost(host.uuid)\n for disk in disks:\n if disk.device_path == device_path or disk.device_node == device_path:\n available_gib = disk.available_mib / 1024\n return available_gib", "def get_gym_space(self):\n return gym.spaces.Discrete(1)", "def max_memory_gib(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"max_memory_gib\")", "def get_used_mem(self):\n return self.used_mem", "def Capacity(self) -> int:", "def get_free_gpu(self):\r\n output = subprocess.Popen('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free', stdout=subprocess.PIPE,\r\n shell=True).communicate()[0]\r\n output = output.decode(\"ascii\")\r\n\r\n # assumes that it is on the popiah server and the last gpu is not used\r\n memory_available = [int(x.split()[2]) for x in output.split(\"\\n\")[:-2]]\r\n\r\n if memory_available:\r\n print(\"Setting GPU to use to PID {}\".format(np.argmax(memory_available)))\r\n return np.argmax(memory_available)\r\n\r\n if not memory_available:\r\n print('No GPU memory available')", "def gc(self) -> float:\n g = self.count(\"G\")\n c = self.count(\"C\")\n return (g + c) / len(self) * 100", "def ufree_disk():\n import os\n # note: this would work on PyCom devices but not implemented\n fs_stat = os.statvfs('//')\n fs_size = fs_stat[0] * fs_stat[2]\n fs_free = fs_stat[0] * fs_stat[3]\n fs_per = fs_free / fs_size\n return(\"Total: {:,} Free: {:,} ({0:.2f}%)\".format(fs_size, fs_free, fs_per))", "def get_free_disk_space(p):\n s = os.statvfs(p)\n return s.f_frsize * s.f_bavail", "def deallocate_room_space(self):\n if self._is_empty:\n return - 1\n else:\n 
self.allocated_spaces = self.allocated_spaces - 1\n self.unallocated_spaces = self.capacity - self.allocated_spaces", "def getLocalSpace(path):\n\n import Node\n thisWorkNode = Node.Node()\n thisWorkNode.collectWNInfo(path)\n return int(thisWorkNode.disk)*1024**2 # convert from MB to B", "def free(self):\n return self.i_free().j_free()", "def find_free(min_=0):\n while is_occupied(min_):\n min_ += 1\n return min_", "def get_heap_cap(self):\r\n return self.capacity", "def gpus(self):\n return self.__gpus", "def compute_free_space(self, env, obj_mask):\n free = np.ones(obj_mask.shape, dtype=np.uint8)\n for obj_ids in env.obj_ids.values():\n for obj_id in obj_ids:\n free[obj_mask == obj_id] = 0\n return free", "def get_fill_space(self):\n return self._fill_space", "def root_disk_size_gib(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"root_disk_size_gib\")", "def dbgain_free_space(self, pt_1, pt_2):\n if (pt_1.ndim > 1) or (pt_2.ndim > 1):\n raise NotImplementedError\n dist = np.linalg.norm(pt_1 - pt_2)\n\n return self.dist_to_dbgain_free_space(\n dist,\n wavelength=self.wavelength,\n antenna_dbgain_tx=self.antenna_dbgain_tx,\n antenna_dbgain_rx=self.antenna_dbgain_rx,\n )", "def gpus_used(self):\n\n return list(self._gpu_data.keys())", "def getGC(self):\n numGC = self.sequence.upper().count(\"G\") + self.sequence.upper().count(\"C\")\n self.gc = float(numGC)/len(self.sequence)\n return self.gc", "def max_memory_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_memory_gib\")", "def max_memory_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_memory_gib\")", "def sgi(self):\n from cctbx import sgtbx\n if self.space_group == '':\n sg = self.space_groups[self.centring]\n else:\n sg = self.space_group\n return sgtbx.space_group_info(sg)", "def mem_avail():\n return psutil.virtual_memory().available", "def available_memory(kind=None):\n values = [v.split() for v in subprocess.check_output([\"/usr/bin/free\", \"-m\"]).split('\\n')[1:] if v.split()]\n \n d = dict([(k[0][0:-1], dict(zip(('total', 'used', 'free'), [int(s) for s in k[1:]]))) \n for k \n in [v[0:4] \n for v \n in values \n if v[0] in ('Mem:', 'Swap:')]])\n if kind in ('Mem', 'Swap'):\n return d[kind]\n else:\n return d", "def GetGC(flanks):\n gc = 0\n total = 0\n for i in flanks:\n if i != \"N\":\n total += 1\n if i == \"C\" or i == \"G\": gc += 1\n return gc*1.0/total", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def gc(self):\n g = self.seq.count('G')\n g += self.seq.count('g')\n c = self.seq.count('C')\n c += self.seq.count('c')\n return (g + c) / len(self.seq)", "def part2():\n program = read_input()\n root = build_filesystem(program)\n all_sizes = root.make_size_list()\n\n used_space = all_sizes[-1]\n extra_free_space_needed = used_space - 40000000 # 40000000 = total filesystem size - free space required\n return min([size for size in all_sizes if size >= extra_free_space_needed])", "def root_disk_size_gib(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"root_disk_size_gib\")", "def total_ram(self):\n return sum([self.size_to_gb(slot[\"Size\"]) for slot in self.get(\"Memory Device\")])", "def get_free_space_mb(self, folder):\r\n if platform.system() == 'Windows':\r\n free_bytes = ctypes.c_ulonglong(0)\r\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, 
ctypes.pointer(free_bytes))\r\n return free_bytes.value / 1024 / 1024 / 1024.\r\n else:\r\n st = os.statvfs(folder)\r\n return st.f_bavail * st.f_frsize / 1024 / 1024 / 1024.", "def get_gsize(self):\n gsize_file = Genome(self.genome).get_fasize()\n gsize = 0\n with open(gsize_file, 'rt') as fi:\n for a in fi:\n c, n = a.strip().split('\\t')\n gsize += int(n)\n return gsize", "def reserved_disk_space_in_bytes(self):\n try:\n return int(environment.get(\"ReservedDiskSpaceInBytes\"))\n except KeyError:\n return maxsize", "def allocated_storage(self):\n return self._allocated_storage", "def group_size(self):\n return self._gsize", "def get_free_space_mb(dirname):\n if platform.system() == 'Windows':\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(dirname), None, None, ctypes.pointer(free_bytes))\n return free_bytes.value / 1024 / 1024\n else:\n st = os.statvfs(dirname)\n return st.f_bavail * st.f_frsize / 1024 / 1024", "def unallocated_spaces(self):\n unallocated_offices = 0\n for office in self.offices:\n unallocated_offices += self.offices[\n office]['room'].unallocated_spaces\n unallocated_living = 0\n for living in self.living_spaces:\n unallocated_living += self.living_spaces[\n living]['room'].unallocated_spaces\n\n return [unallocated_offices, unallocated_living]", "def size_gb(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"size_gb\")", "def find_free(self):\n\n free_position = np.where(self.block == 0)\n free_position = np.array(free_position).flatten()\n return free_position", "def bytes_used(self):\n return int(self.status[\"pgmap\"][\"bytes_used\"])", "def space(self):\n return self._space", "def root_disk_size_gib(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"root_disk_size_gib\")", "def free(flags='-k'):\n system_command(\"free {0}\".format(flags))" ]
[ "0.7192137", "0.6909616", "0.690619", "0.68786997", "0.6813705", "0.67277247", "0.66669893", "0.6577275", "0.6504887", "0.65012854", "0.64604944", "0.6451814", "0.6396979", "0.63525146", "0.63032967", "0.6267119", "0.6259522", "0.62487036", "0.6246622", "0.62347066", "0.6206821", "0.6199555", "0.6154595", "0.6153875", "0.615194", "0.6122632", "0.60965425", "0.6084616", "0.6078471", "0.60578483", "0.60575", "0.60479194", "0.6029209", "0.5995134", "0.596467", "0.59532356", "0.59532356", "0.59532356", "0.5948171", "0.5944891", "0.5938545", "0.59257835", "0.5910643", "0.58981514", "0.58961153", "0.58612925", "0.58516383", "0.5827113", "0.5823248", "0.581235", "0.58118236", "0.5808813", "0.5802188", "0.5779112", "0.5774052", "0.57721", "0.5762731", "0.5759872", "0.5748088", "0.57425386", "0.5738331", "0.5736453", "0.5729736", "0.57272524", "0.5726157", "0.57180065", "0.57163507", "0.57104635", "0.5709863", "0.5706958", "0.5705632", "0.57013047", "0.56899345", "0.56884736", "0.5680349", "0.56716233", "0.5667943", "0.5667943", "0.56630534", "0.56572497", "0.5642922", "0.56393623", "0.56386864", "0.5636815", "0.5636584", "0.5627181", "0.5625325", "0.56250054", "0.5602095", "0.55990285", "0.5591541", "0.55845034", "0.55752397", "0.5573337", "0.5572341", "0.5564105", "0.5562948", "0.5558947", "0.5553702", "0.5553447" ]
0.75012624
0
True if it can't run, False otherwise. The condition is the amount of RAM memory available in GB.
def ram_condition(min_gb=3):
    return get_free_gb() < min_gb
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')", "def memory_check(self) -> bool:\n available_bytes = psutil.virtual_memory().available\n cur_rss = self.mem_status.memory_info().rss\n\n if cur_rss < self.init_mem_rss:\n self.init_mem_rss = cur_rss\n estimated_model_size_mb = (cur_rss - self.init_mem_rss) >> 20\n available_mb = available_bytes >> 20\n model_size_memory_ratio = estimated_model_size_mb / available_mb\n\n early_stop = False\n if model_size_memory_ratio > 1.0:\n logger.warning(f'Warning: Large model size may cause OOM error if training continues')\n early_stop = True\n\n if available_mb < 512: # Less than 500 MB\n logger.warning(f'Warning: Low available memory may cause OOM error if training continues')\n early_stop = True\n\n if early_stop:\n logger.warning('Warning: Early stopped model prior to optimal result to avoid OOM error. '\n 'Please increase available memory to avoid subpar model quality.')\n logger.warning(f'Available Memory: {available_mb} MB, Estimated Model size: {estimated_model_size_mb} MB')\n return True\n elif self.verbose or (model_size_memory_ratio > 0.25):\n logging.debug(f'Available Memory: {available_mb} MB, Estimated Model size: {estimated_model_size_mb} MB')\n return False", "def _checkAvailableMemory():\n #execute free -m to get output in MB\n logging.debug(\"checking total memory\")\n cmd = [\n basedefs.EXEC_FREE, \"-m\"\n ]\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_EXP_FREE_MEM)\n\n #itterate over output and look for the line: \"Mem: 1 something\"\n #and extract 1 from it (1 is an example to the free memory)\n availableMemory = 0\n for line in output.split(\"\\n\"):\n result = re.match(\"Mem:\\s+(\\d+)\\s+.+\", line)\n if result:\n logging.debug(\"Found a match, amount of memory: %s\" % result.group(1))\n availableMemory = result.group(1)\n\n #compare found memory to restrictions\n availableMemory = int(availableMemory)\n #multiplying CONST_MIN_MEMORY by 0.95 to have tolerance of 5%\n if availableMemory < (basedefs.CONST_MIN_MEMORY_MB * 0.95):\n logging.error(\"Availble memory (%s) is lower then the minimum requirments (%s)\" % (availableMemory, basedefs.CONST_MIN_MEMORY_MB))\n raise Exception(output_messages.ERR_EXP_NOT_EMOUGH_MEMORY)\n\n if availableMemory < basedefs.CONST_WARN_MEMORY_MB:\n logging.warn(\"There is less then %s available memory \" % basedefs.CONST_WARN_MEMORY_MB)\n controller.MESSAGES.append(output_messages.WARN_LOW_MEMORY)", "def check_available_memory(self,unit='B'):\n free = psutil.virtual_memory().available\n\n if unit == 'MB':\n\n return free/10**6\n\n elif unit == 'GB':\n\n return free/10**9\n\n else:\n\n return free", "def device_out_of_memory(self) -> bool:\n return pulumi.get(self, \"device_out_of_memory\")", "def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def precheck(self):\n if self.__memory_size is None:\n self.logger.exception(\"[Memory] Please set memory size.\")\n raise ArgsNotCorrect(\"Please set memory size.\")", "def device_out_of_memory(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"device_out_of_memory\")", "def is_enough_space(self) -> bool:\n return self._free_space() > self.minimum_disk", 
"def check_mem_usage():\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n return mem", "def mem_avail():\n return psutil.virtual_memory().available", "def is_out_of_memory(self):\n\n return self._state == \"OUT_OF_MEMORY\"", "def testExcessiveRamUsage(self):\n c = Simulation()\n c.set_simulation_parameters(\n seed=1,\n task=36,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=100000000000,\n sample_size=0.1,\n max_time=10,\n )\n c.set_map_files(sample_file=\"sample/large_mask.tif\", fine_file=\"sample/large_fine.tif\")\n with self.assertRaises(MemoryError):\n c.optimise_ram(ram_limit=16)", "def hasmem(state, mem):\n if mem <= state[HEAD][MEM]:\n return True\n else:\n state[HEAD][STATUS] = OOM\n return False", "def _handle_not_enough_memory(self, calculation):\n\n if not self.ctx.can_be_optimised:\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('I am not allowed to optimize your settings. Consider providing at least'\n 'num_machines and num_mpiprocs_per_machine')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_MEMORY_ISSUE_NO_SOLUTION)\n\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report('Calculation failed due to lack of memory, I resubmit it with twice larger'\n ' amount of computational nodes and smaller MPI/OMP ratio')\n\n # increase number of nodes\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n\n self.ctx.suggest_mpi_omp_ratio = self.ctx.suggest_mpi_omp_ratio / 2\n\n status = self.check_kpts()\n if status is not None:\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES)\n\n if 'settings' not in self.ctx.inputs:\n settings = {}\n else:\n settings = self.ctx.inputs.settings.get_dict()\n settings.setdefault('remove_from_remotecopy_list', [])\n if 'mixing_history*' not in settings['remove_from_remotecopy_list']:\n settings['remove_from_remotecopy_list'].append('mixing_history*')\n self.ctx.inputs.settings = orm.Dict(dict=settings)\n\n #check if the cdn.hdf can be reused\n #Out of memory can also occur after a couple of iterations if the mixing_history gets too large\n remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder')\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n\n return ProcessHandlerReport(True)", "def storage_available(self):\n logger.debug('Function storage_available start')\n \n # 2.9 GB\n max_size = 2.9*10**9\n \n if self.total_image_data_size >= max_size:\n logger.info(\"Storage not available\")\n return False\n else:\n logger.info(\"Storage available\")\n return True\n\n logger.debug('Function storage_available end')", "def can_build(self, game_map) -> bool:\n if self.is_cart():\n return False\n cell = game_map.get_cell_by_pos(self.pos)\n if not cell.has_resource() and cell.citytile is None and self.can_act() and self.has_enough_resources_to_build:\n return True\n return False", "def ram_prop_condition(prop=0.25):\n mem_info = get_mem_info()\n total_mem = float(mem_info['MemTotal'].value) / 10**6\n min_gb = prop * total_mem\n return ram_condition(min_gb=min_gb)", "def is_busy(self):\n threads = len(self.executor._threads)\n if threads == 0:\n return False\n\n capacity = 
self.executor._work_queue.qsize() / float(threads)\n if capacity > 2:\n return True\n elif capacity < 1:\n return False\n else:\n return capacity > (random.random() + 1)", "def check_free_space(environment, target_xy, fovea):\n temp_image = check_target_position(environment, target_xy, fovea)\n if np.array_equal(temp_image, np.zeros(temp_image.shape)):\n return True\n else:\n return False", "def check(self, runtime):\n return True", "def test_mem_available_percent():\n result = _run_metric('mem_available_percent')\n assert result.exit_code == 0", "def is_available_while_running(cls) -> bool:\n\n return True", "def __check_memory_limit(self, efile_path):\n try:\n log.debug('Checking %s for exceeded memory message from SLURM', efile_path)\n with open(efile_path) as f:\n if os.path.getsize(efile_path) > 2048:\n f.seek(-2048, os.SEEK_END)\n f.readline()\n for line in f.readlines():\n stripped_line = line.strip()\n if stripped_line == SLURM_MEMORY_LIMIT_EXCEEDED_MSG:\n return OUT_OF_MEMORY_MSG\n elif any(_ in stripped_line for _ in SLURM_MEMORY_LIMIT_EXCEEDED_PARTIAL_WARNINGS):\n return PROBABLY_OUT_OF_MEMORY_MSG\n except Exception:\n log.exception('Error reading end of %s:', efile_path)\n\n return False", "def allocate(self) -> bool:\n if hasattr(self.at_options, 'allocate'):\n return self.at_options.allocate == 1\n return False", "def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")", "def checkmem(self,file_,line_): # 3\n res = self.__obj.checkmemtask(file_,line_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def test_disk_space_required_zero_if_no_vm(self):\n self.assertEqual(self.command.working_dir_disk_space_required(), 0)", "def min_system_resources(node):\n\n min_sys_res = True\n\n # CPUs\n if \"layout\" in node[\"cpu\"]:\n total_cpus = len(node[\"cpu\"][\"layout\"])\n if total_cpus < 2:\n print(\n \"\\nThere is only {} CPU(s) available on this system. \"\n \"This is not enough to run VPP.\".format(total_cpus)\n )\n min_sys_res = False\n\n # System Memory\n if (\n \"free\" in node[\"hugepages\"]\n and \"memfree\" in node[\"hugepages\"]\n and \"size\" in node[\"hugepages\"]\n ):\n free = node[\"hugepages\"][\"free\"]\n memfree = float(node[\"hugepages\"][\"memfree\"].split(\" \")[0])\n hugesize = float(node[\"hugepages\"][\"size\"].split(\" \")[0])\n\n memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize\n percentmemhugepages = (memhugepages / memfree) * 100\n if free is \"0\" and percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:\n print(\n \"\\nThe System has only {} of free memory. 
You will not \"\n \"be able to allocate enough Huge Pages for VPP.\".format(\n int(memfree)\n )\n )\n min_sys_res = False\n\n return min_sys_res", "def is_full(self):\n if len(self._page_map) >= self.memory_size:\n return True\n return False", "def _should_run_now(self):\n # Assumes the unit/all values will have values.\n if not len(self._device_values.keys()) > 0:\n return False\n return not len(self._needed_devices) > 0", "def _workers_available(self) -> bool:\n total_compute_power = sum(self.client.nthreads().values())\n if len(self.futures) < total_compute_power:\n return True\n return False", "def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed", "def check_image_before_load(self,image_dims):\n\n if image_dims[0]*image_dims[1]*image_dims[2]*4 < self.check_available_memory():\n return True\n else:\n return False", "def is_memory_device(self, device_index):\n return self.drt_manager.is_memory_device(device_index)", "def ComputeEAvailable(self):\r\n pass", "def check_cpu_usage():\n usage = psutil.cpu_percent(1)\n return usage < 73", "def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True", "def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )", "def has_error(self):\r\n return self._arm.has_error", "def _schedTest(self):\n if not self._hasSlices(): # There are no migratory tasks, so let's check utilization\n return self.util() <= 1.0\n else:\n return self._qpa()", "def test_check_disk_space_sufficient(self):\n self.assertTrue(self.command.check_disk_space(1, self.temp_dir))\n self.assertTrue(self.command.check_disk_space(\n 1, self.temp_dir,\n label=\"Hello\", context=\"Contextual detail\", die=True))", "def test_not_ready_if_insufficient_working_space(self):\n self.command.package = self.input_ovf\n\n self.command.ui.default_confirm_response = False\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available for\"\n \" temporary file storage\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def runnable(self):\n if \"calculations\" not in self.ctx:\n return True # if no calculations have run\n return self.ctx.running_calc < 2 and self.can_restart()", "def check_system_config():\n if os.uname().sysname == 'Darwin':\n return\n\n assert int(run(\"cat /proc/sys/vm/max_map_count\", shell=True)) >= 262144, \\\n 'the \"vm.max_map_count\" kernel parameter is too low, check readme'\n\n check_resources()", "def has_memory(self, user_id, memory_date):\n raise NotImplementedError()", "def has_cargo(self) -> bool:\n return bool(self.proto.cargo_space_taken)", "def check_if_full(self):\n pass", "def is_free(self) -> tuple:\n if self.running_procs >= 
self.procs_no:\n return (False, None)\n if self.gpus:\n for gpu in self.gpus:\n if self.gpu_running_procs[gpu] < self.per_gpu[gpu]:\n return (True, gpu)\n return (False, None)\n return (True, None)", "def is_available(self, cmd):\n num_qubits = 0\n for qureg in cmd.all_qubits:\n num_qubits += len(qureg)\n return num_qubits <= 2", "def free_ram():\n return int(convert.bytetomb(psutil.virtual_memory().available))", "def is_available():", "def available(self) -> bool:\n return True", "def available(self) -> bool:\n return True", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def not_converging(self):\n if len(self.rundir) >= int(self.settings[\"run_limit\"]):\n return True\n return False", "def _CheckMachineSize(self):\n if self.CompareMachineSize(self._machine_type, self._min_machine_size,\n self._zone) < 0:\n raise errors.DriverError(\n \"%s does not meet the minimum required machine size %s\" %\n (self._machine_type, self._min_machine_size))", "def oswmem_free_memory(self,min=0): \n result = self.df[self.df['free mmemory'] > min].all \n return result", "def check(self):\n self.__check_request_limit()", "def is_free(self):\n return self._size > 0", "def is_available() -> bool:\n return HAVE_RLE", "def detect_overcommit(self, host):\n ratio = host.ram_allocation_ratio or 1\n ram_limit = host.total_usable_ram_mb * ratio\n used_ram = host.total_usable_ram_mb - host.free_ram_mb\n if used_ram > ram_limit:\n return True\n\n ratio = host.disk_allocation_ratio or 1\n disk_limit = host.total_usable_disk_gb * ratio\n used_disk = host.total_usable_disk_gb - host.free_disk_mb / 1024.\n if used_disk > disk_limit:\n return True\n\n ratio = host.cpu_allocation_ratio or 1\n cpus_limit = host.vcpus_total * ratio\n if host.vcpus_used > cpus_limit:\n return True\n\n return False", "def checkResourceRequest(self, memory, cores, disk):\n assert memory is not None\n assert disk is not None\n assert cores is not None\n if cores > self.maxCores:\n raise InsufficientSystemResources('cores', cores, self.maxCores)\n if memory > self.maxMemory:\n raise InsufficientSystemResources('memory', memory, self.maxMemory)\n if disk > self.maxDisk:\n raise InsufficientSystemResources('disk', disk, self.maxDisk)", "def _CheckDeviceFreeSpace(self, device_info):\n effective_free = device_info.target_dir_size + device_info.target_fs_free\n staging_size = self._GetStagingDirSize()\n if effective_free < staging_size:\n raise DeployFailure(\n 'Not enough free space on the device. Required: %s MiB, '\n 'actual: %s MiB.' % (staging_size / 1024, effective_free / 1024))\n if device_info.target_fs_free < (100 * 1024):\n logging.warning('The device has less than 100MB free. 
deploy_chrome may '\n 'hang during the transfer.')", "def check_disk_usage(disk):\n du= shutil.disk_usage(disk)\n free =du.free/du.total * 100\n return free > 30", "def get_available_memory_blocks(self):\n status = self.get_status()\n return status & (STATUS_MEM_0_EMPTY | STATUS_MEM_1_EMPTY)", "def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])", "def test_rebuilt_server_ram(self):\n\n remote_instance = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n lower_limit = int(self.expected_ram) - (int(self.expected_ram) * .1)\n server_ram_size = int(remote_instance.get_allocated_ram())\n self.assertTrue((int(self.expected_ram) == server_ram_size or lower_limit <= server_ram_size))", "def can_fit_more(self):\n\n return len(self._requeue_jobs) < MAX_NUM", "def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False", "def test_not_ready_if_insufficient_output_space(self):\n self.command.package = self.input_ovf\n\n self.command.ui.default_confirm_response = False\n # Make working directory requirements negligible but output huge\n with mock.patch.object(self.command,\n \"working_dir_disk_space_required\",\n return_value=0), \\\n mock.patch.object(self.command.vm,\n 'predicted_output_size',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available\"\n \" to guarantee successful output\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n \"working_dir_disk_space_required\",\n return_value=0), \\\n mock.patch.object(self.command.vm,\n 'predicted_output_size',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)", "def CacheFreeSpaceCheck(self, amount):\n self._required_cache = max(self._required_cache, amount)\n self.script.append(('apply_patch_space(%d) || abort(\"E%d: Not enough free '\n 'space on /cache to apply patches.\");') % (\n amount,\n common.ErrorCode.INSUFFICIENT_CACHE_SPACE))", "def _assert_enough_space_for_copy(self, volume_size):\n pool_size = max(volume_size * 0.2, 5.27)\n required_size = pool_size + volume_size\n\n if required_size > self.stats['pools'][0]['free_capacity_gb']:\n raise stx_exception.NotEnoughSpace(backend=self.backend_name)", "def get_available_memory():\n if platform == 'linux' or platform == 'linux2':\n return _get_available_memory_linux()\n elif platform == 'darwin':\n return _get_available_memory_darwin()\n else:\n raise Exception('Platform 
not supported')", "def main():\n over_treshold = False\n if PROC_SCRIPT_NAME != \"\" and CPU_MIN_VAL_FOR_PROC != \"\":\n std_output = check_ps_cmd()\n if std_output:\n res = is_script_running(std_output)\n if res is True:\n over_treshold = check_cpu_for_proc(std_output)\n return over_treshold\n sys.exit()", "def is_full(self):\r\n if self.size == self.capacity:\r\n return True\r\n return False", "def available(self):\n\t\t\treturn True", "def available(self):\n\t\t\treturn True", "def available(self):\n\t\t\treturn True", "def canBuild(self, game_map) -> bool:\n cell = game_map.getCellByPos(self.pos)\n if not cell.hasResource() and self.canAct() and (self.cargo[\"wood\"] + self.cargo[\"coal\"] + self.cargo[\"uranium\"]) >= GAME_CONSTANTS[\"PARAMETERS\"][\"CITY_BUILD_COST\"]:\n return True\n return False", "def can_run(self):\n return True", "def test_check_disk_space_insufficient(self, mock_available):\n # If user declines, return False or die\n self.command.ui.default_confirm_response = False\n\n self.assertFalse(self.command.check_disk_space(100, self.temp_dir))\n mock_available.assert_called_once()\n\n mock_available.reset_mock()\n self.command._cached_disk_requirements.clear()\n self.assertRaises(SystemExit, self.command.check_disk_space,\n 100, self.temp_dir, die=True)\n mock_available.assert_called_once()\n\n mock_available.reset_mock()\n self.command._cached_disk_requirements.clear()\n\n # If user accepts, return True anyways\n self.command.ui.default_confirm_response = True\n\n self.assertTrue(self.command.check_disk_space(100, self.temp_dir))\n mock_available.assert_called_once()\n\n mock_available.reset_mock()\n self.command._cached_disk_requirements.clear()\n self.assertTrue(self.command.check_disk_space(100, self.temp_dir,\n die=True))\n mock_available.assert_called_once()", "def check_cpu_constrained():\n return psutil.cpu_percent(1) > 75", "def check(self, runtime):", "def check_if_free_resources(free_mem, free_cpu, reqs):\n req_mem = float(''.join(filter(str.isdigit, reqs[\"mem\"])))\n logger.info(\"CHECK FOR RESOURCES\", extra={\"mem\": free_mem, \"cpu\": free_cpu, \"req_mem\": req_mem})\n if (free_mem/ONE_MB > req_mem and free_cpu > float(reqs[\"cpu\"])):\n logger.info(\"MEM AVAILABLE: \" + str(free_mem/ONE_MB))\n logger.info(\"CPU FREE: \" + str(free_cpu))\n return True\n else:\n logger.info(\"FAILED RESOURCE REQUIREMENTS\")\n return False", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def maxmemory_reserved(self) -> Optional[int]:\n return pulumi.get(self, \"maxmemory_reserved\")", "def query_ram_total() -> t.Optional[int]:\n if not RAM_TOTAL:\n return None\n return psutil.virtual_memory().total", "def is_cuda_extension_usable() -> bool:\n if not EXTENSION_BUILT or not torch.npu.is_available():\n return False\n bsz = 2\n tokens = torch.tensor([[4, 4, 3, 2], [1, 2, 3, 4]], dtype=torch.long, device=\"npu\")\n lprobs = torch.rand((8, 12), device=\"npu\")\n try:\n outputs = ngram_repeat_block_cuda.forward(tokens, lprobs, bsz, 3, 4, 3)\n outputs = outputs + 4 # This line breaks if the extension is built incorrectly.\n return True\n except RuntimeError:\n warnings.warn(\n \"NGramRepeatBlock extension must be rebuilt.\"\n 'Run TORCH_CUDA_ARCH_LIST=\"6.0;6.1;7.0\" python setup.py build_ext --inplace'\n )\n return False", "def available(self):\n return True", "def available(self):\n return True", "def pageable(self):\n return maxSRAM(self.mem) <= self.dev.SRAM_PAGE_LEN", "def _is_legal_state(self, observation):\n 
servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) >= servers_used_mem)", "def is_full(self) -> bool:\r\n return self.size == self.capacity", "def _is_done_illegal_state(self, observation):\n servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) < servers_used_mem)" ]
[ "0.7269116", "0.68143415", "0.66817755", "0.64842796", "0.6442835", "0.6359018", "0.6243431", "0.6243431", "0.62134373", "0.61970484", "0.58983666", "0.58968914", "0.5888447", "0.5873215", "0.58724576", "0.57852805", "0.5777731", "0.57357985", "0.57344395", "0.5731109", "0.5679409", "0.56775975", "0.56675816", "0.5660551", "0.56556684", "0.564799", "0.5594359", "0.5590541", "0.5519973", "0.55175686", "0.551197", "0.5508512", "0.5508116", "0.5473451", "0.54501647", "0.5430011", "0.54282004", "0.541931", "0.54171807", "0.5414207", "0.5411668", "0.54017836", "0.53973216", "0.5396957", "0.5394572", "0.53944737", "0.53944737", "0.53944737", "0.5390523", "0.53683007", "0.53661597", "0.5353478", "0.535065", "0.5338212", "0.53323233", "0.5331341", "0.532933", "0.5325514", "0.5325514", "0.532496", "0.53244627", "0.53228754", "0.53218454", "0.53185624", "0.5317766", "0.531307", "0.53108484", "0.53053606", "0.53012437", "0.5299553", "0.5291541", "0.5288987", "0.52873826", "0.5278906", "0.5277769", "0.52758914", "0.5275364", "0.5269757", "0.5267293", "0.52640384", "0.5250678", "0.5244239", "0.5244239", "0.5244239", "0.5236864", "0.5236151", "0.5229645", "0.5229039", "0.52166855", "0.52105826", "0.52095973", "0.52082163", "0.5200347", "0.51892734", "0.51891655", "0.51891655", "0.51884097", "0.51835203", "0.51792735", "0.51617694" ]
0.6852558
1
True if it can't run, False otherwise. Condition is a proportion of RAM memory available.
def ram_prop_condition(prop=0.25):
    mem_info = get_mem_info()
    total_mem = float(mem_info['MemTotal'].value) / 10**6
    min_gb = prop * total_mem
    return ram_condition(min_gb=min_gb)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_if_sufficient_memory():\n percent_memory = psutil.virtual_memory().percent\n if percent_memory > 75:\n raise ValueError('Please use a device with more CPU ram or a smaller dataset')", "def memory_check(self) -> bool:\n available_bytes = psutil.virtual_memory().available\n cur_rss = self.mem_status.memory_info().rss\n\n if cur_rss < self.init_mem_rss:\n self.init_mem_rss = cur_rss\n estimated_model_size_mb = (cur_rss - self.init_mem_rss) >> 20\n available_mb = available_bytes >> 20\n model_size_memory_ratio = estimated_model_size_mb / available_mb\n\n early_stop = False\n if model_size_memory_ratio > 1.0:\n logger.warning(f'Warning: Large model size may cause OOM error if training continues')\n early_stop = True\n\n if available_mb < 512: # Less than 500 MB\n logger.warning(f'Warning: Low available memory may cause OOM error if training continues')\n early_stop = True\n\n if early_stop:\n logger.warning('Warning: Early stopped model prior to optimal result to avoid OOM error. '\n 'Please increase available memory to avoid subpar model quality.')\n logger.warning(f'Available Memory: {available_mb} MB, Estimated Model size: {estimated_model_size_mb} MB')\n return True\n elif self.verbose or (model_size_memory_ratio > 0.25):\n logging.debug(f'Available Memory: {available_mb} MB, Estimated Model size: {estimated_model_size_mb} MB')\n return False", "def ram_condition(min_gb=3):\n return get_free_gb() < min_gb", "def _checkAvailableMemory():\n #execute free -m to get output in MB\n logging.debug(\"checking total memory\")\n cmd = [\n basedefs.EXEC_FREE, \"-m\"\n ]\n output, rc = utils.execCmd(cmdList=cmd, failOnError=True, msg=output_messages.ERR_EXP_FREE_MEM)\n\n #itterate over output and look for the line: \"Mem: 1 something\"\n #and extract 1 from it (1 is an example to the free memory)\n availableMemory = 0\n for line in output.split(\"\\n\"):\n result = re.match(\"Mem:\\s+(\\d+)\\s+.+\", line)\n if result:\n logging.debug(\"Found a match, amount of memory: %s\" % result.group(1))\n availableMemory = result.group(1)\n\n #compare found memory to restrictions\n availableMemory = int(availableMemory)\n #multiplying CONST_MIN_MEMORY by 0.95 to have tolerance of 5%\n if availableMemory < (basedefs.CONST_MIN_MEMORY_MB * 0.95):\n logging.error(\"Availble memory (%s) is lower then the minimum requirments (%s)\" % (availableMemory, basedefs.CONST_MIN_MEMORY_MB))\n raise Exception(output_messages.ERR_EXP_NOT_EMOUGH_MEMORY)\n\n if availableMemory < basedefs.CONST_WARN_MEMORY_MB:\n logging.warn(\"There is less then %s available memory \" % basedefs.CONST_WARN_MEMORY_MB)\n controller.MESSAGES.append(output_messages.WARN_LOW_MEMORY)", "def precheck(self):\n if self.__memory_size is None:\n self.logger.exception(\"[Memory] Please set memory size.\")\n raise ArgsNotCorrect(\"Please set memory size.\")", "def device_out_of_memory(self) -> bool:\n return pulumi.get(self, \"device_out_of_memory\")", "def check_available_memory(self,unit='B'):\n free = psutil.virtual_memory().available\n\n if unit == 'MB':\n\n return free/10**6\n\n elif unit == 'GB':\n\n return free/10**9\n\n else:\n\n return free", "def test_mem_available():\n result = _run_metric('mem_available')\n assert result.exit_code == 0", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def device_out_of_memory(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"device_out_of_memory\")", "def 
test_mem_available_percent():\n result = _run_metric('mem_available_percent')\n assert result.exit_code == 0", "def _handle_not_enough_memory(self, calculation):\n\n if not self.ctx.can_be_optimised:\n self.ctx.restart_calc = calculation\n self.ctx.is_finished = True\n self.report('I am not allowed to optimize your settings. Consider providing at least'\n 'num_machines and num_mpiprocs_per_machine')\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_MEMORY_ISSUE_NO_SOLUTION)\n\n self.ctx.restart_calc = None\n self.ctx.is_finished = False\n self.report('Calculation failed due to lack of memory, I resubmit it with twice larger'\n ' amount of computational nodes and smaller MPI/OMP ratio')\n\n # increase number of nodes\n propose_nodes = self.ctx.num_machines * 2\n if propose_nodes > self.ctx.max_queue_nodes:\n propose_nodes = self.ctx.max_queue_nodes\n self.ctx.num_machines = propose_nodes\n\n self.ctx.suggest_mpi_omp_ratio = self.ctx.suggest_mpi_omp_ratio / 2\n\n status = self.check_kpts()\n if status is not None:\n self.ctx.is_finished = True\n self.results()\n return ProcessHandlerReport(True, self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES)\n\n if 'settings' not in self.ctx.inputs:\n settings = {}\n else:\n settings = self.ctx.inputs.settings.get_dict()\n settings.setdefault('remove_from_remotecopy_list', [])\n if 'mixing_history*' not in settings['remove_from_remotecopy_list']:\n settings['remove_from_remotecopy_list'].append('mixing_history*')\n self.ctx.inputs.settings = orm.Dict(dict=settings)\n\n #check if the cdn.hdf can be reused\n #Out of memory can also occur after a couple of iterations if the mixing_history gets too large\n remote = calculation.base.links.get_outgoing().get_node_by_label('remote_folder')\n if _is_remote_reusable(self.ctx.inputs, calculation):\n if 'fleurinp' in self.ctx.inputs:\n del self.ctx.inputs.fleurinp\n self.ctx.inputs.parent_folder = remote\n\n return ProcessHandlerReport(True)", "def check_image_before_load(self,image_dims):\n\n if image_dims[0]*image_dims[1]*image_dims[2]*4 < self.check_available_memory():\n return True\n else:\n return False", "def check_cpu_constrained():\n return psutil.cpu_percent(1) > 75", "def testExcessiveRamUsage(self):\n c = Simulation()\n c.set_simulation_parameters(\n seed=1,\n task=36,\n output_directory=\"output\",\n min_speciation_rate=0.5,\n sigma=2,\n tau=2,\n deme=100000000000,\n sample_size=0.1,\n max_time=10,\n )\n c.set_map_files(sample_file=\"sample/large_mask.tif\", fine_file=\"sample/large_fine.tif\")\n with self.assertRaises(MemoryError):\n c.optimise_ram(ram_limit=16)", "def is_enough_space(self) -> bool:\n return self._free_space() > self.minimum_disk", "def check_mem(self, values):\n try:\n virt_mem = psutil.virtual_memory()\n values[keys.KEY_VIRTUAL_MEM_TOTAL] = virt_mem.total\n values[keys.KEY_VIRTUAL_MEM_PERCENT] = virt_mem.percent\n except:\n logging.error(\"Error collecting memory stats.\")", "def mem_avail():\n return psutil.virtual_memory().available", "def _schedTest(self):\n if not self._hasSlices(): # There are no migratory tasks, so let's check utilization\n return self.util() <= 1.0\n else:\n return self._qpa()", "def check_cpu_usage():\n usage = psutil.cpu_percent(1)\n return usage < 73", "def check(self, runtime):\n return True", "def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed", "def min_system_resources(node):\n\n min_sys_res = 
True\n\n # CPUs\n if \"layout\" in node[\"cpu\"]:\n total_cpus = len(node[\"cpu\"][\"layout\"])\n if total_cpus < 2:\n print(\n \"\\nThere is only {} CPU(s) available on this system. \"\n \"This is not enough to run VPP.\".format(total_cpus)\n )\n min_sys_res = False\n\n # System Memory\n if (\n \"free\" in node[\"hugepages\"]\n and \"memfree\" in node[\"hugepages\"]\n and \"size\" in node[\"hugepages\"]\n ):\n free = node[\"hugepages\"][\"free\"]\n memfree = float(node[\"hugepages\"][\"memfree\"].split(\" \")[0])\n hugesize = float(node[\"hugepages\"][\"size\"].split(\" \")[0])\n\n memhugepages = MIN_TOTAL_HUGE_PAGES * hugesize\n percentmemhugepages = (memhugepages / memfree) * 100\n if free is \"0\" and percentmemhugepages > MAX_PERCENT_FOR_HUGE_PAGES:\n print(\n \"\\nThe System has only {} of free memory. You will not \"\n \"be able to allocate enough Huge Pages for VPP.\".format(\n int(memfree)\n )\n )\n min_sys_res = False\n\n return min_sys_res", "def check_free_space(environment, target_xy, fovea):\n temp_image = check_target_position(environment, target_xy, fovea)\n if np.array_equal(temp_image, np.zeros(temp_image.shape)):\n return True\n else:\n return False", "def is_out_of_memory(self):\n\n return self._state == \"OUT_OF_MEMORY\"", "def test_check_disk_space_sufficient(self):\n self.assertTrue(self.command.check_disk_space(1, self.temp_dir))\n self.assertTrue(self.command.check_disk_space(\n 1, self.temp_dir,\n label=\"Hello\", context=\"Contextual detail\", die=True))", "def is_full(self):\n if len(self._page_map) >= self.memory_size:\n return True\n return False", "def hasmem(state, mem):\n if mem <= state[HEAD][MEM]:\n return True\n else:\n state[HEAD][STATUS] = OOM\n return False", "def check_mem_usage():\n mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n return mem", "def test_not_ready_if_insufficient_working_space(self):\n self.command.package = self.input_ovf\n\n self.command.ui.default_confirm_response = False\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available for\"\n \" temporary file storage\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n 'working_dir_disk_space_required',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)", "def __check_memory_limit(self, efile_path):\n try:\n log.debug('Checking %s for exceeded memory message from SLURM', efile_path)\n with open(efile_path) as f:\n if os.path.getsize(efile_path) > 2048:\n f.seek(-2048, os.SEEK_END)\n f.readline()\n for line in f.readlines():\n stripped_line = line.strip()\n if stripped_line == SLURM_MEMORY_LIMIT_EXCEEDED_MSG:\n return OUT_OF_MEMORY_MSG\n elif any(_ in stripped_line for _ in SLURM_MEMORY_LIMIT_EXCEEDED_PARTIAL_WARNINGS):\n return PROBABLY_OUT_OF_MEMORY_MSG\n except Exception:\n log.exception('Error reading end of %s:', efile_path)\n\n return False", "def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True", "def test_not_ready_if_insufficient_output_space(self):\n self.command.package = self.input_ovf\n\n self.command.ui.default_confirm_response = False\n # Make working directory requirements negligible but output huge\n with 
mock.patch.object(self.command,\n \"working_dir_disk_space_required\",\n return_value=0), \\\n mock.patch.object(self.command.vm,\n 'predicted_output_size',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available\"\n \" to guarantee successful output\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n \"working_dir_disk_space_required\",\n return_value=0), \\\n mock.patch.object(self.command.vm,\n 'predicted_output_size',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)", "def _CheckMachineSize(self):\n if self.CompareMachineSize(self._machine_type, self._min_machine_size,\n self._zone) < 0:\n raise errors.DriverError(\n \"%s does not meet the minimum required machine size %s\" %\n (self._machine_type, self._min_machine_size))", "def test_disk_space_required_zero_if_no_vm(self):\n self.assertEqual(self.command.working_dir_disk_space_required(), 0)", "def _should_run_now(self):\n # Assumes the unit/all values will have values.\n if not len(self._device_values.keys()) > 0:\n return False\n return not len(self._needed_devices) > 0", "def part1():\n program = read_input()\n root = build_filesystem(program)\n all_sizes = root.make_size_list()\n return sum(size for size in all_sizes if size <= 100000)", "def runnable(self):\n if \"calculations\" not in self.ctx:\n return True # if no calculations have run\n return self.ctx.running_calc < 2 and self.can_restart()", "def _assert_enough_space_for_copy(self, volume_size):\n pool_size = max(volume_size * 0.2, 5.27)\n required_size = pool_size + volume_size\n\n if required_size > self.stats['pools'][0]['free_capacity_gb']:\n raise stx_exception.NotEnoughSpace(backend=self.backend_name)", "def check_disk_usage(disk):\n du= shutil.disk_usage(disk)\n free =du.free/du.total * 100\n return free > 30", "def storage_available(self):\n logger.debug('Function storage_available start')\n \n # 2.9 GB\n max_size = 2.9*10**9\n \n if self.total_image_data_size >= max_size:\n logger.info(\"Storage not available\")\n return False\n else:\n logger.info(\"Storage available\")\n return True\n\n logger.debug('Function storage_available end')", "def _workers_available(self) -> bool:\n total_compute_power = sum(self.client.nthreads().values())\n if len(self.futures) < total_compute_power:\n return True\n return False", "def is_busy(self):\n threads = len(self.executor._threads)\n if threads == 0:\n return False\n\n capacity = self.executor._work_queue.qsize() / float(threads)\n if capacity > 2:\n return True\n elif capacity < 1:\n return False\n else:\n return capacity > (random.random() + 1)", "def detect_overcommit(self, host):\n ratio = host.ram_allocation_ratio or 1\n ram_limit = host.total_usable_ram_mb * ratio\n used_ram = host.total_usable_ram_mb - host.free_ram_mb\n if used_ram > ram_limit:\n return True\n\n ratio = host.disk_allocation_ratio or 1\n disk_limit = host.total_usable_disk_gb * ratio\n used_disk = host.total_usable_disk_gb - host.free_disk_mb / 1024.\n if used_disk > disk_limit:\n return True\n\n ratio = host.cpu_allocation_ratio or 1\n cpus_limit = host.vcpus_total * ratio\n if host.vcpus_used > cpus_limit:\n return True\n\n return False", "def not_converging(self):\n if len(self.rundir) >= int(self.settings[\"run_limit\"]):\n return True\n return False", "def 
check_cpu_usage():\n usage = psutil.cpu_percent(1)\n return usage < 75", "def checkResourceRequest(self, memory, cores, disk):\n assert memory is not None\n assert disk is not None\n assert cores is not None\n if cores > self.maxCores:\n raise InsufficientSystemResources('cores', cores, self.maxCores)\n if memory > self.maxMemory:\n raise InsufficientSystemResources('memory', memory, self.maxMemory)\n if disk > self.maxDisk:\n raise InsufficientSystemResources('disk', disk, self.maxDisk)", "def can_build(self, game_map) -> bool:\n if self.is_cart():\n return False\n cell = game_map.get_cell_by_pos(self.pos)\n if not cell.has_resource() and cell.citytile is None and self.can_act() and self.has_enough_resources_to_build:\n return True\n return False", "def pageable(self):\n return maxSRAM(self.mem) <= self.dev.SRAM_PAGE_LEN", "def _can_cache_acts_data(num_batches: int, input_shape: torch.Size, output_shape: torch.Size) -> bool:\n can_cache_data = False\n\n # Available CPU memory in GB.\n threshold_mem = psutil.virtual_memory().available / (1024 * 1024 * 1024)\n threshold_mem = threshold_mem * EMPIRICAL_THRESHOLD\n\n # required CPU memory in GB.\n req_mem = 0\n req_mem += reduce(lambda x, y: x * y, input_shape) * num_batches * DATA_SIZE_IN_BITS / (1024 * 1024 * 1024 * 8)\n req_mem += reduce(lambda x, y: x * y, output_shape) * num_batches * DATA_SIZE_IN_BITS / (1024 * 1024 * 1024 * 8)\n\n if req_mem < threshold_mem:\n can_cache_data = True\n\n return can_cache_data", "def check_cpu_usage():\n usage = psutil.cpu_percent(1)\n print(\"DEBUG:usage:{}\".format(usage))\n return usage < 75", "def test_check_disk_space_insufficient(self, mock_available):\n # If user declines, return False or die\n self.command.ui.default_confirm_response = False\n\n self.assertFalse(self.command.check_disk_space(100, self.temp_dir))\n mock_available.assert_called_once()\n\n mock_available.reset_mock()\n self.command._cached_disk_requirements.clear()\n self.assertRaises(SystemExit, self.command.check_disk_space,\n 100, self.temp_dir, die=True)\n mock_available.assert_called_once()\n\n mock_available.reset_mock()\n self.command._cached_disk_requirements.clear()\n\n # If user accepts, return True anyways\n self.command.ui.default_confirm_response = True\n\n self.assertTrue(self.command.check_disk_space(100, self.temp_dir))\n mock_available.assert_called_once()\n\n mock_available.reset_mock()\n self.command._cached_disk_requirements.clear()\n self.assertTrue(self.command.check_disk_space(100, self.temp_dir,\n die=True))\n mock_available.assert_called_once()", "def check_if_full(self):\n pass", "def main():\n over_treshold = False\n if PROC_SCRIPT_NAME != \"\" and CPU_MIN_VAL_FOR_PROC != \"\":\n std_output = check_ps_cmd()\n if std_output:\n res = is_script_running(std_output)\n if res is True:\n over_treshold = check_cpu_for_proc(std_output)\n return over_treshold\n sys.exit()", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % 
memory)\n return memory <= max_memory", "def scan_size(self):\n max_memory = 10e9/4 # because 32-bit floats will be used\n memory = 0\n for f in self.filenames:\n img = cv2.imread(f, int(self.color))\n if img is not None:\n m = 1\n for dim in img.shape:\n m *= dim\n memory += m\n else:\n print('error opening %s' % f)\n print('size is %s bytes' % memory)\n return memory <= max_memory", "def CacheFreeSpaceCheck(self, amount):\n self._required_cache = max(self._required_cache, amount)\n self.script.append(('apply_patch_space(%d) || abort(\"E%d: Not enough free '\n 'space on /cache to apply patches.\");') % (\n amount,\n common.ErrorCode.INSUFFICIENT_CACHE_SPACE))", "def size_check(self):\r\n # get the coordinates of the mesh nodes\r\n coords = self.V.tabulate_dof_coordinates()[::3]\r\n\r\n # compute the magnitude/distance of the points from the c.o.m.\r\n mags = np.sqrt(np.sum(np.square(coords), axis=1))\r\n\r\n # if the maximum distance is greater than the cut\r\n if np.max(mags) > self.sizecut:\r\n # set as divergent\r\n self.diverged = True\r\n\r\n # print and save logs\r\n print(\"--- SIZE BREAKS THRESHOLD ---\")\r\n self.logfile.write(\"--- SIZE BREAKS THRESHOLD --- \\n\")", "def check_memory(self, lambda_memory):\n if (lambda_memory < 128) or (lambda_memory > 1536):\n raise Exception('Incorrect memory size specified')\n else:\n res = lambda_memory % 64\n if (res == 0):\n return lambda_memory\n else:\n return lambda_memory - res + 64", "def checkmem(self,file_,line_): # 3\n res = self.__obj.checkmemtask(file_,line_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def is_memory_device(self, device_index):\n return self.drt_manager.is_memory_device(device_index)", "def should_build_drones(self, knowledge):\n if self.cache.own(UnitTypeId.DRONE).amount >= 16 + self.get_count(UnitTypeId.EXTRACTOR) * 3:\n return False\n if not self.knowledge.game_analyzer.our_power.is_enough_for(\n self.knowledge.enemy_units_manager.enemy_total_power\n ):\n return False\n self.drones.to_count = 16 + self.get_count(UnitTypeId.EXTRACTOR) * 3\n return True", "def is_available_while_running(cls) -> bool:\n\n return True", "def _CheckDeviceFreeSpace(self, device_info):\n effective_free = device_info.target_dir_size + device_info.target_fs_free\n staging_size = self._GetStagingDirSize()\n if effective_free < staging_size:\n raise DeployFailure(\n 'Not enough free space on the device. Required: %s MiB, '\n 'actual: %s MiB.' % (staging_size / 1024, effective_free / 1024))\n if device_info.target_fs_free < (100 * 1024):\n logging.warning('The device has less than 100MB free. 
deploy_chrome may '\n 'hang during the transfer.')", "def allocate(self) -> bool:\n if hasattr(self.at_options, 'allocate'):\n return self.at_options.allocate == 1\n return False", "def check(self, context):\n self.update_product_size()\n return True", "def check(self, context):\n self.update_product_size()\n return True", "def check(self, runtime):", "def is_full(self):\r\n return self.num_checkers == self.width * self.height", "def is_full(self):\r\n if self.size == self.capacity:\r\n return True\r\n return False", "def is_compute(self, nb_iterations):\n return nb_iterations % self.nb_iterations_between_compute == 0", "def check_disk_space(self):\n mm = MicroManager(self.hostname)\n drives = mm.get_disks()\n env = mm.get_env()\n for drive in drives:\n if drive['Name'].startswith(env['HOMEDRIVE']):\n if drive['TotalFreeSpace'] >= 367001600:\n return [CheckStatus(self, CheckStatus.CHECK_DISK_SPACE, CheckStatus.STATUS_PASS), ]\n else:\n return [CheckStatus(self, CheckStatus.CHECK_DISK_SPACE, CheckStatus.STATUS_FAIL, \"Only {} bytes of available disk space remain, expecting at least 367001600\"), ]", "def checkLimit(device, checkStatus):\n d = device.read(1)\n if d:\n print(d)\n status = d[0]\n\n printStatus(status)\n if (checkStatus & status):\n return False\n return True", "def verify_avail_space(self, pool, project, share, size):\n self.verify_project(pool, project)\n avail = self.get_project_stats(pool, project)\n if avail < size:\n exception_msg = (_('Error creating '\n 'share: %(share)s on '\n 'pool: %(pool)s. '\n 'Not enough space.')\n % {'share': share,\n 'pool': pool})\n raise exception.ShareBackendException(msg=exception_msg)", "def test_rebuilt_server_ram(self):\n\n remote_instance = self.server_behaviors.get_remote_instance_client(\n self.server, self.servers_config)\n lower_limit = int(self.expected_ram) - (int(self.expected_ram) * .1)\n server_ram_size = int(remote_instance.get_allocated_ram())\n self.assertTrue((int(self.expected_ram) == server_ram_size or lower_limit <= server_ram_size))", "def has_memory(self, user_id, memory_date):\n raise NotImplementedError()", "def is_power_limited(self):\n status = self.get_status_response()\n return ((status[1] & 0x10) == 0x10)\n #end is_power_limited()", "def ComputeEAvailable(self):\r\n pass", "def check_sum_limits(memory_info):\n slices = ['mgmt.slice', 'storpool.slice', 'user.slice', 'system.slice', 'machine.slice']\n slices = [slc for slc in slices if slc in memory_info]\n limits_mb_sum = sum(memory_info[slc]['memory.limit_in_bytes'] / 1024**2 for slc in slices)\n total_mem_mb = utils.get_memtotal_kb() / 1024\n kernel_mem_mb = max(0, total_mem_mb - limits_mb_sum)\n errors = []\n warnings = []\n if limits_mb_sum >= total_mem_mb:\n errors.append('sum of {0} limits is {1}MB, while total memory is {2}MB'\n .format(', '.join(slices), limits_mb_sum, total_mem_mb))\n if kernel_mem_mb < 1024:\n warnings.append('memory left for kernel is {0}MB'.format(kernel_mem_mb))\n return errors, warnings", "def test_create_instance_with_oversubscribed_ram_fail(self):\n self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)\n self.rt.update_available_resource(self.context.elevated(), NODENAME)\n\n # get total memory as reported by virt driver:\n resources = self.compute.driver.get_available_resource(NODENAME)\n total_mem_mb = resources['memory_mb']\n\n oversub_limit_mb = total_mem_mb * 1.5\n instance_mb = int(total_mem_mb * 1.55)\n\n # build an instance, specifying an amount of memory that exceeds\n # both total_mem_mb and the oversubscribed 
limit:\n params = {\"flavor\": {\"memory_mb\": instance_mb, \"root_gb\": 128,\n \"ephemeral_gb\": 128}}\n instance = self._create_fake_instance_obj(params)\n\n filter_properties = {'limits': {'memory_mb': oversub_limit_mb}}\n\n self.compute.build_and_run_instance(self.context, instance,\n {}, {}, filter_properties, [],\n block_device_mapping=[])", "def is_resource_sufficient(self, drink):\n can_make = True\n for item in drink.ingredients:\n if drink.ingredients[item] > self.resources[item]:\n print(f\"Sorry there is not enough {item}.\")\n can_make = False\n return can_make", "def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False", "def check_disk_usage(disk):\n du = shutil.disk_usage(disk)\n free = du.free / du.total * 100\n return free > 20", "def _shrinkCheck(self):\n if self.size > self.INIT_CAPACITY and self.size / self.capacity <= 0.25:\n self._shrink()", "def is_resource_sufficient(self, drink):\n can_make = True\n for item in drink.ingredients:\n if drink.ingredients[item] > self.resources[item]:\n print(f\"Sorry there is not enough {item}.\")\n can_make = False\n return can_make", "def is_full(self) -> bool:\r\n return self.size == self.capacity", "def _is_legal_state(self, observation):\n servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) >= servers_used_mem)", "def scale(self, _: Application) -> bool:\n return False", "def deviceMemory(self):\n return 1", "def oswmem_free_memory(self,min=0): \n result = self.df[self.df['free mmemory'] > min].all \n return result", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def check_system_config():\n if os.uname().sysname == 'Darwin':\n return\n\n assert int(run(\"cat /proc/sys/vm/max_map_count\", shell=True)) >= 262144, \\\n 'the \"vm.max_map_count\" kernel parameter is too low, check readme'\n\n check_resources()", "def _check_shrink(self):\n # As an example, if length is 1/4 of capacity and growth factor is 2,\n # then the capacity should shrink in half to keep length proportional\n # to capacity\n if self._length < int(self._capacity / (self._growth_factor ** 2)):\n self._shrink_arr()", "def assertUsable (self):\n if self.usableChecked:\n return self.usableError\n try:\n # check if parent encoder is runnable\n aviErr = AviH264.assertUsable ()\n if aviErr != None:\n self.usableChecked = True\n self.usableError = aviErr\n return self.usableError\n\n # check if mencoder is runnable\n result = subprocess.run ([_MP4H264Encoder._getMP4BoxRunnable ()], capture_output = True)\n if result.returncode != 0:\n if not 'MP4Box' in (str(result.stdout) + ' ' + str(result.stderr)):\n self.usableError = \"Can not execute MP4Box. Is inside PATH variable?\"\n else:\n self.usableError = None\n self.usableChecked = True\n return self.usableError\n except:\n self.usableError = \"Can not execute \" + _MP4H264Encoder._getMP4BoxRunnable () + \". 
Is inside PATH variable?\"\n self.usableChecked = True\n return self.usableError", "def _is_done_illegal_state(self, observation):\n servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) < servers_used_mem)", "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True", "def check_if_free_resources(free_mem, free_cpu, reqs):\n req_mem = float(''.join(filter(str.isdigit, reqs[\"mem\"])))\n logger.info(\"CHECK FOR RESOURCES\", extra={\"mem\": free_mem, \"cpu\": free_cpu, \"req_mem\": req_mem})\n if (free_mem/ONE_MB > req_mem and free_cpu > float(reqs[\"cpu\"])):\n logger.info(\"MEM AVAILABLE: \" + str(free_mem/ONE_MB))\n logger.info(\"CPU FREE: \" + str(free_cpu))\n return True\n else:\n logger.info(\"FAILED RESOURCE REQUIREMENTS\")\n return False", "def check_disk_free_space_reserved(self):\n if self.skip_disk_space_check:\n return True\n disk_partition_size = util.disk_partition_size(self.outfile_dir)\n free_disk_space = util.disk_partition_free(self.outfile_dir)\n free_space_factor = self.free_space_reserved_percent / 100\n free_space_reserved = disk_partition_size * free_space_factor\n if free_disk_space < free_space_reserved:\n raise OSCError(\n \"NOT_ENOUGH_SPACE\",\n {\n \"need\": util.readable_size(free_space_reserved),\n \"avail\": util.readable_size(free_disk_space),\n },\n )" ]
[ "0.7295557", "0.6686275", "0.6624224", "0.63968927", "0.62839794", "0.623324", "0.6183667", "0.61706424", "0.6126043", "0.6126043", "0.5933436", "0.5923849", "0.5857149", "0.58338654", "0.58319116", "0.5819135", "0.5796401", "0.5792657", "0.5691452", "0.5668905", "0.5658324", "0.5648655", "0.5622175", "0.5591848", "0.55826336", "0.55231285", "0.551165", "0.5509169", "0.5508916", "0.5487881", "0.54848975", "0.54839134", "0.5476376", "0.54743665", "0.5470177", "0.54619694", "0.5443484", "0.5442074", "0.5430285", "0.5424394", "0.54237056", "0.54231536", "0.54141057", "0.54069835", "0.5398594", "0.53959286", "0.53923744", "0.5385272", "0.5364695", "0.53645915", "0.5359923", "0.5357964", "0.5343937", "0.5340788", "0.5338066", "0.53370124", "0.53370124", "0.53370124", "0.5336294", "0.53303623", "0.53242993", "0.5323864", "0.5311684", "0.5308672", "0.530203", "0.52895963", "0.52875245", "0.5286268", "0.5286268", "0.52708864", "0.52599", "0.5252989", "0.5251941", "0.524039", "0.5239512", "0.52320373", "0.5215056", "0.5210491", "0.51944596", "0.518783", "0.5183382", "0.5180947", "0.5180285", "0.51769406", "0.517485", "0.5167144", "0.51632625", "0.51550347", "0.51548207", "0.5151574", "0.5150844", "0.51498276", "0.5142362", "0.5140473", "0.513468", "0.51308584", "0.51258206", "0.51214296", "0.51162463", "0.5103508" ]
0.60934323
10
Transform an MP3 song into WAV format, and then into a Fourier series.
def time_to_frequency(song,
                      temp_folder,
                      output_folder,
                      rate_limit=6000.0,
                      overwrite=True,
                      plot=True,
                      image_folder=None,
                      step=5.0):
    # Name of files
    song_name = os.path.splitext(song)[0]
    json_name = os.path.join(output_folder, song_name + '.json')
    wav_file = os.path.join(temp_folder, song_name + '.wav')

    if not os.path.isfile(json_name) or overwrite is True:
        # Fourier transformation
        try:
            if ram_prop_condition(prop=0.1):
                logger.error('Song %s is waiting until more memory is available', song_name)
                while ram_prop_condition(prop=0.2):
                    pass  # It consumes cpu, but we assure it doesn't go to sleep indefinitely
            frequencies, fourier_series = wav_to_fourier(wav_file=wav_file,
                                                         rate_limit=rate_limit,
                                                         step=step)

            # Save as JSON
            json_to_save = {song: {str(x): y for x, y in zip(frequencies, fourier_series)}}
            with open(json_name, 'w') as output:
                json.dump(json_to_save, output)

            # Plotting
            if plot is True:
                fourier_plot(freq=frequencies,
                             features=fourier_series,
                             folder=image_folder,
                             filename=song_name)
            logger.debug('%s transformed', song_name)
        except MemoryError:
            logger.error('MemoryError: %s couldn\'t be Fourier transformed', song_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def mp3_to_wav(show_progress=True):\n\n # Define a devnull var to supress subprocess output\n devnull = open(os.devnull, 'w')\n\n # Get a list of the filepath for each of the mp3 files in each subdirectory of data/fma_small\n file_list = glob.glob('./../data/fma_small/*/*.mp3')\n\n # Get the number of files N and initialize a counter\n N = len(file_list)\n counter = 0\n\n # For each file/filepath, convert that file to wav format and save it to data/wavs/*/*.wav (so as a wave file)\n for filepath in file_list:\n\n # Every 100 file conversions, print a progress update\n if counter % 50 == 49 and show_progress:\n progress = str(round(100 * counter / N, 2))\n print('File conversion ' + progress + '% complete.')\n\n # Get the file name from the path and define a new path for the wav file\n file_name = filepath[24:-4]\n new_path = './../data/wavs/' + file_name + '.wav'\n\n # Call the subprocess using ffmpeg to convert the file to wav format (and supress all the output)\n subprocess.call(['ffmpeg', '-i', filepath, new_path], stdout=devnull)\n\n # Increment the counter\n counter += 1", "def convert_to_wav(mp3_filename):\n\n wav_filename = mp3_filename[:-4] + \".wav\"\n complete_mp3FileName = os.path.join(MP3_FOLDER, mp3_filename)\n complete_wavFileName = os.path.join(WAV_FOLDER, wav_filename)\n\n mp3_file = AudioSegment.from_mp3(complete_mp3FileName)\n mp3_file.export(complete_wavFileName, format=\"wav\")\n\n print(f\"The mp3 file {complete_mp3FileName} was successfully converted to \" \\\n + f\"the wav file {complete_wavFileName}.\")", "def mp3_to_wav(mp3_file, wav_file, encoder='mpg123'):\n if encoder == 'mpg123':\n bash_command = ['mpg123', '-w', wav_file, '--mono', mp3_file]\n else:\n bash_command = ['ffmpeg', '-i', mp3_file, wav_file]\n subprocess.run(bash_command)", "def apply_fourier_transform(chunked_audio):\n pass", "def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)", "def encodeMP3(self, wavf: str, dstf: str, cover: str, meta: TrackMeta) -> None:\n FNULL = open(os.devnull, 'w')\n subprocess.call(['lame', '-V2', wavf, dstf], stdout=FNULL, stderr=FNULL)\n FNULL.close()\n # tag MP3\n mm = TrackMeta(meta)\n mp3 = MP3(dstf, ID3=ID3)\n mp3[\"TIT2\"] = TIT2(encoding=3, text=mm.title())\n mp3[\"TPE1\"] = TPE1(encoding=3, text=mm.artist())\n mp3[\"TALB\"] = TALB(encoding=3, text=mm.album())\n mp3[\"TPE2\"] = TPE2(encoding=3, text=mm.albumartist())\n if mm.date():\n mp3[\"TDRC\"] = TDRC(encoding=3, text=mm.date())\n mp3[\"TRCK\"] = TRCK(encoding=3,\n text=mm.tracknumber() + \"/\" + mm.tracktotal())\n mp3[\"TPOS\"] = TPOS(encoding=3,\n text=mm.discnumber() + \"/\" + mm.disctotal())\n\n # composer\n if mm.composer():\n mp3[\"TCM\"] = TCM(encoding=3, text=mm.composer())\n\n # cover\n if cover:\n data = open(cover, 'rb').read()\n if cover.endswith('png'):\n mime = 'image/png'\n else:\n mime = 'image/jpeg'\n mp3.tags.add(APIC(encoding=3, mime=mime, type=3, desc=u'Cover', data=data))\n\n # save\n mp3.save()", "def to_audio(self, _in, _out, bitrate, file_format):\n\n # Default output parameter\n # If not current directory, append '/'\n if os.path.isdir(_out):\n _out = '' if _out == '.' else _out + '/'\n _out += self.get_name_from_path(_in,\n replace=True) + '.' 
+ file_format\n _out = _out.replace('//', '/')\n self.out = _out\n\n # File format unchecked for single inputs\n if not check_is_video(_in):\n msg = \" is not a supported media type\"\n self.abort_conversion(\n self.get_name_from_path(_in) + msg)\n\n \"\"\"\n else:\n base_name = os.path.basename(_out)\n ext = os.path.splitext(base_name)[1]\n _out = _out.replace(ext, '.mp3')\n \"\"\"\n commands = ['ffmpeg', '-i', _in,\n '-vn', '-ar', '44100',\n '-ac', '2', '-ab',\n bitrate, _out]\n try:\n self.run_convert_commands(commands)\n except FileNotFoundError as er:\n res = require_ffmepg()\n\n if not res:\n self.abort_conversion(\"Dependecy not installed.\")", "def to_voice(item):\r\n item.seek(0)\r\n item = AudioSegment.from_file(item)\r\n m = io.BytesIO()\r\n m.name = \"voice.ogg\"\r\n item.split_to_mono()\r\n dur = len(item) / 1000\r\n item.export(m, format=\"ogg\", bitrate=\"64k\", codec=\"libopus\")\r\n m.seek(0)\r\n return m, dur", "def data_to_waves(self, data):\n raise NotImplementedError", "def mp3_to_wav(song_dir, snip_dir, bird_list_path='bird_list.txt'):\n if os.path.exists(snip_dir):\n shutil.rmtree(snip_dir)\n os.makedirs(snip_dir)\n with open(bird_list_path) as f:\n lines = f.readlines()\n bird_list = [line.rstrip('\\n') for line in lines]\n # Build the bird-labeled subdirectories in 'snip_dir'.\n _make_bird_dirs(snip_dir, birds_list)\n # Populate the subdirectory with recordings converted from .mp3 to .wav.\n for f in os.listdir(song_dir):\n bird = extract_bird_name(f)\n if bird in birds_list:\n index = birds_list.index(bird)\n wav_filename = os.path.splitext(f)[0].replace(' ', '_') + '.wav'\n orig = os.path.join(mp3_dir, f)\n new = os.path.join(snip_dir, str(index), wav_filename)\n # MP3-to-WAV conversion requires the ffmpeg package.\n call([\"ffmpeg\", \"-i\", orig, new])", "def decode_audio(fp, fs=None, mono=False, normalize=False, fastwav=False, measured = False):\n if measured:\n fp = fp.decode('latin').replace(\"clean\", \"measured\")\n\n if fastwav:\n # Read with scipy wavread (fast).\n _fs, _wav = wavread(fp)\n if fs is not None and fs != _fs:\n raise NotImplementedError('Fastwav cannot resample audio.')\n if _wav.dtype == np.int16:\n _wav = _wav.astype(np.float32)\n _wav /= 32768.\n elif _wav.dtype == np.float32:\n pass\n else:\n raise NotImplementedError('Fastwav cannot process atypical WAV files.')\n else:\n # TODO: librosa currently optional due to issue with cluster installation\n import librosa\n # Decode with librosa load (slow but supports file formats like mp3).\n _wav, _fs = librosa.core.load(fp, sr=fs, mono=False)\n if _wav.ndim == 2:\n _wav = np.swapaxes(_wav, 0, 1)\n\n assert _wav.dtype == np.float32\n\n # At this point, _wav is np.float32 either [nsamps,] or [nsamps, nch].\n # We want [nsamps, 1, nch] to mimic 2D shape of spectral feats.\n if _wav.ndim == 1:\n nsamps = _wav.shape[0]\n nch = 1\n else:\n nsamps, nch = _wav.shape\n _wav = np.reshape(_wav, [nsamps, 1, nch])\n \n # Average channels if we want monaural audio.\n if mono:\n _wav = np.mean(_wav, 2, keepdims=True)\n\n if normalize:\n _wav /= np.max(np.abs(_wav))\n\n return _wav", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def _to_wav(self):\n self._status = 0\n fname = fm.file2wav(self.get_filename()) \n if fname != self.get_filename(): # can change the name\n 
self._set_filename(fname) # in case of wave transcoding\n self._status = 1", "def convert(\n album,\n):\n for track in list_dir(album):\n ext = splitext(track)[1]\n if ext != \".mp3\":\n new_track = track.replace(ext, \".mp3\")\n if not exists(new_track):\n track_non_mp3 = AudioSegment.from_file(track, format=ext[1:])\n print(f\"{track} -> {new_track}\")\n track_non_mp3.export(new_track, format=\"mp3\")\n os.remove(track)", "def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 0:\n\t\treturn 
[file_b]\n\telse:\n\t\treturn files_b", "def write(f, sr, x, normalized=False):\n channels = 2 if (x.ndim == 2 and x.shape[1] == 2) else 1\n if normalized: # normalized array - each item should be a float in [-1, 1)\n y = np.int16(x * 2 ** 15)\n else:\n y = np.int16(x)\n song = pydub.AudioSegment(y.tobytes(), frame_rate=sr, sample_width=2, channels=channels)\n song.export(f, format=\"mp3\", bitrate=\"64k\")", "def export_wav(\n filename_wav: Path,\n tradb: vae.io.TraDatabase,\n channel: int,\n time_start: Optional[float] = None,\n time_stop: Optional[float] = None,\n decimation_factor: int = 1,\n):\n y, fs = tradb.read_continuous_wave(\n channel=channel,\n time_start=time_start,\n time_stop=time_stop,\n time_axis=False,\n show_progress=False,\n raw=True, # read as ADC values (int16)\n )\n\n if decimation_factor > 1:\n y = signal.decimate(y, decimation_factor).astype(np.int16)\n fs //= decimation_factor\n\n wavfile.write(filename_wav, fs, y)", "def convert_to_mp3(self,path, filename):\n\n codec = \"libmp3lame\"\n mp3_filename = filename + \".mp3\"\n\n command = [self.FFMPEG_BIN,\n \"-n\",\n \"-i\", path,\n \"-acodec\", codec,\n \"-ab\", \"128k\",\n mp3_filename\n ]\n\n return command", "def wav_wav(orig, dest, **_kwargs):\n\n # options = kwargs.get(\"tree\").cmd_options.get(\"options\", [])\n\n # first demux it to 16 bit 48khz\n dest_list = []\n for index, orig_elem in enumerate(tools.get_iter(orig)):\n tmp_dest = os.path.join(\n os.path.dirname(dest),\n \"{0}_{1}\".format(index, os.path.basename(dest)))\n cmd = \"ffmpeg -i {orig} -acodec pcm_s16le -ar 48000 {dest}\".format(\n dest=tmp_dest,\n orig=orig_elem)\n logger.debug(cmd)\n try:\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n continue\n dest_list.append(tmp_dest)\n\n if len(dest_list) > 1:\n cmd = \"sox {orig} {dest}\".format(\n orig=\" \".join(orig),\n dest=dest)\n logger.debug(cmd)\n try:\n subprocess.check_call(cmd, shell=True)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n else:\n os.rename(dest_list[0], dest)\n return dest", "def convert_wav(src_wav, dst_wav, subtype='PCM_16'):\n assert os.path.exists(src_wav), \"{} not exists!\".format(src_wav)\n data, sr = soundfile.read(src_wav)\n soundfile.write(dst_wav, data, sr, subtype=subtype)", "def forward(self, audio):\n feature_extractor = self.feature_extractor\n wave_gan = self.wave_gan\n pqmf = self.pqmf\n use_noise_input = self.use_noise_input\n config = self.config\n pad_fn = self.pad_fn\n\n # Added for processing single audio file as in deepspeech armory [Sonal 29Oct20]\n if audio.ndim == 1:\n num_samples = audio.shape[0]\n mel_spectrogram = feature_extractor.transform(audio)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=mel_spectrogram.device,\n )\n inputs += (noise,)\n\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n return reconstructed_audio\n\n else:\n reconstructions = []\n num_samples = audio.shape[1]\n 
for idx in range(audio.shape[0]):\n recording = audio[idx, :]\n mel_spectrogram = feature_extractor.transform(recording)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=recording.device,\n )\n inputs += (noise,)\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:, :num_samples]\n reconstructions.append(reconstructed_audio)\n return torch.stack(reconstructions)", "def chunkify(song):\n assert len(song) >= CHUNK_SIZE * 2\n for i in xrange(0, len(song) - CHUNK_SIZE, CHUNK_SIZE // 2):\n yield np.fft.rfft(song[i: i + CHUNK_SIZE])", "def decode_audio(in_file, out_file):\r\n # construct the decoder\r\n autoencoder = keras.models.load_model(\"audio_autoencoder.model\")\r\n in_layer = keras.layers.Input(shape=(13,))\r\n decode = autoencoder.layers[-13](in_layer)\r\n decode = autoencoder.layers[-12](decode)\r\n decode = autoencoder.layers[-11](decode)\r\n decode = autoencoder.layers[-10](decode)\r\n decode = autoencoder.layers[-9](decode)\r\n decode = autoencoder.layers[-8](decode)\r\n decode = autoencoder.layers[-7](decode)\r\n decode = autoencoder.layers[-6](decode)\r\n decode = autoencoder.layers[-5](decode)\r\n decode = autoencoder.layers[-4](decode)\r\n decode = autoencoder.layers[-3](decode)\r\n decode = autoencoder.layers[-2](decode)\r\n decode = autoencoder.layers[-1](decode)\r\n decoder = keras.models.Model(in_layer, decode)\r\n\r\n # Load the data\r\n ins = np.load(in_file + \".npz\")\r\n encoded = ins['data']\r\n samp_rate = ins['rate']\r\n channels = ins['channels']\r\n\r\n # Run the decoder\r\n outputs = decoder.predict(encoded)\r\n\r\n # reform output data to the original shape and range\r\n out = outputs.reshape(outputs.shape[0] * outputs.shape[1])\r\n out = ((out * 2.0) - 1.0) * float(pow(2, 15))\r\n out = np.rint(out).astype(np.int16)\r\n\r\n if channels == 2:\r\n out = out.reshape(len(out)//2, 2)\r\n out1 = out[:, 0]\r\n out2 = out[:, 1]\r\n\r\n # perform stft on output data to be in frequency domain\r\n frequencies, times, spectrogram1 = signal.stft(out1, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n frequencies, times, spectrogram2 = signal.stft(out2, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n # eliminate values with frequencies higher than 1680 HZ to decrease noise\r\n spectrogram1[40:, :] = 0\r\n spectrogram2[40:, :] = 0\r\n # perform inverse stft to get back data in time domain\r\n _, out1 = signal.istft(spectrogram1, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n _, out2 = signal.istft(spectrogram2, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n out1 = np.rint(out1).astype(np.int16)\r\n out2 = np.rint(out2).astype(np.int16)\r\n out1 = out1.reshape(out1.shape[0], 1)\r\n out2 = out2.reshape(out2.shape[0], 1)\r\n out = np.concatenate((out1, out2), axis=1)\r\n elif channels == 1:\r\n # perform stft on output data to be in frequency domain\r\n frequencies, times, spectrogram = signal.stft(out, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n # eliminate values with frequencies higher than 1680 HZ to decrease noise\r\n spectrogram[40:, :] = 0\r\n # perform inverse stft to get back data 
in time domain\r\n _, out = signal.istft(spectrogram, samp_rate, window='hann', nperseg=1024, noverlap=512)\r\n out = np.rint(out).astype(np.int16)\r\n\r\n # build the wav file\r\n wavfile.write(out_file+'.wav', samp_rate, out)", "def mels_to_audio(\n self, mels: np.ndarray, settings: typing.Optional[SettingsType] = None,\n ) -> np.ndarray:\n pass", "def convert2mel(audio,base_path,fs, n_fft,fmax,n_mels,hop_length_samples, window_lenght,type_training):\n\n path = os.path.join(base_path, audio)\n if type_training != \"train\":\n if os.path.isfile(os.path.join(base_path,\"processed_wavs_train\",audio)):\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_train\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_test\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data, _ = librosa.core.load(path, sr=fs, res_type=\"kaiser_best\")\n data = normalize_amplitude(data)\n\n powSpectrum = np.abs(stft(data+ 0.00001,n_fft,hop_length = hop_length_samples, win_length = window_lenght, window = windowing(window_lenght, sym=False), center=True, pad_mode='reflect'))**2\n\n mels = melspectrogram(y= None,n_fft=n_fft ,sr=fs ,S= powSpectrum, hop_length= hop_length_samples ,n_mels=n_mels,fmax=fmax , fmin = 0.0).T\n mels = librosa.core.power_to_db(mels, ref=np.min(mels))\n mels = mels / np.max(mels)\n\n return mels.T", "def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig", "def check_wav(song, source_folder, temp_folder, encoder='mpg123'):\n # Name of files\n song_name, extension = os.path.splitext(song)\n mp3_file = os.path.join(source_folder, song)\n if '.wav' != extension:\n wav_file = os.path.join(temp_folder, song_name + '.wav')\n try:\n if not os.path.isfile(wav_file):\n mp3_to_wav(\n mp3_file=mp3_file,\n wav_file=wav_file,\n encoder=encoder)\n else:\n pass\n except MemoryError:\n logger.error('MemoryError: %s MP3 couldn\\'t be transformed into WAV', song_name)\n else: # Already a wav file\n copyfile(mp3_file, os.path.join(temp_folder, song_name))", "def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch", "def toFourier(self):\n\n\t\tif self.space==\"fourier\":\n\t\t\tpass \n\t\telse:\n\t\t\tself.data = fftengine.rfft2(self.data)\n\t\t\tself.space=\"fourier\"", "def encode_audio(in_file, out_file):\r\n # construct the encoder\r\n autoencoder = keras.models.load_model(\"audio_autoencoder.model\")\r\n in_layer = keras.layers.Input(shape=(416, 1))\r\n encode = autoencoder.layers[1](in_layer)\r\n encode = autoencoder.layers[2](encode)\r\n encode = autoencoder.layers[3](encode)\r\n encode = autoencoder.layers[4](encode)\r\n encode = autoencoder.layers[5](encode)\r\n encode = autoencoder.layers[6](encode)\r\n encode = autoencoder.layers[7](encode)\r\n encode = autoencoder.layers[8](encode)\r\n encode = autoencoder.layers[9](encode)\r\n encode = autoencoder.layers[10](encode)\r\n encode = 
autoencoder.layers[11](encode)\r\n encode = autoencoder.layers[12](encode)\r\n encoder = keras.models.Model(in_layer, encode)\r\n\r\n # Read the file\r\n samp_rate, data = wavfile.read(in_file)\r\n # check if the file is mono or stereo\r\n if len(data.shape) == 2:\r\n data = np.concatenate(data)\r\n chans = 2\r\n else:\r\n chans = 1\r\n\r\n # Rescale integer samples over range [-32768,32767] to floats over range [0.0,1.0]\r\n data = data.astype('float32') / float(pow(2, 15))\r\n data += 1.0\r\n data = data / 2.0\r\n\r\n # Pad the samples with zeroes, if needed, to make the last encoding frame full\r\n padded = np.pad(data, (0, 416 - (len(data) % 416)), 'constant')\r\n\r\n # Construct input layer\r\n inputs = padded.reshape(len(padded) // 416, 416, 1)\r\n\r\n # Encode the data\r\n encoded = encoder.predict(inputs)\r\n\r\n # Save the encoded data, as well as the important parameters\r\n np.savez_compressed(out_file, data=encoded, rate=samp_rate, Type=1, channels=chans)", "def wav_to_fourier(wav_file,\n rate_limit=6000.0,\n step=1.0):\n rate, aud_data = read(wav_file)\n # Should be mono\n if len(aud_data) != len(aud_data.ravel()):\n aud_data = np.mean(aud_data, axis=1)\n\n # Zero padding\n len_data = aud_data.shape[0]\n channel_1 = np.zeros(2 ** (int(np.ceil(np.log2(len_data)))))\n channel_1[0:len_data] = aud_data\n\n # Fourier analysis\n fourier = np.abs(np.fft.fft(channel_1))\n freq = np.linspace(0, rate, fourier.shape[0])\n\n freq, fourier = limit_by_freq(freq,\n fourier,\n upper_limit=rate_limit)\n freq, fourier = group_by_freq(freq,\n fourier,\n step=step)\n # Max frequency should be 100.0\n a = np.max(np.abs(fourier)) / 100.0\n fourier = fourier / a\n\n return freq, fourier", "def write_wav(fname, samps, sampling_rate=16000, normalize=True):\n\t# for multi-channel, accept ndarray [Nsamples, Nchannels]\n\tif samps.ndim != 1 and samps.shape[0] < samps.shape[1]:\n\t\tsamps = np.transpose(samps)\n\t\tsamps = np.squeeze(samps)\n\t# same as MATLAB and kaldi\n\tif normalize:\n\t\tsamps = samps * MAX_INT16\n\t\tsamps = samps.astype(np.int16)\n\tfdir = os.path.dirname(fname)\n\tif fdir and not os.path.exists(fdir):\n\t\tos.makedirs(fdir)\n\t# NOTE: librosa 0.6.0 seems could not write non-float narray\n\t# so use scipy.io.wavfile instead\n\twavfile.write(fname, sampling_rate, samps)", "def convert_to_wav (filename, name, origpath, wavpath, mono):\n print(\"Converting {0} to .wav...\".format(filename))\n if not re.match(r\".*_\\d+$\",name):\n # If filenames do include video titles\n name = name.rsplit('_',1)[0]\n\n channel, vid_num = name.rsplit('_', 1)\n channel = re.sub(r'[^A-Za-z1-9]', '', channel)\n newname = '_'.join([channel, vid_num])\n\n exportname = newname + \".wav\"\n filepath = path.join(origpath, filename)\n\n if not path.exists(wavpath):\n makedirs(wavpath)\n exportPath = path.join(wavpath, exportname)\n sound = AudioSegment.from_file(filepath,\"mp4\")\n if mono == True:\n sound = sound.set_channels(1)\n sound.export(exportPath, format=\"wav\")", "def forward(self, audio, feat_kinds=['sp','mcc','f0','ap','en']):\n device = audio.device\n audio = audio.detach().cpu().numpy()\n feat = dict()\n for feat_kind in feat_kinds:\n feat[feat_kind] = list()\n\n for x in audio:\n # Preprocess\n x = x * MAX_WAV_VALUE\n x = self.low_cut_filter(x, cutoff=self.cutoff_freq)\n # Extract f0\n f0, time_axis = pyworld.harvest(x, self.fs, f0_floor=self.minf0, f0_ceil=self.maxf0, frame_period=self.shiftms)\n\n # Extract sp \n sp = pyworld.cheaptrick(x, f0, time_axis, self.fs, fft_size=self.fft_size)\n if 
'sp' in feat_kinds:\n feat['sp'].append(torch.from_numpy(sp).float().t())\n\n # Extract ap\n if 'ap' in feat_kinds:\n ap = pyworld.d4c(x, f0, time_axis, self.fs, fft_size=self.fft_size)\n feat['ap'].append(torch.from_numpy(ap).float().t())\n\n # Extract mcc\n if 'mcc' in feat_kinds:\n mcc = pysptk.sp2mc(sp, self.mcc_dim, self.mcc_alpha)\n feat['mcc'].append(torch.from_numpy(mcc).float().t())\n\n # Extract energy\n if 'en' in feat_kinds:\n mcc = pysptk.sp2mc(sp, self.mcc_dim, self.mcc_alpha)\n en = pysptk.mc2e(mcc, alpha=self.mcc_alpha, irlen=256)\n # en = np.clip(en, 1e-10, None)\n feat['en'].append(torch.from_numpy(en).float().view(-1)) \n\n # Fix f0\n if 'f0' in feat_kinds:\n f0[f0 < 0] = 0\n feat['f0'].append(torch.from_numpy(f0).float().view(-1))\n\n for key, val_list in feat.items():\n feat[key] = torch.cat([val.unsqueeze(0) for val in val_list],dim=0).to(device)\n\n return feat", "def from_wav(cls, fps):\n fpi = iter(fps)\n fs, data = wavfile.read(next(fpi))\n hlist = [data] + [wavfile.read(fp)[1] for fp in fpi]\n\n h = np.array(hlist)\n if data.dtype in [np.uint8, np.int16, np.int32]:\n lim_orig = (np.iinfo(data.dtype).min, np.iinfo(data.dtype).max)\n lim_new = (-1.0, 1.0)\n h = _rescale(h, lim_orig, lim_new).astype(np.double)\n\n return cls.from_time(fs, h)", "def waves_to_data(self, waves):\n raise NotImplementedError", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n return sig", "def main(directory, wavelength=16000, replace=True):\n\n if os.path.isdir(directory):\n # get the directory of mp3 files\n mpthree_files = find_directory__files(directory, 'mp3')\n\n # check whether there are mp3 files\n if len(mpthree_files) > 0:\n # converts all the mp3 files to wav files\n map(lambda x: convert_mp3_to_wav(x, replace=replace), mpthree_files.values())\n\n # now get the wav files after conversion(if any)\n wav_files = find_directory__files(directory, 'wav')\n\n # convert\n map(lambda x: convert_wavelength_file(x, wavelength=wavelength, replace=replace), wav_files.values())\n elif os.path.isfile(directory):\n\n # check if it's a wav\n filetype = find_filetype(directory)\n if filetype != 'wav':\n if filetype == 'mp3':\n convert_mp3_to_wav(directory, replace=replace)\n # get the new file name\n directory = directory.replace('mp3', 'wav')\n else:\n raise ValueError(\"Not a supported filetype at this moment\")\n\n # when filetype == wav or after converting from mp3 to wav\n convert_wavelength_file(directory, wavelength, replace=replace)\n else:\n raise ValueError(\"input is wrong\")", "def wav2mfcc(file_path, max_len=44, n_mfcc=20):", "def decode_wav(raw_data):\n return _kaldi_module.decode_wav(raw_data)", "def convert_one_song(audiofile,output,mbconnect=None,verbose=0,DESTROYAUDIO=False):\n # inputs + sanity checks\n if not os.path.exists(audiofile):\n print 'ERROR: song file does not exist:',songfile\n return 0\n if os.path.exists(output):\n print 'ERROR: hdf5 output file already exist:',output,', delete or choose new path'\n return 0\n # get EN track / song / artist for that song\n if verbose>0: print 'get analysis for file:',audiofile\n track = trackEN.track_from_filename(audiofile)\n song_id = track.song_id\n song = songEN.Song(song_id)\n if verbose>0: print 'found song:',song.title,'(',song_id,')'\n artist_id = song.artist_id\n artist = artistEN.Artist(artist_id)\n if verbose>0: print 'found artist:',artist.name,'(',artist_id,')'\n # hack to fill missing values\n try:\n 
track.foreign_id\n except AttributeError:\n track.__setattr__('foreign_id','')\n if verbose>0: print 'no track foreign_id found'\n try:\n track.foreign_release_id\n except AttributeError:\n track.__setattr__('foreign_release_id','')\n if verbose>0: print 'no track foreign_release_id found'\n # create HDF5 file\n if verbose>0: print 'create HDF5 file:',output\n HDF5.create_song_file(output,force=False)\n # fill hdf5 file from track\n if verbose>0:\n if mbconnect is None:\n print 'fill HDF5 file with info from track/song/artist'\n else:\n print 'fill HDF5 file with info from track/song/artist/musicbrainz'\n h5 = HDF5.open_h5_file_append(output)\n HDF5.fill_hdf5_from_artist(h5,artist)\n HDF5.fill_hdf5_from_song(h5,song)\n HDF5.fill_hdf5_from_track(h5,track)\n if not mbconnect is None:\n HDF5.fill_hdf5_from_musicbrainz(h5,mbconnect)\n h5.close()\n # done\n if DESTROYAUDIO:\n if verbose>0: print 'We remove audio file:',audiofile\n os.remove(audiofile)\n return 1", "def audio_resample(self, data):\n\n data = np.asarray(data)\n if data.ndim <= 1:\n logging.log_first_n(logging.INFO,\n 'Converting %s sound from shape %s to 2-D' %\n (self._name, data.shape), 5)\n data = np.reshape(data, (-1, 1))\n if data.shape[1] > data.shape[0]:\n logging.log_first_n(logging.INFO,\n 'Transposing %s sound from shape %s' %\n (self._name, data.shape), 5)\n data = np.transpose(data)\n\n # Get half window size in seconds.\n half_window_size = 0.5 * self._window / self._fs_out\n\n # Concatenate and update buffer.\n if self._buff is not None:\n data = np.concatenate((self._buff, data), axis=0)\n tau = self._buff.shape[0]\n else:\n tau = 0\n self._buff = data[-int(self._fs_in * half_window_size):, :]\n\n # Get i/o data dimensions.\n frames_in = data.shape[0]\n frames_out = int(round((frames_in - tau) / self._fs_in * self._fs_out))\n\n # Resample data via moving average.\n data_out = np.zeros((frames_out, data.shape[1]))\n if self._fs_out < self._fs_in or self._window > 1:\n for i in range(frames_out):\n t = float(i) / self._fs_out # center of window in seconds\n t1 = int(max(0, round(self._fs_in * (t - half_window_size)) + tau))\n t2 = int(min(frames_in,\n round(self._fs_in * (t + half_window_size)) + tau))\n data_out[i, :] = np.mean(data[t1:t2, :], axis=0)\n\n else:\n\n data_out = data\n\n return data_out", "def spectrogram2wav(mag):\n # Transpose\n mag = mag.T\n\n # De-noramlize\n mag = (np.clip(mag, 0, 1) * Config.max_db) - Config.max_db + Config.ref_db\n\n # to amplitude\n mag = np.power(10.0, mag * 0.05)\n\n # wav reconstruction\n wav = griffin_lim(mag**Config.power)\n\n # De-preemphasis\n wav = scipy.signal.lfilter([1], [1, -Config.preemphasis], wav)\n\n # Remove leading and trailing silence\n wav, _ = librosa.effects.trim(wav)\n\n return wav.astype(np.float32)", "def _save_wav(buff, data, rate) -> None:\n # Code inspired from `IPython.display.Audio`\n data = np.array(data, dtype=float)\n\n bit_depth = 16\n max_sample_value = int(2**(bit_depth - 1)) - 1\n\n num_channels = data.shape[1] if len(data.shape) > 1 else 1\n scaled = np.int16(data / np.max(np.abs(data)) * max_sample_value)\n # The WAVE spec expects little-endian integers of \"sampwidth\" bytes each.\n # Numpy's `astype` accepts array-protocol type strings, so we specify:\n # - '<' to indicate little endian\n # - 'i' to specify signed integer\n # - the number of bytes used to represent each integer\n # See: https://numpy.org/doc/stable/reference/arrays.dtypes.html\n encoded_wav = scaled.astype(f'<i{bit_depth // 8}', copy=False).tobytes()\n\n with 
wave.open(buff, mode='wb') as waveobj:\n waveobj.setnchannels(num_channels)\n waveobj.setframerate(rate)\n waveobj.setsampwidth(bit_depth // 8)\n waveobj.setcomptype('NONE', 'NONE')\n waveobj.writeframes(encoded_wav)", "def save_mp3(ndarray, sr, feature_name, out_path, x, y, new_labels, mp3_filename=None):\n import soundfile as sf\n\n def _save_mp3(source_path, out_path):\n cmd = [\n 'lame',\n '--preset',\n 'insane',\n str(source_path),\n str(out_path)\n ]\n errno = subprocess.call(cmd)\n if errno:\n print('{} encoding failed with code'.format(source_path), end=' ')\n print(errno)\n print('skipping...')\n return errno\n os.remove(source_path)\n return 0\n\n # this is kind-of standard\n if mp3_filename is None:\n mp3_filename = FeatureExtractor.get_file_name(x, feature_name, 'mp3')\n wav_filename = mp3_filename.replace('mp3', 'wav')\n sf.write(str(out_path / wav_filename), ndarray, sr) # write wav file\n errno = _save_mp3(out_path / wav_filename,\n out_path / mp3_filename) # load wav, encode as mp3 and remove wav file\n if errno:\n # if any error, then keep wav\n filename = wav_filename\n else:\n # non-error clause, then it was successfully exported to mp3\n filename = mp3_filename\n if new_labels is not None:\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def write_wav(filename, data, rate = 44100):\r\n \r\n # Compress the data (the input format is likely to be float64)\r\n # Make sure that the format is readable by Librosa\r\n maxv = np.iinfo(np.int16).max\r\n lb_write_wav(filename, (data * maxv).astype(np.int16), rate) \r\n \r\n return(None)", "def convert_to_wav(txt_file, sph_path, target_dir):\n wav_dir = os.path.join(target_dir, 'wav/')\n txt_dir = os.path.join(target_dir, 'txt/')\n os.makedirs(wav_dir, exist_ok=True)\n os.makedirs(txt_dir, exist_ok=True)\n path_to_data = os.path.dirname(txt_file)\n\n def process(x):\n file_path = x[\"audio_file\"]\n text = x[\"transcription\"]\n start_time = x[\"start_time\"]\n duration = x[\"end_time\"] - start_time\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n file_name = str(start_time) + \"_\" + str(duration) + file_name\n text = text.strip().upper()\n with open(os.path.join(txt_dir, file_name + '.txt'), 'w') as f:\n f.write(text)\n cmd = \"sox -v 0.6 -t wav {} -r {} -b 16 -c 1 -t wav {} trim {} {}\".format(\n os.path.join(path_to_data, file_path),\n args.sample_rate,\n os.path.join(wav_dir, file_name + \".wav\"),\n start_time,\n duration)\n subprocess.call([cmd], shell=True)\n print('Converting wav to wav for {}.'.format(txt_file))\n # generate processed data\n data = read_transcription_file(txt_file, sph_path)\n with ThreadPool(10) as pool:\n pool.map(process, data)", "def compute_chunk_features(mp3_file):\n # On convertit le fichier mp3 en un fichier wav mono, 1avec un sample rate de 10000Hertz: on utilise\n # On utilise l'application sox \"c:/Program Files (x86)/sox-14.4.0/sox\"\n\n sox_command = \"./sox-14.4.0/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def _create_wave_file(self):\n is_wave_open = False\n try:\n wv = wave.open(self.audio_file_name, 
mode='wb')\n is_wave_open = True\n wv.setparams((1, # 1 channel (mono)\n 2, # 2 bytes per sample * 1 channel\n self.sample_rate,\n 0, # Initial number of samples.\n 'NONE',\n 'not compressed'))\n wv.writeframes(self.sample_buffer)\n except:\n print('Error creating audio file')\n if is_wave_open:\n wv.close()", "def reconstruct_waveform(self, mel, n_iter=32):\n amp_mel = self._denormalize(mel)\n S = librosa.feature.inverse.mel_to_stft(\n amp_mel,\n power=1,\n sr=self.config['sampling_rate'],\n n_fft=self.config['n_fft'],\n fmin=self.config['f_min'],\n fmax=self.config['f_max'])\n wav = librosa.core.griffinlim(\n S,\n n_iter=n_iter,\n hop_length=self.config['hop_length'],\n win_length=self.config['win_length'])\n return wav", "def export_wav(self, folder, name_fmt=\"{:02d}.wav\", dtype=np.int16):\n data = np.atleast_2d(self.in_time)\n\n assert data.ndim == 2\n assert np.all(np.abs(data) <= 1.0)\n\n # convert and scale to new output datatype\n if dtype in [np.uint8, np.int16, np.int32]:\n lim_orig = (-1.0, 1.0)\n lim_new = (np.iinfo(dtype).min, np.iinfo(dtype).max)\n data = _rescale(data, lim_orig, lim_new).astype(dtype)\n elif dtype != np.float32:\n raise TypeError(f\"dtype {dtype} is not supported by scipy.wavfile.write.\")\n\n path = Path(folder)\n if not path.is_dir():\n path.mkdir(parents=True, exist_ok=False)\n\n for i in range(data.shape[0]):\n wavfile.write(path / name_fmt.format(i + 1), self.fs, data[i])", "def processFile(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):\n length = length*divide\n #fs = sample rate, sound = multichannel sound signal\n try:\n fs1, sound = wavfile.read(filename)\n except ValueError:\n print(str(filename) + ' failed to process')\n return 'failed'\n if fs1 != fs_in:\n raise ValueError('Sampling rate should be ' + str(fs_in) + ' for: ' + filename)\n sig1 = sound[:0] #left channel\n pre_emphasis = 0.97\n sig1 = np.append(sig1[0], sig1[1:] - pre_emphasis * sig1[:-1])\n\n \n fs2, sig2 = downsample(sig1,fs1,q)\n N2 = len(sig2)\n sig3 = sig2[N2//2-length:N2//2+length]\n #print(len(sig3))\n\n FFT = abs(scipy.fft(sig3))\n FFT_side = FFT[range(len(FFT)//2)]\n #freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n #plt.plot(freqs,FFT)\n if len(FFT_side) != length:\n print('ERROR MESSAGE DETAILS')\n print('filename: ' + filename)\n print('length = ' + str(length))\n print('fs_in = ' + str(fs_in))\n print('q = ' + str(q))\n print('divide = ' + str(divide))\n total_time = len(sig1)/fs1\n print('total_time = ' + str(total_time))\n print('Please check: length < total_time*fs//(2*q)')\n print('Check: ' + str(length) + ' < ' + str(total_time*fs1//(2*q)))\n raise ValueError('Length FFT_side != length: ' + str(len(FFT_side)) + ' != ' + str(length))\n \n \n FFT_log = []\n # normalize FFT\n for value in FFT_side:\n value = np.log(value)\n FFT_log.append(value)\n max_val = getMax(FFT_log)[1]\n FFT_norm = []\n for value in FFT_log:\n FFT_norm.append(value/max_val)\n \n \n FFT_side = np.array(FFT_norm)\n FFT_divided = FFT_side[range(length//divide)]\n #plot = True\n if plot == True:\n freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n freqs_divided = np.array(freqs[range(len(FFT_divided))])\n plt.plot(freqs_divided,FFT_divided) # plotting the complete fft spectrum\n plt.show()\n \n return FFT_divided", "def compute_chunk_features(mp3_file):\n # Extract MP3 file to a mono, 10kHz WAV file\n sox_command = \"/usr/local/bin/sox\"\n out_file = 'temp.wav'\n #cmd = sox_command % (out_file, mp3_file)\n temp2 = subprocess.call([sox_command, mp3_file,'-r 10000','-c 1',out_file])\n # 
Read in chunks of data from WAV file\n wav_data1, wav_data2 = read_wav(out_file)\n # We'll cover how the features are computed in the next section!\n return np.array(features(wav_data1)), np.array(features(wav_data2))", "def mic_audio(dur):\n\n audio,b = microphone.record_audio(dur)\n audio = np.hstack([np.frombuffer(i,np.int16) for i in audio])\n return audio", "def analyze_mp3(mp3filespec):\n \n # Make a temporary working directory for storing the wav file\n # that soundstretch should analyze\n wavfilespec = tempfile.NamedTemporaryFile(suffix='.wav') \n \n # Use lame to make a wav representation of the mp3 file to be analyzed\n wav_command = 'sox %s %s' % (mp3filespec, wavfilespec.name)\n subprocess.call([wav_command], shell=True, stderr=open(os.devnull, 'w'))\n \n # Call soundstretch to analyze the wav file\n bpm_command = 'soundstretch %s -bpm' % wavfilespec.name\n p = subprocess.Popen([bpm_command], shell=True,stdout=subprocess.PIPE)\n output = p.communicate()[0]\n \n # Delete temporary working directory and its contents\n #shutil.rmtree(workingdir)\n\n bpm_suggestion = _get_bpm_from_soundstretch(output)\n\n return fit_bpm_in_window(bpm_suggestion)", "def towave(filename, rate, data):\n if hasattr(filename, 'write'):\n fid = filename\n else:\n fid = open(filename, 'wb')\n\n try:\n dkind = data.dtype.kind\n if not (dkind == 'i' or dkind == 'f' or (dkind == 'u' and data.dtype.itemsize == 1)):\n raise ValueError(\"Unsupported data type '%s'\" % data.dtype)\n\n fid.write(b'RIFF')\n fid.write(b'\\x00\\x00\\x00\\x00')\n fid.write(b'WAVE')\n # fmt chunk\n fid.write(b'fmt ')\n if dkind == 'f':\n comp = 3\n else:\n comp = 1\n if data.ndim == 1:\n noc = 1\n else:\n noc = data.shape[1]\n bits = data.dtype.itemsize * 8\n sbytes = rate * (bits // 8) * noc\n ba = noc * (bits // 8)\n fid.write(struct.pack('<ihHIIHH', 16, comp, noc, rate, sbytes, ba, bits))\n # data chunk\n fid.write(b'data')\n fid.write(struct.pack('<i', data.nbytes))\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\n data = data.byteswap()\n fid.write(data.ravel().view('b').data)\n\n # Determine file size and place it in correct\n # position at start of the file.\n size = fid.tell()\n fid.seek(4)\n fid.write(struct.pack('<i', size - 8))\n\n finally:\n if not hasattr(filename, 'write'):\n fid.close()\n else:\n fid.seek(0)", "def create_audio_file():\n # Get the response from boto3\n raw_audio = generate_audio()\n # pull the Audiostream object from the response from boto3\n raw_audio = raw_audio[\"AudioStream\"]\n # create output location\n # process the whole block\n with closing(raw_audio) as audio:\n with open(\"output_audio.mp3\", \"wb\") as file:\n file.write(raw_audio.read())", "def float2wav(rawData, wavFile, bit=16, samplingRate = 16000):\n rawData = rawData * np.power(2.0, bit-1)\n rawData[rawData >= np.power(2.0, bit-1)] = np.power(2.0, bit-1)-1\n rawData[rawData < -1*np.power(2.0, bit-1)] = -1*np.power(2.0, bit-1)\n \n # write as signed 16bit PCM\n if bit == 16:\n rawData = np.asarray(rawData, dtype=np.int16)\n elif bit == 32:\n rawData = np.asarray(rawData, dtype=np.int32)\n else:\n print(\"Only be able to save wav in int16 and int32 type\")\n print(\"Save to int16\")\n rawData = np.asarray(rawData, dtype=np.int16)\n scipy.io.wavfile.write(wavFile, samplingRate, rawData)\n return", "def make_waves(wave_array, filename: str, num_cycle=1):\n sampleRate = 44100.0 # hertz\n duration = 1.0 # seconds\n frequency = 440.0 # hertz\n obj = wave.open(filename, 'w')\n obj.setnchannels(1) # 
mono\n obj.setsampwidth(2)\n obj.setframerate(sampleRate)\n waves = list(wave_array)\n for w in range(num_cycle):\n for i in waves:\n value = i\n data = struct.pack('<h', int(value))\n obj.writeframesraw(data)\n obj.close()", "def spectrogram_to_audio(self, spectrogram, phase):\n\n return self.data_set.istft(spectrogram * phase)", "def webm_to_wav(webm_file: str):\n wav_file = webm_file.replace(\".webm\", \".wav\")\n wav = AudioSegment.from_file(webm_file)\n wav.export(wav_file, format=\"wav\")\n return wav_file", "def enregistre_audio(\n audio: AudioSegment, \n chemin:str = 'animalese.wav',\n format:str ='wav'\n ) -> AudioSegment:\n\n which = pydub.utils.which\n\n if which(\"avconv\"):\n app = \"avconv\"\n elif which(\"ffmpeg\"):\n app = \"ffmpeg\"\n elif format not in {'raw', 'wav'}:\n raise FileNotFoundError(\"ffmpeg/avconv introuvable.\") \n\n return audio.export(chemin, format=format)", "def play_wav_on_index(audio_data, stream_object):\n\n stream_object.write(audio_data)", "def save_to_file(\n sources,\n codec='wav', audio_adapter=ffmpeg.FFMPEGProcessAudioAdapter(),\n bitrate='128k', synchronous=True):\n\n # filename = \"chengdu.mp3\"\n pool = Pool()\n tasks = []\n for instrument, data in sources.items():\n path = \"./out/\"+instrument + \".\" + codec\n\n if pool:\n task = pool.apply_async(audio_adapter.save, (\n path,\n data,\n 44100,\n codec,\n bitrate))\n tasks.append(task)\n else:\n audio_adapter.save(path, data, 44100, codec, bitrate)\n if synchronous and pool:\n while len(tasks) > 0:\n task = tasks.pop()\n task.get()\n task.wait(timeout=200)", "def path_to_audio(path):\n print(path)\n audio, sr = librosa.load(path, sr=SAMPLING_RATE)\n x = audio.shape[0]\n audio = np.reshape(audio, (x, 1))\n audio = mt.array_to_tensor(audio)\n print(audio.shape)\n #audio = tf.io.read_file(path)\n #audio, _ = tf.audio.decode_wav(audio, 1, SAMPLING_RATE)\n return audio", "def process_audio(fname, output_dir, poller):\n result = []\n try:\n if poller.params.candidate_transcripts is not None:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".json\")\n else:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".txt\")\n audio, audio_length = load_audio(fname, poller.params.model.sampleRate())\n pred = transcribe_audio(poller.params.model, audio, candidate_transcripts=poller.params.candidate_transcripts)\n with open(out_path, \"w\") as fp:\n fp.write(pred)\n result.append(out_path)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process audio file: %s\\n%s\" % (fname, traceback.format_exc()))\n return result", "def write_wave(data, samp_rate, file):\n if data.dtype != np.float16:\n assert(data.dtype in [np.float32, np.float64])\n if (len(data.shape) < 2 or data.shape[0] > data.shape[1] or\n not data.dtype in [np.float32, np.float64]):\n raise ValueError(\"Input audio had unexpected type or shape or dtype: {},{}\"\n .format(data.shape, data.dtype))\n max_val = data.max() * 32768.0\n min_val = data.min() * 32768.0\n\n truncation_scale = 1.0\n if max_val > 32767.0:\n # The + 0.1 below is a small offset to prevent roundoff causing\n # wrap-around errors.\n truncation_scale = 32767.0 / (max_val + 0.1)\n if min_val < -32768.0:\n s = 32768.0 / (-min_val + 0.1);\n if s > truncation_scale:\n truncation_scale = s\n scale = 32768.0 * truncation_scale\n data = np.rint(data * scale).astype(np.int16)\n data = data.swapaxes(0, 1)\n file = file_utils.open_or_fd(file, \"w\", encoding=None)\n 
wavio.write(file, data, samp_rate, scale='none')\n file.close()", "def tag_file(filename, artist, title, year=None, genre=None, artwork_url=None, album=None, track_number=None, url=None):\n\n try:\n audio = EasyMP3(filename)\n audio.tags = None\n audio[\"artist\"] = artist\n audio[\"title\"] = title\n if year:\n audio[\"date\"] = str(year)\n if album:\n audio[\"album\"] = album\n if track_number:\n audio[\"tracknumber\"] = track_number\n if genre:\n audio[\"genre\"] = genre\n if url: # saves the tag as WOAR\n audio[\"website\"] = url\n audio.save()\n\n if artwork_url:\n\n artwork_url = artwork_url.replace('https', 'http')\n\n mime = 'image/jpeg'\n if '.jpg' in artwork_url:\n mime = 'image/jpeg'\n if '.png' in artwork_url:\n mime = 'image/png'\n\n if '-large' in artwork_url:\n new_artwork_url = artwork_url.replace('-large', '-t500x500')\n try:\n image_data = requests.get(new_artwork_url).content\n except Exception as e:\n # No very large image available.\n image_data = requests.get(artwork_url).content\n else:\n image_data = requests.get(artwork_url).content\n\n audio = MP3(filename, ID3=OldID3)\n audio.tags.add(\n APIC(\n encoding=3, # 3 is for utf-8\n mime=mime,\n type=3, # 3 is for the cover image\n desc='Cover',\n data=image_data\n )\n )\n audio.save()\n\n # because there is software that doesn't seem to use WOAR we save url tag again as WXXX\n if url:\n audio = MP3(filename, ID3=OldID3)\n audio.tags.add(WXXX(encoding=3, url=url))\n audio.save()\n\n return True\n\n except Exception as e:\n puts(colored.red(\"Problem tagging file: \") + colored.white(\"Is this file a WAV?\"))\n return False", "def wavPlayer(data, rate, scale=False, autoplay=False):\r\n #if np.max(abs(data)) > 1 or scale:\r\n # data = data/np.max(abs(data))\r\n #data = (2**13*data).astype(np.int16)\r\n \r\n buffer = BytesIO()\r\n buffer.write(b'RIFF')\r\n buffer.write(b'\\x00\\x00\\x00\\x00')\r\n buffer.write(b'WAVE')\r\n \r\n buffer.write(b'fmt ')\r\n if data.ndim == 1:\r\n noc = 1\r\n else:\r\n noc = data.shape[1]\r\n \r\n bits = data.dtype.itemsize * 8\r\n sbytes = rate*(bits // 8)*noc\r\n ba = noc * (bits // 8)\r\n buffer.write(struct.pack('<ihHIIHH', 16, 1, noc, rate, sbytes, ba, bits))\r\n\r\n # data chunk\r\n buffer.write(b'data')\r\n buffer.write(struct.pack('<i', data.nbytes))\r\n\r\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\r\n data = data.byteswap()\r\n\r\n buffer.write(data.astype(np.int16).tostring())\r\n\r\n # Determine file size and place it in correct position at start of the file.\r\n size = buffer.tell()\r\n buffer.seek(4)\r\n buffer.write(struct.pack('<i', size-8))\r\n \r\n val = buffer.getvalue()\r\n autoplay = \" autoplay=\\\"autoplay\\\"\"*autoplay + \"\"\r\n \r\n src = \"\"\"<audio controls=\"controls\" style=\"width:600px\"{autoplay}>\r\n <source controls src=\"data:audio/wav;base64,{base64}\" type=\"audio/wav\" />\r\n Your browser does not support the audio element.\r\n </audio>\"\"\".format(base64=base64.b64encode(val).decode(\"ascii\"), autoplay=autoplay)\r\n display(HTML(src))", "def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file", "def 
mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def save_combined(self, fname, ffmpeg_output=False, master_volume=1.):\n # setup list to house wav stream data \n inputs = [None]*len(self.out_channels)\n\n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) / master_volume\n \n print(\"Creating temporary .wav files...\")\n \n for c in range(len(self.out_channels)):\n tempfname = f\"./.TEMP_{c}.wav\"\n wav.write(tempfname, \n self.out_channels[str(c)].values,\n self.samprate, \n scale = (-vmax,vmax),\n sampwidth=3)\n inputs[self.channels.forder[c]] = ff.input(tempfname)\n \n print(\"Joning temporary .wav files...\")\n (\n ff.filter(inputs, 'join', inputs=len(inputs), channel_layout=self.channels.setup)\n .output(fname)\n .overwrite_output()\n .run(quiet=~ffmpeg_output)\n )\n \n print(\"Cleaning up...\")\n for c in range(len(self.out_channels)):\n os.remove(f\"./.TEMP_{c}.wav\")\n \n print(\"Saved.\")", "def record_audio_to_file_and_get_wav(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()\n return WavFile(samples=frames, sample_width=sample_width, time=time, word=file_name)", "def fft2(data):\n assert data.size(-1) == 2\n data = ifftshift(data, dim=(-3, -2))\n data = torch.fft(data, 2, normalized=True)\n data = fftshift(data, dim=(-3, -2))\n return data", "def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y", "def save_wav(file_name, signal, fs):\n wavfile.write(file_name, fs, np.int16(signal/np.max(np.abs(signal)) * (2**(16)/2-1)))", "def play(self):\n assert pyaudio is not None, (\"You need to have pyaudio installed to \"\n \"use the play_wav function\")\n filename = os.path.join(tempfile.gettempdir(),\n '6003_wave_%s.wav' % abs(hash(tuple(self.samples))))\n self.save(filename)\n f = wave.open(filename, 'r')\n try:\n p = pyaudio.PyAudio()\n stream = p.open(format=p.get_format_from_width(f.getsampwidth()),\n channels=f.getnchannels(),\n rate=f.getframerate(),\n output=True)\n\n data = f.readframes(10240)\n while data:\n stream.write(data)\n data = f.readframes(10240)\n\n stream.stop_stream()\n stream.close()\n p.terminate()\n finally:\n f.close()\n os.unlink(filename)", "def download_audio(url,output_dir,ffmpeg_dir):\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'ffmpeg_location': ffmpeg_dir,\n 'outtmpl': output_dir + '/%(title)s.%(ext)s',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])", "def fft2(data):\n assert data.size(-1) == 2\n data = ifftshift(data, dim=(-3, -2))\n data = torch.fft(data, 2, 
normalized=False)\n data = fftshift(data, dim=(-3, -2))\n return data", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def run(self):\r\n\r\n p = pyaudio.PyAudio()\r\n\r\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\r\n channels=wf.getnchannels(),\r\n rate=wf.getframerate(),\r\n output=True)\r\n\r\n musicdata = wf.readframes(CHUNK)\r\n\r\n while playing:\r\n if self.streamnum == 1:\r\n stream.write(musicdata)\r\n musicdata = wf.readframes(CHUNK)\r\n else:\r\n stream.write(musicdata)\r\n musicdata = wf2.readframes(CHUNK)\r\n if len(musicdata) < CHUNK or musicdata == '':\r\n if self.streamnum == 1:\r\n self.streamnum = 2\r\n else:\r\n self.streamnum = 1\r\n self.next = False\r\n if self.pause:\r\n while True:\r\n if not playing:\r\n return\r\n elif not self.pause:\r\n break\r\n\r\n stream.stop_stream()\r\n stream.close()\r\n\r\n p.terminate()", "def morse_to_audio(words, playsound=None, name_file=\"output\\\\code_to_audio_output.wav\"):\n dot = wave.open(\"kropka.wav\", 'rb')\n dash = wave.open(\"kreska.wav\", 'rb')\n\n rate_dot = dot.getframerate()\n\n rate_dash = dash.getframerate()\n\n data_dot = dot.readframes(-1)\n data_dash = dash.readframes(-1)\n data_dot = np.fromstring(data_dot, 'Int16')\n data_dash = np.fromstring(data_dash, 'Int16')\n\n l2=len(data_dot)\n l1=len(data_dash)\n\n output=[]\n\n for element in words:\n # print(element)\n for i in range(0, len(element)):\n # print(element[i])\n if element[i] == '1':\n # playsound(\"kropka.wav\")\n output.extend(data_dot)\n\n if element[i] == '0':\n # playsound(\"kreska.wav\")\n output.extend(data_dash)\n if element[i] == ' ':\n output.extend(np.zeros(int(len(data_dash)))*3)\n if i != len(element) - 1:\n # time.sleep(dl_kropka)\n output.extend(np.zeros(int(len(data_dot))))\n else:\n continue\n # time.sleep(dl_kreska)\n output.extend(np.zeros(int(len(data_dash))))\n\n # print(output)\n\n wynik=np.asarray(output)\n\n wynik=np.array(wynik).astype('int16')\n\n wav.write(name_file, rate_dash, wynik)\n\n #plik sie nie odtwarza w windowsie ale w audacity jest już wyraźnym szumem XD\n\n dot.close()\n dash.close()", "def read_wav(fname, normalize=True):\n # samps_int16: N x C or N\n # N: number of samples\n # C: number of channels\n sampling_rate, samps_int16 = wavfile.read(fname)\n # N x C => C x N\n samps = samps_int16.astype(np.float)\n # tranpose because I used to put channel axis first\n if samps.ndim != 1:\n samps = np.transpose(samps)\n # normalize like MATLAB and librosa\n if normalize:\n samps = samps / MAX_INT16\n return sampling_rate, samps", "def spectrogram_from_file(filename, step=10, window=20, max_freq=None,\n eps=1e-14, time_up=12, time_down=2):\n sample_rate, audio = wavfile.read(filename) \n audio = audio / np.sqrt(np.sum(np.square(audio)))\n if audio.ndim >= 2:\n audio = np.mean(audio, 1)\n if max_freq is None:\n max_freq = sample_rate / 2\n if max_freq > sample_rate / 2:\n raise ValueError(\"max_freq must not be greater than half of \"\n \" sample rate\")\n if step > window:\n raise ValueError(\"step size must not be greater than window size\")\n hop_length = int(0.001 * step * sample_rate)\n fft_length = int(0.001 * window * sample_rate)\n pxx, freqs = spectrogram(\n audio, fft_length=fft_length, sample_rate=sample_rate,\n hop_length=hop_length)\n ind = np.where(freqs <= max_freq)[0][-1] + 1\n\n # audio record time limit\n is_saved = False\n sample_time = int(len(audio) / sample_rate * 1000)\n if sample_time <= time_up * 
1000 and sample_time >= time_down * 1000:\n is_saved = True\n\n return np.transpose(np.log(pxx[:ind, :] + eps)), is_saved", "def _record_wav(stream, N, CHUNK):\n frames = []\n for i in range(N):\n data = stream.read(CHUNK)\n frames.append(data)\n return np.fromstring(b\"\".join(frames), 'Int16')", "def waveFloatToPCMFile(waveData, wavFile, bit=16, sr=16000):\n \n # recover to 16bit range [-32768, +32767]\n rawData = waveData * np.power(2.0, bit-1)\n rawData[rawData >= np.power(2.0, bit-1)] = np.power(2.0, bit-1)-1\n rawData[rawData < -1*np.power(2.0, bit-1)] = -1*np.power(2.0, bit-1)\n \n # write as signed 16bit PCM\n if bit == 16:\n rawData = np.asarray(rawData, dtype=np.int16)\n elif bit == 32:\n rawData = np.asarray(rawData, dtype=np.int32)\n else:\n print(\"Only be able to save wav in int16 and int32 type\")\n print(\"Save to int16\")\n rawData = np.asarray(rawData, dtype=np.int16)\n scipy.io.wavfile.write(wavFile, sr, rawData)\n return", "def make_audio(audio_path):\n content, sample_rate = librosa.load(audio_path, sr=16000)\n del sample_rate\n if content.dtype in (np.float32, np.float64):\n content = (content * np.iinfo(np.int16).max).astype(np.int16)\n return speech.RecognitionAudio(content=content.tobytes())", "def convert_spectrogram_to_audio(self, spec: 'torch.tensor', **kwargs) -> 'torch.tensor':", "def fourier_from_scales(scales, wf, w0):\n\n scales_arr = np.asarray(scales)\n\n if wf == 'morlet':\n return (4 * np.pi * scales_arr) / (w0 + np.sqrt(2 + w0**2))\n else:\n raise ValueError('wavelet function not available')", "def wavwrite(fname, Fs, xt):\n # convert to np.int16 data type\n xt = np.array((2**15-1)*xt, np.int16)\n sio_wav.write(fname, Fs, xt)", "def transform_folder(source_folder,\n output_folder,\n temp_folder,\n rate_limit=6000.0,\n overwrite=True,\n plot=False,\n image_folder=None,\n multiprocess=False,\n encoder='mpg123',\n step=5.0):\n merged_file = os.path.join(output_folder, 'merged_file.json')\n\n os.makedirs(temp_folder, exist_ok=True)\n os.makedirs(output_folder, exist_ok=True)\n if os.path.isfile(merged_file):\n os.remove(merged_file)\n if plot:\n os.makedirs(image_folder, exist_ok=True)\n\n # Check if mp3 is already transformed into wav. 
Right\n # now, foucluster doesn't have a direct read from mp3\n logger.info('Checking if songs are in WAV format...')\n if source_folder != temp_folder:\n [check_wav(song=song,\n source_folder=source_folder,\n temp_folder=temp_folder,\n encoder=encoder)\n for song in os.listdir(source_folder)]\n\n if multiprocess is True:\n logger.debug('Fourier is applied in multiprocess')\n songs = [(song, temp_folder, output_folder, rate_limit,\n overwrite, plot, image_folder, step)\n for song in os.listdir(source_folder)]\n\n # with mp.Pool(processes=max(int(mp.cpu_count() / 2.0), 1)) as p:\n with mp.Pool(processes=mp.cpu_count(), maxtasksperchild=1) as p:\n p.starmap(time_to_frequency, songs)\n else:\n logger.debug('Fourier is applied in single core')\n [time_to_frequency(song=song,\n temp_folder=temp_folder,\n output_folder=output_folder,\n rate_limit=rate_limit,\n overwrite=overwrite,\n plot=plot,\n image_folder=image_folder,\n step=step)\n for song in os.listdir(source_folder)]\n\n # read_files = glob.glob(os.path.join(output_folder, '*.json'))\n # with open(merged_file, 'w') as outfile:\n # file_contents = [open(f).read() for f in read_files]\n # outfile.write('[{}]'.format(','.join(file_contents)))", "def save_wfm(self, source, dest):\n self.bus.write('SAV:WAVE %s,%s' % (source, dest))", "def output_wave_file(predicted_mfccs, filename):\n global eng\n predicted_mfccs_transposed = np.transpose(predicted_mfccs)\n\n\n # MFCC features need to be a numpy array of shape (num_coefficients x num_frames) in order to be passed to the invmelfcc function\n inverted_wav_data = eng.invmelfcc(matlab.double(predicted_mfccs_transposed.tolist()), 16000.0, 25, 100.0, 0.005, 0.005)\n\n inverted_wav_data = np.squeeze(np.array(inverted_wav_data))\n\n # scales the waveform to be between -1 and 1\n maxVec = np.max(inverted_wav_data)\n minVec = np.min(inverted_wav_data)\n inverted_wav_data = ((inverted_wav_data - minVec) / (maxVec - minVec) - 0.5) * 2\n\n wav.write(filename + '.wav', 16000.0, inverted_wav_data)", "def get_audio_data(filename):\n\n audio_file = eyed3.load(filename)\n artist = audio_file.tag.artist\n title = audio_file.tag.title\n time = audio_file.info.time_secs\n album = audio_file.tag.album\n genre = re.sub('^\\(.*\\)', '', str(audio_file.tag._getGenre().name).lower().replace('|', ',').replace('/', ','))\n\n try:\n year = audio_file.tag.getBestDate().year\n except:\n year = None\n\n comments = []\n for i in audio_file.tag.comments:\n comment = correct_playlist_names(i.text.lower().strip())\n comments += comment.replace('|', ',').replace('/', ',').strip('|').split(',')\n\n return {\n 'artist' : artist,\n 'title' : title,\n 'album' : album,\n 'time' : time,\n 'comments' : filter(None, comments),\n 'genre' : genre.split(','),\n 'year' : year\n }", "def wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del 
song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')", "def fourier(data, temp_freq, axis, output = 'amplitude'):\n\t\t\n\t\n\t# take largest possible multiple of F1 from PSTH.\n\t# Generate freq and fft\n\t# generate amplitude\n\t# return amplitude, F0, F1 and F2 values" ]
[ "0.6867949", "0.6767562", "0.6552796", "0.64654547", "0.62336755", "0.6104877", "0.6079277", "0.6008433", "0.59922224", "0.5926576", "0.589333", "0.58891535", "0.58476907", "0.5833807", "0.5785292", "0.56830513", "0.5678468", "0.5659172", "0.56562704", "0.5653985", "0.5649063", "0.5636142", "0.561131", "0.55961293", "0.55803704", "0.5544978", "0.5521648", "0.55199665", "0.54779404", "0.5466245", "0.5450212", "0.5445111", "0.5436192", "0.5422062", "0.5421598", "0.5416742", "0.54064816", "0.53705317", "0.53705317", "0.53649235", "0.5341756", "0.53364706", "0.5336135", "0.53195363", "0.5318765", "0.5314477", "0.53062224", "0.530274", "0.5278632", "0.5266761", "0.52553374", "0.5230944", "0.5229415", "0.5226988", "0.522286", "0.5213732", "0.5210776", "0.5202379", "0.5199698", "0.51877165", "0.51860446", "0.51856726", "0.5169718", "0.5169716", "0.5167357", "0.5155061", "0.5152157", "0.51506853", "0.5146321", "0.51352125", "0.5134664", "0.5129086", "0.5126437", "0.51263905", "0.5121131", "0.5112335", "0.5100698", "0.50998425", "0.5097317", "0.5096114", "0.5095883", "0.5095635", "0.5088378", "0.50854796", "0.5085238", "0.5071755", "0.5071669", "0.5069535", "0.5069533", "0.50651336", "0.50599605", "0.5056254", "0.5055356", "0.504092", "0.50384796", "0.5032477", "0.5025081", "0.50247544", "0.5013465", "0.50072336" ]
0.5470141
29
Transform a directory full of MP3 files into WAV files, and then into Fourier series, working with directories.
def transform_folder(source_folder, output_folder, temp_folder, rate_limit=6000.0, overwrite=True, plot=False, image_folder=None, multiprocess=False, encoder='mpg123', step=5.0): merged_file = os.path.join(output_folder, 'merged_file.json') os.makedirs(temp_folder, exist_ok=True) os.makedirs(output_folder, exist_ok=True) if os.path.isfile(merged_file): os.remove(merged_file) if plot: os.makedirs(image_folder, exist_ok=True) # Check if mp3 is already transformed into wav. Right # now, foucluster doesn't have a direct read from mp3 logger.info('Checking if songs are in WAV format...') if source_folder != temp_folder: [check_wav(song=song, source_folder=source_folder, temp_folder=temp_folder, encoder=encoder) for song in os.listdir(source_folder)] if multiprocess is True: logger.debug('Fourier is applied in multiprocess') songs = [(song, temp_folder, output_folder, rate_limit, overwrite, plot, image_folder, step) for song in os.listdir(source_folder)] # with mp.Pool(processes=max(int(mp.cpu_count() / 2.0), 1)) as p: with mp.Pool(processes=mp.cpu_count(), maxtasksperchild=1) as p: p.starmap(time_to_frequency, songs) else: logger.debug('Fourier is applied in single core') [time_to_frequency(song=song, temp_folder=temp_folder, output_folder=output_folder, rate_limit=rate_limit, overwrite=overwrite, plot=plot, image_folder=image_folder, step=step) for song in os.listdir(source_folder)] # read_files = glob.glob(os.path.join(output_folder, '*.json')) # with open(merged_file, 'w') as outfile: # file_contents = [open(f).read() for f in read_files] # outfile.write('[{}]'.format(','.join(file_contents)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def mp3_to_wav(show_progress=True):\n\n # Define a devnull var to supress subprocess output\n devnull = open(os.devnull, 'w')\n\n # Get a list of the filepath for each of the mp3 files in each subdirectory of data/fma_small\n file_list = glob.glob('./../data/fma_small/*/*.mp3')\n\n # Get the number of files N and initialize a counter\n N = len(file_list)\n counter = 0\n\n # For each file/filepath, convert that file to wav format and save it to data/wavs/*/*.wav (so as a wave file)\n for filepath in file_list:\n\n # Every 100 file conversions, print a progress update\n if counter % 50 == 49 and show_progress:\n progress = str(round(100 * counter / N, 2))\n print('File conversion ' + progress + '% complete.')\n\n # Get the file name from the path and define a new path for the wav file\n file_name = filepath[24:-4]\n new_path = './../data/wavs/' + file_name + '.wav'\n\n # Call the subprocess using ffmpeg to convert the file to wav format (and supress all the output)\n subprocess.call(['ffmpeg', '-i', filepath, new_path], stdout=devnull)\n\n # Increment the counter\n counter += 1", "def main(directory, wavelength=16000, replace=True):\n\n if os.path.isdir(directory):\n # get the directory of mp3 files\n mpthree_files = find_directory__files(directory, 'mp3')\n\n # check whether there are mp3 files\n if len(mpthree_files) > 0:\n # converts all the mp3 files to wav files\n map(lambda x: convert_mp3_to_wav(x, replace=replace), mpthree_files.values())\n\n # now get the wav files after conversion(if any)\n wav_files = find_directory__files(directory, 'wav')\n\n # convert\n map(lambda x: convert_wavelength_file(x, wavelength=wavelength, replace=replace), wav_files.values())\n elif os.path.isfile(directory):\n\n # check if it's a wav\n filetype = find_filetype(directory)\n if filetype != 'wav':\n if filetype == 'mp3':\n convert_mp3_to_wav(directory, replace=replace)\n # get the new file name\n directory = directory.replace('mp3', 'wav')\n else:\n raise ValueError(\"Not a supported filetype at this moment\")\n\n # when filetype == wav or after converting from mp3 to wav\n convert_wavelength_file(directory, wavelength, replace=replace)\n else:\n raise ValueError(\"input is wrong\")", "def list_files_to_convert():\n for root, dirs, files in os.walk(video_dir):\n file_list = [name for name in files if not name.endswith('.mp3')]\n for name in file_list:\n filepath = os.path.join(root, name)\n media_info = MediaInfo.parse(filepath, library_file=dll_path)\n for track in media_info.tracks:\n if 'Audio' in track.track_type:\n # print(track.track_type, track.bit_rate)\n # print(filepath, \"Is an Audio/Video file, and should be converted because a sound track is found\")\n yield dict(path=filepath, info=media_info)", "def collect_files(path, audio_files):\n\n for entry in os.scandir(path):\n if entry.is_dir():\n collect_files(entry.path, audio_files)\n if entry.is_file() and (entry.path.endswith(\".flac\") or entry.path.endswith(\".wav\")):\n audio_files.append(entry.path)", "def mp3_to_wav(song_dir, snip_dir, bird_list_path='bird_list.txt'):\n if os.path.exists(snip_dir):\n shutil.rmtree(snip_dir)\n os.makedirs(snip_dir)\n with open(bird_list_path) as f:\n lines = f.readlines()\n bird_list = [line.rstrip('\\n') for line in lines]\n # Build the bird-labeled subdirectories in 'snip_dir'.\n _make_bird_dirs(snip_dir, birds_list)\n # Populate the subdirectory with recordings converted from .mp3 
to .wav.\n for f in os.listdir(song_dir):\n bird = extract_bird_name(f)\n if bird in birds_list:\n index = birds_list.index(bird)\n wav_filename = os.path.splitext(f)[0].replace(' ', '_') + '.wav'\n orig = os.path.join(mp3_dir, f)\n new = os.path.join(snip_dir, str(index), wav_filename)\n # MP3-to-WAV conversion requires the ffmpeg package.\n call([\"ffmpeg\", \"-i\", orig, new])", "def getAudioFiles(directory):\n\n # Fetch list of files in selected directory\n fileList = os.listdir(directory)\n fileList.sort()\n\n # Create Audio objects\n audioList = []\n for f in fileList:\n if f.endswith('.wav'):\n audioList.append(Audio(directory, f))\n\n return audioList", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if (name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def create_playlist_in_directory(root_directory, directory):\n # Use glob to find .m4a and .mp3 files in the current directory\n audio_files = glob.glob(os.path.join(directory, '*.m4a')) + \\\n glob.glob(os.path.join(directory, '*.mp3'))\n\n audio_files = randomize_array(audio_files)\n\n # If there are any audio files in the current directory\n if audio_files:\n # Create a .m3u8 file in the current directory\n # playlist_name = f\"{os.path.basename(directory)} ({remove_denied_symbols(directory.replace(root_directory, ''))}).m3u8\"\n\n prefix = directory.replace(root_directory, '').replace(\n os.path.basename(directory), '').lstrip(\"/\").lstrip(\"\\\\\")\n\n prefix = prefix.replace(\n \"/\", \", \").replace(\"\\\\\", \", \").replace(\":\", \"-\")\n\n playlist_name = f\"{remove_spaces(prefix).lower()} - {os.path.basename(directory).upper()}.m3u8\"\n\n print(prefix)\n if (prefix == \"\"):\n playlist_name = f\"{os.path.basename(directory).upper()}.m3u8\"\n\n playlist_path = os.path.join(\n root_directory, playlist_name)\n\n print(f'Creating playlist {playlist_path}')\n\n # Write the paths of the audio files to the .m3u8 file\n with open(playlist_path, 'w', encoding='utf-8') as f:\n f.write('#EXTM3U\\n')\n for audio_file in audio_files:\n # Write the path relative to the playlist file\n f.write(audio_file + '\\n')\n\n print(f'Successfully created playlist {playlist_path}')\n else:\n print(f'No audio files found in {directory}')", "def play_audio():\n directory = os.fsencode(MINI_PATH)\n print(directory)\n adp= []\n # lst = os.listdir(directory)\n # lst.sort()\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n #print(file)\n\n if filename.endswith(\".mp3\"): \n adp.append(MINI_PATH+filename)\n #print(adp)\n adp.sort()\n print(\"ADP: \", adp)\n x = \"|\".join(adp)\n print( f'concat:{x}')\n subprocess.call(['ffmpeg', '-i', f'concat:{x}', '-acodec', 
'copy', RESULT_PATH])\n \n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n print(filename)\n if filename.endswith(\".mp3\"):\n os.remove(MINI_PATH+filename)", "def convert(\n album,\n):\n for track in list_dir(album):\n ext = splitext(track)[1]\n if ext != \".mp3\":\n new_track = track.replace(ext, \".mp3\")\n if not exists(new_track):\n track_non_mp3 = AudioSegment.from_file(track, format=ext[1:])\n print(f\"{track} -> {new_track}\")\n track_non_mp3.export(new_track, format=\"mp3\")\n os.remove(track)", "def convert_and_move_dir (dirname, origpath, wavpath, mp4path, mono):\n print(dirname)\n origdirpath = path.join(origpath, dirname)\n wavdirpath = path.join(wavpath, dirname)\n for filename in listdir(origdirpath):\n name, ext = path.splitext(filename)\n if ext == \".mp4\":\n print(filename)\n convert_to_wav(filename, name, origdirpath, wavdirpath, mono)\n\n if not path.exists(mp4path):\n makedirs(mp4path)\n shutil.move(origdirpath, mp4path)", "def main_convert():\n\n verbose = True\n\n # Build parser.\n parser = argparse.ArgumentParser()\n\n parser.add_argument('fname_pattern', action='store', help='File name pattern')\n parser.add_argument('-R', '--recursive', action='store_true', default=True,\n help='Search several subdirectories')\n\n # Run parser, extract arguments.\n args = parser.parse_args()\n\n # List of files.\n pattern = os.path.normpath(unicode(args.fname_pattern))\n\n if os.path.isdir(pattern):\n pattern = os.path.join(pattern, '*')\n fname_list = glob.glob(pattern)\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n pattern = os.path.join(pattern, '*')\n fname_list.extend(glob.glob(pattern))\n\n else:\n fname_list = glob.glob(pattern)\n\n to_be_removed = []\n for f in fname_list:\n if os.path.isdir(f):\n to_be_removed.append(f)\n\n for f in to_be_removed:\n fname_list.remove(f)\n\n # Do the work.\n num_files = len(fname_list)\n for k, f_src in enumerate(fname_list):\n f_src = os.path.abspath(f_src)\n\n b_src, e = os.path.splitext(f_src)\n\n folder = os.path.basename(os.path.dirname(f_src))\n if (e == '.mp3' or e == '.wma' or e == '.wav' or e == '.aiff') and b_src != 'tmp' and folder != '.audio_convert':\n\n if verbose:\n try:\n print('%3d/%d: [%s -> .m4a] %s' % (k, num_files, e, os.path.basename(b_src)))\n except Exception as e:\n val = repr(f_src)\n raise Exception('Problem processing file: %s' % val)\n\n # Temporary working copy.\n path_work = os.path.dirname(f_src)\n f_tmp_src = os.path.join(path_work, 'tmp' + e)\n shutil.copy(f_src, f_tmp_src)\n\n # Transcode file format.\n f_tmp_dst = convert(f_tmp_src, verbose=verbose)\n\n # Finish.\n b_tmp_dst, e_dst = os.path.splitext(f_tmp_dst)\n\n f_dst = b_src + e_dst\n if os.path.isfile(f_dst):\n os.remove(f_dst)\n os.rename(f_tmp_dst, f_dst)\n\n if os.path.isfile(f_tmp_src):\n os.remove(f_tmp_src)\n\n if os.path.isfile(f_dst):\n move_processed_file(f_src)\n\n # Done.", "def convert_multiple(self, video_files, out, brate, _format):\n\n for video in video_files:\n self.to_audio(os.path.abspath(video),\n out, brate, _format)", "def convert_files_sequential(self) -> None:\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n self.convert_file(os.path.join(\n self.audios_dir, file), self.output_format)", "def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, 
trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, 
hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates", "def load_music_files():\n # Make a list of music files, right now it is done by collection all files\n # below the current folder whose extension starts with mp3/wav \n print('Loading music files...')\n for path, dirs, files in os.walk('.'):\n for file_ in files:\n file_path = os.path.relpath(os.path.join(path, file_))\n url_path = os.path.join(*[quote(part) for part in os.path.split(file_path)]) \n ext = os.path.splitext(file_)[1].lower()\n name = os.path.splitext(file_)[0].lower()\n key = ''.join(name.split()) # unique key - no spaces\n audio_file = None\n if ext.startswith('.mp3'):\n audio = MP3(file_path) \n audio_file = AudioFile(url_path, audio.info.length, name, key) \n if audio_file:\n music_files.append(audio_file)\n print('Found:', music_files[-1])", "def load_audio_files(path, single_bar=True):\n\n audios = []\n\n for file_root, dirs, files in os.walk(path):\n for name in files:\n # be careful not to get stuck in wrong files like .DS_Store\n if not re.match(r'.*wav', name):\n continue\n name = os.path.join(file_root, name)\n data, sr = sf.read(name)\n assert sr == 44100\n\n if len(data.shape) == 2 and data.shape[1] == 2:\n data = 0.5 * (data[:, 0] + data[:, 1])\n\n # We only use the 2nd bar out of 4\n if single_bar:\n if data.shape[0] >= 4*44100:\n data = data[2*44100:4*44100]\n else:\n data = data[:2*44100]\n\n data = data.astype(np.float32)\n data = torch.from_numpy(data).unsqueeze(dim=0)\n audios.append(data)\n\n return audios", "def apply_fourier_transform(chunked_audio):\n pass", "def mp3_to_wav(mp3_file, wav_file, encoder='mpg123'):\n if encoder == 'mpg123':\n bash_command = ['mpg123', '-w', wav_file, '--mono', mp3_file]\n else:\n bash_command = ['ffmpeg', '-i', mp3_file, wav_file]\n subprocess.run(bash_command)", "def normalize_volumes_mixmode(directory, amplitude=0.08, ext='.wav'):\n subdirectories = [x[0] for x in os.walk(directory)]\n for subdirectory in subdirectories:\n os.system(f\"normalize-audio -w 16 -a {amplitude} -b '{subdirectory}/'*{ext}\")", "def find_wavs(directory, pattern='**/*.wav'):\n return glob(os.path.join(directory, pattern), recursive=True)", "def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", 
len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data", "def convert_to_wav(mp3_filename):\n\n wav_filename = mp3_filename[:-4] + \".wav\"\n complete_mp3FileName = os.path.join(MP3_FOLDER, mp3_filename)\n complete_wavFileName = os.path.join(WAV_FOLDER, wav_filename)\n\n mp3_file = AudioSegment.from_mp3(complete_mp3FileName)\n mp3_file.export(complete_wavFileName, format=\"wav\")\n\n print(f\"The mp3 file {complete_mp3FileName} was successfully converted to \" \\\n + f\"the wav file {complete_wavFileName}.\")", "def store_samples(self, directory, preprocess_fnc):\n print('Called with', directory)\n out_directory = self._get_directory(preprocess_fnc, directory)\n print('Outdir', out_directory)\n if not os.path.exists(out_directory):\n os.makedirs(out_directory)\n\n print('scanning', os.path.join(self._data_directory, directory))\n\n audio_files = list(iglob_recursive(os.path.join(self._data_directory, directory), '*.flac'))\n print('audio files:', len(audio_files), 'from', os.path.join(self._data_directory, directory))\n with Pool(processes=multiprocessing.cpu_count()) as pool:\n transcript_dict = self._transcript_dict\n\n for audio_file in audio_files:\n audio_id = self._extract_audio_id(audio_file)\n transcript_entry = transcript_dict[audio_id]\n transform_args = (audio_file, preprocess_fnc, transcript_entry, out_directory)\n pool.apply_async(SpeechCorpusReader._transform_and_store_sample, transform_args)\n\n pool.close()\n pool.join()", "def create_many_from_mp3_dir(cls, path_to_mp3_dir):\n songs = []\n path_to_mp3_dir = os.path.abspath(path_to_mp3_dir)\n dirty_mp3_names = os.listdir(path_to_mp3_dir)\n clean_mp3_paths = [\n os.path.join(path_to_mp3_dir, mp3_path) for\n mp3_path in dirty_mp3_names if\n mp3_path.lower().endswith(\".mp3\")\n ]\n\n if not clean_mp3_paths:\n raise EnvironmentError(\"No mp3's found in: %s\" % path_to_mp3_dir)\n\n for mp3_path in clean_mp3_paths:\n songs.append(cls.create_from_mp3_path(mp3_path))\n\n return songs", "def convert_data(data_dir, dname):\n # Get videos from the original dataset\n seq_generator = get_seq(data_dir, dname)\n # Process videos\n for n, (f, k, seq) in enumerate(seq_generator):\n # Create a directory for the video\n f = os.path.splitext(os.path.basename(f))[0]\n dirname = os.path.join(data_dir, 'processed_data', dname, f, f'{k:03d}')\n os.makedirs(dirname)\n # Save all frames in .png files\n for i, img in enumerate(seq):\n img.save(os.path.join(dirname, 
f'{i:03d}.png'), 'PNG')\n print(f'{dirname} ({n + 1})')", "def merge_waves(self):\n dirname = self.dirname\n name = self.get_name()\n videocluster = os.path.join(dirname, name)\n if sys.platform == 'win32':\n videocluster = dirname + '/' + name\n listwaves = os.listdir(videocluster)\n listwaves.sort()\n listw = [os.path.join(videocluster, fil) for fil in listwaves]\n #file_basename = os.path.join(dirname, name)\n if sys.platform == 'win32':\n listw = [videocluster + '/' + fil for fil in listwaves] \n # file_basename = dirname + '/' + name\n self.wave = os.path.join(dirname, name + \".wav\")\n if sys.platform == 'win32':\n self.wave = dirname + '/' + name + \".wav\"\n fm.merge_waves(listw, self.wave)", "def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. 
if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b", "def get_mp3_files(path):\n for dirname, dirnames, filenames in sorted(os.walk(path)):\n for filename in filenames:\n filepath = os.path.join(dirname, filename)\n if is_mp3_file(filepath):\n yield filepath", "def save_all_chunks_with_labels(audio_dir, json_dir, csv_dir):\n for file in os.listdir(json_dir):\n file_path = os.path.join(json_dir, file)\n audio_file_path = os.path.join(audio_dir, file)[:-4] + \"wav\"\n with open(file_path) as f:\n data = json.load(f)\n save_arrays_with_labels(audio_file_path, data, csv_dir)", "def load_all_music(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n songs = {}\n for song in os.listdir(directory):\n name, ext = os.path.splitext(song)\n if ext.lower() in accept:\n songs[name] = os.path.join(directory, song)\n return songs", "def MusicScan():\r\n for root, dirnames, filenames in os.walk(dlPath):\r\n for extend in mustypes:\r\n for filename in fnmatch.filter(filenames, extend):\r\n matches.append(os.path.join(root, filename))\r\n print(os.path.join(root, filename))\r\n shutil.move(os.path.join(root, filename), os.path.join(musicPath, filename))\r\n print color.GREEN + 'File succesfully moved!' 
+ color.ENDC\r\n print 'Finished Scanning For Music'", "def process_wav_files(wav_dir, id_list, out_dir, calc_mvn):\n file_ids = utils.get_file_ids(wav_dir, id_list)\n\n os.makedirs(os.path.join(out_dir, 'f0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'lf0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'vuv'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'sp'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'ap'), exist_ok=True)\n\n @utils.multithread\n def save_wav_to_files(file_id):\n wav_path = os.path.join(wav_dir, '{}.wav'.format(file_id))\n wav = wav_features.Wav(wav_path)\n\n f0, vuv, sp, ap = wav.extract_features()\n\n file_io.save_bin(f0, os.path.join(out_dir, 'f0', '{}.f0'.format(file_id)))\n file_io.save_bin(np.log(f0), os.path.join(out_dir, 'lf0', '{}.lf0'.format(file_id)))\n file_io.save_bin(vuv, os.path.join(out_dir, 'vuv', '{}.vuv'.format(file_id)))\n file_io.save_bin(sp, os.path.join(out_dir, 'sp', '{}.sp'.format(file_id)))\n file_io.save_bin(ap, os.path.join(out_dir, 'ap', '{}.ap'.format(file_id)))\n\n save_wav_to_files(file_ids)\n\n if calc_mvn:\n calclate_mvn_parameters(out_dir, 'f0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'lf0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'vuv', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'sp', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'ap', id_list=id_list, dtype=np.float32)", "def load_all_music(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n songs = {}\n for song in os.listdir(directory):\n name,ext = os.path.splitext(song)\n if ext.lower() in accept:\n songs[name] = os.path.join(directory, song)\n return songs", "def convert_files_parallel(self) -> None:\n file_paths = []\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n file_paths.append(os.path.join(\n self.audios_dir, file))\n with Pool(cpu_count()) as p:\n p.map(self.convert_file, file_paths)", "def _download_audio_files(self, records, target_path):\n\n for record in logger.progress(records):\n audio_folder = os.path.join(target_path, 'audio', record[2])\n audio_file = os.path.join(audio_folder, '{}.mp3'.format(record[0]))\n os.makedirs(audio_folder, exist_ok=True)\n\n download_url = 'https://audio.tatoeba.org/sentences/{}/{}.mp3'.format(record[2], record[0])\n download.download_file(download_url, audio_file)", "def getMusic(self, path):\n music =[]\n files = [ f for f in listdir(path) if isfile(join(path,f))]\n for f in files:\n m = join(path,f)\n #if not f.endswith('.mp3'):\n # files.remove(f)\n if f.endswith('.mp3'):\n music.append(m)\n return music", "def get_audio(name, n):\n audio_path = os.path.join(args.input_folder, name, \"audio.ogg\")\n if not os.path.exists(audio_path):\n ## Some folders have multiple .ogg files, so we need to first combine them into one file. 
Example:\n ## |── Universe\n ##  │   ├── aligned.swc\n ##  │   ├── audio1.ogg\n ##  │   ├── audio2.ogg\n ##  │   ├── audio3.ogg\n ##  │   ├── audio4.ogg\n ##  │   ├── audiometa.txt\n ##  │   ├── info.json\n ##  │   ├── wiki.html\n ##  │   ├── wiki.txt\n ##  │   └── wiki.xml\n\n multiple_ogg_files = []\n for i in range(1, 5):\n path = os.path.join(args.input_folder, name, \"audio\" + str(i) + \".ogg\")\n if os.path.exists(path):\n multiple_ogg_files.append(path)\n else:\n break\n if len(multiple_ogg_files) == 0:\n return\n elif len(multiple_ogg_files) == 1:\n os.system(\"cp \\\"\" + multiple_ogg_files[0] + \"\\\" \\\"\" + audio_path + \"\\\"\")\n else:\n tmp_file_name = \"ffmeg_inputs.txt\"\n print(\"tmp_file_name=\", tmp_file_name)\n with open(tmp_file_name, \"w\", encoding=\"utf-8\") as tmp_file:\n for path in multiple_ogg_files:\n tmp_file.write(\"file '\" + path + \"'\\n\")\n cmd = \"ffmpeg -f concat -i \\\"\" + tmp_file_name + \"\\\" -c copy \\\"\" + audio_path + \"\\\"\"\n print(cmd)\n os.system(cmd)\n\n output_audio_path = args.destination_folder + \"/audio/\" + str(n) + \".ogg\"\n os.system(\"cp \\\"\" + audio_path + \"\\\" \" + output_audio_path)", "def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file", "def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)", "def process_audio(fname, output_dir, poller):\n result = []\n try:\n if poller.params.candidate_transcripts is not None:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".json\")\n else:\n out_path = \"{}/{}{}\".format(output_dir, os.path.splitext(os.path.basename(fname))[0], \".txt\")\n audio, audio_length = load_audio(fname, poller.params.model.sampleRate())\n pred = transcribe_audio(poller.params.model, audio, candidate_transcripts=poller.params.candidate_transcripts)\n with open(out_path, \"w\") as fp:\n fp.write(pred)\n result.append(out_path)\n except KeyboardInterrupt:\n poller.keyboard_interrupt()\n except:\n poller.error(\"Failed to process audio file: %s\\n%s\" % (fname, traceback.format_exc()))\n return result", "def save_combined(self, fname, ffmpeg_output=False, master_volume=1.):\n # setup list to house wav stream data \n inputs = [None]*len(self.out_channels)\n\n # first pass - find max amplitude value to normalise output\n vmax = 0.\n for c in range(len(self.out_channels)):\n vmax = max(\n abs(self.out_channels[str(c)].values.max()),\n abs(self.out_channels[str(c)].values.min()),\n vmax\n ) / master_volume\n \n print(\"Creating temporary .wav files...\")\n \n for c in range(len(self.out_channels)):\n tempfname = f\"./.TEMP_{c}.wav\"\n wav.write(tempfname, \n self.out_channels[str(c)].values,\n self.samprate, \n scale = (-vmax,vmax),\n sampwidth=3)\n inputs[self.channels.forder[c]] = ff.input(tempfname)\n \n print(\"Joning temporary .wav files...\")\n (\n ff.filter(inputs, 'join', inputs=len(inputs), channel_layout=self.channels.setup)\n .output(fname)\n .overwrite_output()\n .run(quiet=~ffmpeg_output)\n )\n \n print(\"Cleaning up...\")\n for c in range(len(self.out_channels)):\n 
os.remove(f\"./.TEMP_{c}.wav\")\n \n print(\"Saved.\")", "def list_directory(path):\n files = []\n for f in listdir(path):\n if isfile(join(path, f)) and f.endswith('.mp3'):\n files.append(f)\n return files", "def extract_audio_from(file, out_dir=''):\n output_filename = f'{os.path.join(out_dir, os.path.basename(file)[:-4])}.wav'\n os.system(f'ffmpeg -i {file} {output_filename}')\n return output_filename", "def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == \"win32\":\n print(\"Running on windows... on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()", "def load_all_sfx(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n effects = {}\n for fx in os.listdir(directory):\n name, ext = os.path.splitext(fx)\n if ext.lower() in accept:\n effects[name] = pg.mixer.Sound(os.path.join(directory, fx))\n return effects", "def edMusFiles(rootDir):\n\n #Check for zip files directory first, then go through music files\n if os.path.exists(os.path.join(rootDir, 'MUSzipFiles')) == False:\n print(\"MUSzipFiles does not exist, trying to make...\")\n os.makedirs(os.path.join(rootDir, 'MUSzipFiles'))\n\n for root, dirs, files in os.walk(rootDir, topdown=False):\n \n for name in files:\n \n if \"desktop.ini\" in name:\n os.remove(os.path.join(root, name))\n print(\"dektop.ini removed!\") \n \n elif \"Folder.jpg\" in name:\n os.remove(os.path.join(root, name))\n print(\"Folder.jpg removed!\")\n \n elif \"_Small.jpg\" in name:\n os.remove(os.path.join(root, name))\n print(\"_Small.jpg removed!\")\n\n elif \"AlbumArtSmall.jpg\" in name:\n os.remove(os.path.join(root, name))\n print(\"AlbumArtSmall.jpg removed!\")\n \n elif \"README\" in name:\n os.remove(os.path.join(root, name))\n print(\"README removed!\")\n \n #Set so that if the file is already in zip file folder, leave it alone.\n elif \".zip\" in name:\n if (os.path.isfile(os.path.join(rootDir, 'MUSzipFiles', name)) == False): \n os.rename(os.path.join(root, name), os.path.join(rootDir, 'MUSzipFiles', name))\n print(\"Moved {0} to MUSzipFiles!\".format(name)) \n\n elif '.7z' in name:\n if (os.path.isfile(os.path.join(rootDir, 'MUSzipFiles', name)) == False):\n os.rename(os.path.join(root, name), os.path.join(rootDir, 'MUSzipFiles', name))\n print(\"Moved {0} to MUSzipFiles!\".format(name))", "def wav_reader(directory):\n wav_list = find_wavs(directory)\n res_list = []\n\n for wav in wav_list:\n temp_list = [wav]\n\n if re.match(r'.*target1.*\\.wav$', wav):\n temp_list.append(True)\n else:\n temp_list.append(False)\n\n res_list.append(tuple(temp_list))\n\n return res_list", "def filterAudioFilesFromFilelist(filelist):\n audioFileList = []\n for audioFilter in filelist:\n audioRoot, audioExt = os.path.splitext(audioFilter)\n if audioExt in ['.wav', '.aiff', '.aif']:\n audioFileList.append(audioFilter)\n # end for loop\n return audioFileList", "def transcribe(self, paths2audio_files: List[str], batch_size: int = 4) -> List[str]:\n pass", "def transcribe_audio_files(model, input_dir, output_dir, tmp_dir=None,\n poll_wait=1.0, continuous=False, use_watchdog=False, watchdog_check_interval=10.0,\n delete_input=False, beam_width=None, scorer=None, lm_alpha=None, lm_beta=None,\n hot_words=None, candidate_transcripts=None, verbose=False, quiet=False):\n\n if verbose:\n 
print(\"Loading model: %s\" % model)\n ds = load_model(model, beam_width=beam_width, scorer=scorer, lm_alpha=lm_alpha, lm_beta=lm_beta, hot_words=hot_words)\n\n poller = Poller()\n poller.input_dir = input_dir\n poller.output_dir = output_dir\n poller.tmp_dir = tmp_dir\n poller.extensions = SUPPORTED_EXTS\n poller.delete_input = delete_input\n poller.verbose = verbose\n poller.progress = not quiet\n poller.check_file = None\n poller.process_file = process_audio\n poller.poll_wait = poll_wait\n poller.continuous = continuous\n poller.use_watchdog = use_watchdog\n poller.watchdog_check_interval = watchdog_check_interval\n poller.params.model = ds\n poller.params.candidate_transcripts = candidate_transcripts\n poller.poll()", "def find_files(directory, pattern='**/*.wav'):\n return glob(os.path.join(directory, pattern), recursive=True)", "def main(\n):\n music_home = \"/home/banana/music\"\n for inode in list_dir(music_home):\n if basename(inode) in [\n \"annotate\",\n \"metadata\",\n \"sped-up\",\n \"tracklists\",\n ] or isfile(inode):\n continue\n convert(inode)", "def move_tracks_to_music_folder(self):\n home = os.path.expanduser(\"~\")\n dest = home + \"/Music/\"\n for each_file, artist in self.past_songs_db_data:\n sub_folder = artist + \"/\" if artist != \"\" else \"\" \n # possible race condition\n if not os.path.exists(dest + sub_folder):\n os.makedirs(dest + sub_folder)\n\n if os.path.isfile(each_file) and \\\n not os.path.isfile(dest + each_file): \n shutil.move(each_file, dest + sub_folder)", "def create_mp3():\n\n #TODO: les roles ne devraient pas etre en dur\n list_all_roles = [\n [],\n [\"morgan\"],\n [\"oberon\"],\n [\"mordred\"],\n [\"morgan\", \"oberon\"],\n [\"morgan\", \"mordred\"],\n [\"oberon\", \"mordred\"],\n [\"morgan\", \"oberon\", \"mordred\"]\n ]\n\n for list_roles in list_all_roles:\n\n list_mp3 = [\"init.mp3\", \"serv_mord.mp3\"]\n if \"oberon\" in list_roles:\n list_mp3.append(\"oberon.mp3\")\n list_mp3.append(\"red_identi.mp3\")\n\n if \"morgan\" in list_roles:\n list_mp3.append(\"add_per_mor.mp3\")\n\n list_mp3.append(\"serv_mord.mp3\")\n if \"mordred\" in list_roles:\n list_mp3.append(\"mordred.mp3\")\n list_mp3.extend([\"merlin_identi.mp3\", \"end.mp3\"])\n\n mp3_combined = AudioSegment.empty()\n for mp3 in list_mp3:\n mp3_combined += AudioSegment.from_mp3(\"resources/{}\".format(mp3))\n\n mp3_combined.export(\"resources/_{}.mp3\".format('-'.join(sorted(list_roles))), format=\"mp3\")", "def flusi_to_wabbit_dir(dir_flusi, dir_wabbit , *args, **kwargs ):\n import re\n import os\n import glob\n\n if not os.path.exists(dir_wabbit):\n os.makedirs(dir_wabbit)\n if not os.path.exists(dir_flusi):\n err(\"The given directory does not exist!\")\n\n files = glob.glob(dir_flusi+'/*.h5')\n files.sort()\n for file in files:\n\n fname_wabbit = dir_wabbit + \"/\" + re.split(\"_\\d+.h5\",os.path.basename(file))[0]\n\n flusi_to_wabbit(file, fname_wabbit , *args, **kwargs )", "def on_created(self, event):\n extensions_watched = [\".m4a\", \".wav\"]\n \n if event.is_directory:\n return\n \n filepath, ext = os.path.splitext(event.src_path) \n\n if ext in extensions_watched:\n # self.tp.apply_async(self._convert,(self.convert_to_mp3(event.src_path,filepath),) )\n # self.tp.apply_async(self._convert,(self.convert_to_ogg(event.src_path,filepath),) )\n self.tp.apply_async(self._convert,(self.convert_to_m4a(event.src_path,filepath),) )\n \n return", "def to_audio(self, _in, _out, bitrate, file_format):\n\n # Default output parameter\n # If not current directory, append '/'\n if 
os.path.isdir(_out):\n _out = '' if _out == '.' else _out + '/'\n _out += self.get_name_from_path(_in,\n replace=True) + '.' + file_format\n _out = _out.replace('//', '/')\n self.out = _out\n\n # File format unchecked for single inputs\n if not check_is_video(_in):\n msg = \" is not a supported media type\"\n self.abort_conversion(\n self.get_name_from_path(_in) + msg)\n\n \"\"\"\n else:\n base_name = os.path.basename(_out)\n ext = os.path.splitext(base_name)[1]\n _out = _out.replace(ext, '.mp3')\n \"\"\"\n commands = ['ffmpeg', '-i', _in,\n '-vn', '-ar', '44100',\n '-ac', '2', '-ab',\n bitrate, _out]\n try:\n self.run_convert_commands(commands)\n except FileNotFoundError as er:\n res = require_ffmepg()\n\n if not res:\n self.abort_conversion(\"Dependecy not installed.\")", "def check_wav(song, source_folder, temp_folder, encoder='mpg123'):\n # Name of files\n song_name, extension = os.path.splitext(song)\n mp3_file = os.path.join(source_folder, song)\n if '.wav' != extension:\n wav_file = os.path.join(temp_folder, song_name + '.wav')\n try:\n if not os.path.isfile(wav_file):\n mp3_to_wav(\n mp3_file=mp3_file,\n wav_file=wav_file,\n encoder=encoder)\n else:\n pass\n except MemoryError:\n logger.error('MemoryError: %s MP3 couldn\\'t be transformed into WAV', song_name)\n else: # Already a wav file\n copyfile(mp3_file, os.path.join(temp_folder, song_name))", "def mix_audio_and_video(self):\n\t\tself._logger.info('Starting ffmpeg...')\n\t\tsp.run('ffmpeg -v 0 -i {0} -i {1} -c:v copy '\n\t\t '-c:a aac -strict experimental {2}'\n\t\t .format(self.TEMP_FOLDER + self._video_manager.output_file_name,\n\t\t self.TEMP_FOLDER + self._audio_manager.output_file_name,\n\t\t self.SAVE_FOLDER + self._video_manager.output_file_name))\n\t\tself._logger.info('Save done. 
Output file: {}'\n\t\t .format(self._video_manager.output_file_name))\n\t\tself._logger.info('Cleaning the temp folder...')\n\t\tfor file in glob.glob(self.TEMP_FOLDER + '*'):\n\t\t\tos.remove(file)", "def outputs(folderName):\n for i in itertools.count(1):\n yield io.open('%s/Video_%s.h264' %\n (folderName,\n datetime.now().strftime('%Y_%m_%d_%H_%M_%S')),\n 'wb')", "def batch_analyze_wav(self, filePaths):\n\n toCSV = self.settings['output']['toCSV']\n toJSON = self.settings['output']['toJSON']\n\n start = time.time()\n\n fileTotal = 0\n for path in filePaths:\n if os.path.isdir(path):\n blockName = os.path.basename(path)\n print(f'Block: {blockName}')\n\n files = [os.path.join(path, file) for file in os.listdir(path) if '.wav' in file]\n fileTotal += len(files)\n\n if toCSV:\n if not os.path.exists(os.path.join(path, 'fft_results_csv')):\n os.makedirs(os.path.join(path, 'fft_results_csv'))\n resultFilePath = os.path.join(path, 'fft_results_csv')\n\n print('Processing FFTs...')\n with multiprocessing.Pool(processes=4) as pool:\n results = pool.starmap(Utils.AnalyzeFFT, zip(files, itertools.repeat(True),\n itertools.repeat(True)))\n results = [result for result in results if result is not None]\n\n peaks = [result[0] for result in results]\n ffts = [result[1] for result in results]\n\n print('Writing to .csv...')\n resultFileName = os.path.join(resultFilePath, f'{blockName}_Peaks.csv')\n peakFrames = pd.concat(peaks)\n peakFrames.to_csv(resultFileName, index=False, header=True)\n with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:\n executor.map(self.multi_csv_write, ffts)\n\n if toJSON:\n if not os.path.exists(os.path.join(path, 'fft_results_json')):\n os.makedirs(os.path.join(path, 'fft_results_json'))\n print(os.path.join(path, 'fft_results_json'))\n\n print('Processing FFTs...')\n with multiprocessing.Pool(processes=4) as pool:\n results = pool.starmap(Utils.AnalyzeFFT, zip(files, itertools.repeat(True),\n itertools.repeat(False),\n itertools.repeat(True)))\n results = [result for result in results if result is not None]\n\n print('Writing to .json...')\n with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:\n executor.map(self.multi_json_write, results)\n\n end = time.time()\n print(f'**Done!** {len(filePaths)} blocks with {fileTotal} files took {round(end-start, 1)}s')", "def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files", "def collect_music_results(self, folder, database_filename=\"results.db\"):\n # get list of successful events\n file_list = glob(path.join(path.abspath(folder), '*'))\n print('There are %d events in total' % len(file_list))\n success_folder_list = []\n for ifolder in range(len(file_list)):\n folder_name = file_list[ifolder]\n if path.isdir(path.join(folder_name, 'tmp', 'outputs')):\n success_folder_list.append(folder_name)\n print('There are %d events are successfully processed'\n % len(success_folder_list))\n\n # the data collection loop\n db = SqliteDB(path.join(folder, database_filename))\n print(\"-\"*60)\n print(\"Collecting results from MUSIC outputs...\")\n print(\"-\"*60)\n\n for ifolder in range(len(success_folder_list)):\n folder_name = success_folder_list[ifolder]\n event_id = ifolder\n 
print(\"Collecting %s as with event-id: %s\"\n % (folder_name, event_id))\n # collect results from one hydro event\n self.collect_music_event(folder_name, event_id, db)", "def export_wav(self, folder, name_fmt=\"{:02d}.wav\", dtype=np.int16):\n data = np.atleast_2d(self.in_time)\n\n assert data.ndim == 2\n assert np.all(np.abs(data) <= 1.0)\n\n # convert and scale to new output datatype\n if dtype in [np.uint8, np.int16, np.int32]:\n lim_orig = (-1.0, 1.0)\n lim_new = (np.iinfo(dtype).min, np.iinfo(dtype).max)\n data = _rescale(data, lim_orig, lim_new).astype(dtype)\n elif dtype != np.float32:\n raise TypeError(f\"dtype {dtype} is not supported by scipy.wavfile.write.\")\n\n path = Path(folder)\n if not path.is_dir():\n path.mkdir(parents=True, exist_ok=False)\n\n for i in range(data.shape[0]):\n wavfile.write(path / name_fmt.format(i + 1), self.fs, data[i])", "def process_files(audio_files, context=[]):\n\n results = []\n bar_limit = len(audio_files)\n client = speech.SpeechClient()\n with Bar('Processing:', max=bar_limit) as bar:\n for audio in audio_files:\n response = convert_speech_to_text(client, audio, context)\n (transcription, confidence) = transcript(response)\n results.append({\n \"path\": audio,\n \"transcription\": transcription,\n \"confidence\": confidence\n })\n bar.next()\n return results", "def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')", "def get_filenames():\r\n datadir = \"./phase3_data/\"\r\n samples = os.listdir(datadir)\r\n all_files = []\r\n for i in range(len(samples)):\r\n sampfiles = []\r\n datadir = \"./phase3_data/\" + samples[i]\r\n files = os.listdir(datadir)\r\n for file in files:\r\n if file.endswith(\".bin\"):\r\n sampfiles += [file]\r\n all_files += [sampfiles]\r\n return samples, all_files", "def download_audio(url,output_dir,ffmpeg_dir):\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'ffmpeg_location': ffmpeg_dir,\n 
'outtmpl': output_dir + '/%(title)s.%(ext)s',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'mp3',\n 'preferredquality': '192',\n }],\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])", "def test_bunch_of_files(self):\n bunch = [\"1.тест.mp3\", \"2.smash.mp3\", \"3.дdд.mp3\"]\n expected = [\"1.test.mp3\", \"2.smash.mp3\", \"3.ddd.mp3\"]\n for audio in bunch:\n f = open(audio, 'w+')\n f.close()\n audios = filter(lambda x: x.endswith(\".mp3\"), os.listdir())\n for audio in audios:\n rename_audio(audio)\n audios = filter(lambda x: x.endswith(\".mp3\"), os.listdir())\n for a, b in zip(audios, expected):\n print(a, b)\n for filename, expectation in zip(audios, expected):\n self.assertEqual(filename, expectation)", "def convert_to_wav (filename, name, origpath, wavpath, mono):\n print(\"Converting {0} to .wav...\".format(filename))\n if not re.match(r\".*_\\d+$\",name):\n # If filenames do include video titles\n name = name.rsplit('_',1)[0]\n\n channel, vid_num = name.rsplit('_', 1)\n channel = re.sub(r'[^A-Za-z1-9]', '', channel)\n newname = '_'.join([channel, vid_num])\n\n exportname = newname + \".wav\"\n filepath = path.join(origpath, filename)\n\n if not path.exists(wavpath):\n makedirs(wavpath)\n exportPath = path.join(wavpath, exportname)\n sound = AudioSegment.from_file(filepath,\"mp4\")\n if mono == True:\n sound = sound.set_channels(1)\n sound.export(exportPath, format=\"wav\")", "def process_files(lab_dir, wav_dir, id_list, out_dir, state_level, question_file, subphone_feat_type, calc_mvn=False):\n file_ids = utils.get_file_ids(lab_dir, id_list)\n _file_ids = utils.get_file_ids(wav_dir, id_list)\n\n if len(file_ids) != len(_file_ids) or sorted(file_ids) != sorted(_file_ids):\n raise ValueError(\"Please provide id_list, or ensure that wav_dir and lab_dir contain the same files.\")\n\n os.makedirs(out_dir, exist_ok=True)\n\n # Linguistic feature directories.\n os.makedirs(os.path.join(out_dir, 'lab'), exist_ok=True)\n if subphone_feat_type is not None:\n os.makedirs(os.path.join(out_dir, 'counters'), exist_ok=True)\n\n # Acoustic feature directories.\n os.makedirs(os.path.join(out_dir, 'f0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'lf0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'vuv'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'sp'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'ap'), exist_ok=True)\n\n # Sequence length feature directories.\n os.makedirs(os.path.join(out_dir, 'dur'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'n_frames'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'n_phones'), exist_ok=True)\n\n questions = lab_features.QuestionSet(question_file)\n subphone_features = lab_features.SubphoneFeatureSet(subphone_feat_type)\n\n @utils.multithread\n def save_lab_and_wav_to_files(file_id):\n lab_path = os.path.join(lab_dir, '{}.lab'.format(file_id))\n label = lab_features.Label(lab_path, state_level)\n\n if subphone_feat_type is None:\n numerical_labels = label.normalise(questions, upsample_to_frame_level=False)\n else:\n numerical_labels, counter_features = label.normalise(questions, subphone_features, False)\n\n wav_path = os.path.join(wav_dir, '{}.wav'.format(file_id))\n wav = wav_features.Wav(wav_path)\n f0, vuv, sp, ap = wav.extract_features()\n lf0 = np.log(f0)\n\n # Often there is a small difference in number of frames between labels and vocoder features.\n durations = label.phone_durations\n n_frames = sum(durations)\n diff = n_frames - f0.shape[0]\n\n if diff > 
len(durations):\n raise ValueError(\"Number of label frames and vocoder frames is too different for {name}\\n\"\n \"\\tvocoder frames {voc}\\n\"\n \"\\tlabel frames {lab}\\n\"\n \"\\tnumber of phones {phones}\"\n .format(name=file_id, voc=f0.shape[0], lab=n_frames, phones=len(durations)))\n\n # Remove excess durations if there is a shape mismatch.\n if diff > 0:\n # Remove 1 frame from each phone's duration starting at the end of the sequence.\n durations[-diff:] -= 1\n n_frames = f0.shape[0]\n\n assert n_frames == sum(durations)\n\n make_feature_path = lambda name: os.path.join(out_dir, name, '{}.{}'.format(file_id, name))\n\n # Save linguistic features in binary .npy files.\n file_io.save_bin(numerical_labels, make_feature_path('lab'))\n if subphone_feat_type is not None:\n file_io.save_bin(counter_features[:n_frames], make_feature_path('counters'))\n\n # Save acoustic features in binary .npy files.\n file_io.save_bin(f0[:n_frames], make_feature_path('f0'))\n file_io.save_bin(lf0[:n_frames], make_feature_path('lf0'))\n file_io.save_bin(vuv[:n_frames], make_feature_path('vuv'))\n file_io.save_bin(sp[:n_frames], make_feature_path('sp'))\n file_io.save_bin(ap[:n_frames], make_feature_path('ap'))\n\n # Save sequence length features in text files.\n file_io.save_txt(durations, make_feature_path('dur'))\n file_io.save_txt(n_frames, make_feature_path('n_frames'))\n file_io.save_txt(len(label.phones), make_feature_path('n_phones'))\n\n # Save dimensionality of linguistic and acoustic features to text files.\n make_dim_path = lambda name: os.path.join(out_dir, '{}.dim'.format(name))\n\n file_io.save_txt(numerical_labels.shape[1], make_dim_path('lab'))\n if subphone_feat_type is not None:\n file_io.save_txt(counter_features.shape[1], make_dim_path('counters'))\n\n file_io.save_txt(f0.shape[1], make_dim_path('f0'))\n file_io.save_txt(lf0.shape[1], make_dim_path('lf0'))\n file_io.save_txt(vuv.shape[1], make_dim_path('vuv'))\n file_io.save_txt(sp.shape[1], make_dim_path('sp'))\n file_io.save_txt(ap.shape[1], make_dim_path('ap'))\n\n save_lab_and_wav_to_files(file_ids)\n\n if calc_mvn:\n calclate_mvn_parameters(out_dir, 'dur', id_list=id_list, is_npy=False)\n calclate_mvn_parameters(out_dir, 'f0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'lf0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'vuv', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'sp', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'ap', id_list=id_list, dtype=np.float32)", "def extract_audio(file_name, audio_directory):\n basename = os.path.splitext(os.path.basename(file_name))[0]\n audio_file_name = audio_directory + '/' + basename + '.wav'\n subprocess.call(['ffmpeg', '-y', '-i', file_name, '-ac', '1', audio_file_name])\n return audio_file_name", "def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])", "def encode_movie(dir):\n root, ext = 'movie', 'avi'\n for i in itertools.count():\n path = '.'.join([root + str(i).zfill(5), 
ext])\n\n if not os.path.exists(path):\n break\n\n call(['mencoder', 'mf://' + dir + '/*.png', '-mf', 'fps=10', '-o',\n path, '-ovc', 'xvid', '-xvidencopts', 'bitrate=3000'])\n\n shutil.rmtree(dir)\n\n print('movie saved to %s.' % path)", "def test_audio_to_target_dataset(self):\n # Data setup\n random_seed = 42\n sample_rate = 16000\n num_examples = 25\n data_num_channels = {\n 'input_signal': 4,\n 'target_signal': 2,\n }\n data_min_duration = 2.0\n data_max_duration = 8.0\n data_key = {\n 'input_signal': 'input_filepath',\n 'target_signal': 'target_filepath',\n }\n\n # Tolerance\n atol = 1e-6\n\n # Generate random signals\n _rng = np.random.default_rng(seed=random_seed)\n\n # Input and target signals have the same duration\n data_duration = np.round(_rng.uniform(low=data_min_duration, high=data_max_duration, size=num_examples), 3)\n data_duration_samples = np.floor(data_duration * sample_rate).astype(int)\n\n data = dict()\n for signal, num_channels in data_num_channels.items():\n data[signal] = []\n for n in range(num_examples):\n if num_channels == 1:\n random_signal = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples[n]))\n else:\n random_signal = _rng.uniform(low=-0.5, high=0.5, size=(num_channels, data_duration_samples[n]))\n data[signal].append(random_signal)\n\n with tempfile.TemporaryDirectory() as test_dir:\n\n # Build metadata for manifest\n metadata = []\n\n for n in range(num_examples):\n\n meta = dict()\n\n for signal in data:\n # filenames\n signal_filename = f'{signal}_{n:02d}.wav'\n\n # write audio files\n sf.write(os.path.join(test_dir, signal_filename), data[signal][n].T, sample_rate, 'float')\n\n # update metadata\n meta[data_key[signal]] = signal_filename\n\n meta['duration'] = data_duration[n]\n metadata.append(meta)\n\n # Save manifest\n manifest_filepath = os.path.join(test_dir, 'manifest.json')\n write_manifest(manifest_filepath, metadata)\n\n # Test 1\n # - No constraints on channels or duration\n dataset = AudioToTargetDataset(\n manifest_filepath=manifest_filepath,\n input_key=data_key['input_signal'],\n target_key=data_key['target_signal'],\n sample_rate=sample_rate,\n )\n\n # Also test the corresponding factory\n config = {\n 'manifest_filepath': manifest_filepath,\n 'input_key': data_key['input_signal'],\n 'target_key': data_key['target_signal'],\n 'sample_rate': sample_rate,\n }\n dataset_factory = audio_to_audio_dataset.get_audio_to_target_dataset(config)\n\n # Test number of channels\n for signal in data:\n assert data_num_channels[signal] == dataset.num_channels(\n signal\n ), f'Num channels not correct for signal {signal}'\n assert data_num_channels[signal] == dataset_factory.num_channels(\n signal\n ), f'Num channels not correct for signal {signal}'\n\n # Test returned examples\n for n in range(num_examples):\n item = dataset.__getitem__(n)\n item_factory = dataset_factory.__getitem__(n)\n\n for signal in data:\n item_signal = item[signal].cpu().detach().numpy()\n golden_signal = data[signal][n]\n assert (\n item_signal.shape == golden_signal.shape\n ), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'\n assert np.allclose(\n item_signal, golden_signal, atol=atol\n ), f'Test 1: Failed for example {n}, signal {signal} (random seed {random_seed})'\n\n item_factory_signal = item_factory[signal].cpu().detach().numpy()\n assert np.allclose(\n item_factory_signal, golden_signal, atol=atol\n ), f'Test 1: Failed for factory example {n}, signal {signal} (random seed {random_seed})'\n\n # Test 2\n # 
- Filtering based on signal duration\n min_duration = 3.5\n max_duration = 7.5\n\n dataset = AudioToTargetDataset(\n manifest_filepath=manifest_filepath,\n input_key=data_key['input_signal'],\n target_key=data_key['target_signal'],\n min_duration=min_duration,\n max_duration=max_duration,\n sample_rate=sample_rate,\n )\n\n filtered_examples = [n for n, val in enumerate(data_duration) if min_duration <= val <= max_duration]\n\n for n in range(len(dataset)):\n item = dataset.__getitem__(n)\n\n for signal in data:\n item_signal = item[signal].cpu().detach().numpy()\n golden_signal = data[signal][filtered_examples[n]]\n assert (\n item_signal.shape == golden_signal.shape\n ), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'\n assert np.allclose(\n item_signal, golden_signal, atol=atol\n ), f'Test 2: Failed for example {n}, signal {signal} (random seed {random_seed})'\n\n # Test 3\n # - Use channel selector\n channel_selector = {\n 'input_signal': [0, 2],\n 'target_signal': 1,\n }\n\n dataset = AudioToTargetDataset(\n manifest_filepath=manifest_filepath,\n input_key=data_key['input_signal'],\n target_key=data_key['target_signal'],\n input_channel_selector=channel_selector['input_signal'],\n target_channel_selector=channel_selector['target_signal'],\n sample_rate=sample_rate,\n )\n\n for n in range(len(dataset)):\n item = dataset.__getitem__(n)\n\n for signal in data:\n cs = channel_selector[signal]\n item_signal = item[signal].cpu().detach().numpy()\n golden_signal = data[signal][n][cs, ...]\n assert (\n item_signal.shape == golden_signal.shape\n ), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'\n assert np.allclose(\n item_signal, golden_signal, atol=atol\n ), f'Test 3: Failed for example {n}, signal {signal} (random seed {random_seed})'\n\n # Test 4\n # - Use fixed duration (random segment selection)\n audio_duration = 4.0\n audio_duration_samples = int(np.floor(audio_duration * sample_rate))\n\n filtered_examples = [n for n, val in enumerate(data_duration) if val >= audio_duration]\n\n for random_offset in [True, False]:\n # Test subsegments with the default fixed offset and a random offset\n\n dataset = AudioToTargetDataset(\n manifest_filepath=manifest_filepath,\n input_key=data_key['input_signal'],\n target_key=data_key['target_signal'],\n sample_rate=sample_rate,\n min_duration=audio_duration,\n audio_duration=audio_duration,\n random_offset=random_offset, # random offset when selecting subsegment\n )\n\n for n in range(len(dataset)):\n item = dataset.__getitem__(n)\n\n golden_start = golden_end = None\n for signal in data:\n item_signal = item[signal].cpu().detach().numpy()\n full_golden_signal = data[signal][filtered_examples[n]]\n\n # Find random segment using correlation on the first channel\n # of the first signal, and then use it fixed for other signals\n if golden_start is None:\n golden_start = get_segment_start(\n signal=full_golden_signal[0, :], segment=item_signal[0, :]\n )\n if not random_offset:\n assert (\n golden_start == 0\n ), f'Expecting the signal to start at 0 when random_offset is False'\n\n golden_end = golden_start + audio_duration_samples\n golden_signal = full_golden_signal[..., golden_start:golden_end]\n\n # Test length is correct\n assert (\n item_signal.shape[-1] == audio_duration_samples\n ), f'Test 4: Signal length ({item_signal.shape[-1]}) not matching the expected length ({audio_duration_samples})'\n\n assert (\n item_signal.shape == 
golden_signal.shape\n ), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'\n # Test signal values\n assert np.allclose(\n item_signal, golden_signal, atol=atol\n ), f'Test 4: Failed for example {n}, signal {signal} (random seed {random_seed})'\n\n # Test 5:\n # - Test collate_fn\n batch_size = 16\n batch = [dataset.__getitem__(n) for n in range(batch_size)]\n batched = dataset.collate_fn(batch)\n\n for n, signal in enumerate(data.keys()):\n signal_shape = batched[2 * n].shape\n signal_len = batched[2 * n + 1]\n\n assert signal_shape == (\n batch_size,\n data_num_channels[signal],\n audio_duration_samples,\n ), f'Test 5: Unexpected signal {signal} shape {signal_shape}'\n assert len(signal_len) == batch_size, f'Test 5: Unexpected length of signal_len ({len(signal_len)})'\n assert all(signal_len == audio_duration_samples), f'Test 5: Unexpected signal_len {signal_len}'", "def _init_wave_files(self, files, directory):\n\n # 2048 triggers bug in https://github.com/adafruit/circuitpython/issues/3030\n self._file_buf = bytearray(512) # DO NOT CHANGE size til #3030 is fixed\n\n missing = []\n fhs = {}\n for file in files:\n wav_file = None\n filename = directory + \"/\" + file + \".wav\"\n try:\n wav_file = open(filename, \"rb\")\n fhs[file] = WaveFile(wav_file, self._file_buf)\n except OSError:\n # OSError: [Errno 2] No such file/directory: 'filename.ext'\n missing.append(filename)\n\n # Raises an exception at the end to allow it to report ALL\n # of the missing files in one go to help out the user\n if missing:\n raise SampleJukeboxError(missing)\n self._wave_files = fhs", "def convert_all(base_path: Path,\n dest_path: Path,\n count: int,\n max_size: int) -> None:\n os.makedirs(DEST_FOLDER, exist_ok=True)\n os.makedirs(CONVERTED_VIDEOS_FOLDER, exist_ok=True)\n\n processes_count = mp.cpu_count() - 1 or 1\n with mp.Pool(processes_count) as pool:\n pool.starmap(\n convert_file_to_mp4, files(base_path, dest_path, count, max_size)\n )", "def forward(self, audio):\n feature_extractor = self.feature_extractor\n wave_gan = self.wave_gan\n pqmf = self.pqmf\n use_noise_input = self.use_noise_input\n config = self.config\n pad_fn = self.pad_fn\n\n # Added for processing single audio file as in deepspeech armory [Sonal 29Oct20]\n if audio.ndim == 1:\n num_samples = audio.shape[0]\n mel_spectrogram = feature_extractor.transform(audio)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=mel_spectrogram.device,\n )\n inputs += (noise,)\n\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n return reconstructed_audio\n\n else:\n reconstructions = []\n num_samples = audio.shape[1]\n for idx in range(audio.shape[0]):\n recording = audio[idx, :]\n mel_spectrogram = feature_extractor.transform(recording)\n # Setup inputs\n inputs = ()\n if use_noise_input:\n noise = torch.randn(\n 1,\n 1,\n len(mel_spectrogram) * config[\"hop_size\"],\n device=recording.device,\n )\n inputs += (noise,)\n mel_spectrogram = pad_fn(mel_spectrogram.unsqueeze(0).transpose(2, 1))\n inputs += (mel_spectrogram,)\n # Generate\n if 
config[\"generator_params\"][\"out_channels\"] == 1:\n reconstructed_audio = wave_gan(*inputs).view(-1)\n reconstructed_audio = reconstructed_audio[:num_samples]\n else:\n reconstructed_audio = pqmf.synthesis(wave_gan(*inputs)).view(-1)\n reconstructed_audio = reconstructed_audio[:, :num_samples]\n reconstructions.append(reconstructed_audio)\n return torch.stack(reconstructions)", "def create_playlist(directory, recursive):\n\n root_directory = directory\n\n if recursive:\n # Use os.walk to go through the directory and all its subdirectories\n for root, dirs, files in os.walk(directory):\n create_playlist_in_directory(root_directory, root)\n else:\n create_playlist_in_directory(root_directory, directory)", "def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_list_fname):\n raise IOError(\"Not found file {}\".format(all_fnames_list_fname))\n all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,\n skiprows=1)\n # Correct from pgm to jpg\n all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]\n\n all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \\\n in all_fnames_list]\n\n self.samples += len(all_fnames_list)\n # Append the last\n self.image_filenames.append(all_fnames_list)", "def convert2mel(audio,base_path,fs, n_fft,fmax,n_mels,hop_length_samples, window_lenght,type_training):\n\n path = os.path.join(base_path, audio)\n if type_training != \"train\":\n if os.path.isfile(os.path.join(base_path,\"processed_wavs_train\",audio)):\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_train\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data,_ = librosa.core.load(os.path.join(base_path,\"processed_wavs_test\",audio), sr=fs, res_type=\"kaiser_best\")\n else:\n data, _ = librosa.core.load(path, sr=fs, res_type=\"kaiser_best\")\n data = normalize_amplitude(data)\n\n powSpectrum = np.abs(stft(data+ 0.00001,n_fft,hop_length = hop_length_samples, win_length = window_lenght, window = windowing(window_lenght, sym=False), center=True, pad_mode='reflect'))**2\n\n mels = melspectrogram(y= None,n_fft=n_fft ,sr=fs ,S= powSpectrum, hop_length= hop_length_samples ,n_mels=n_mels,fmax=fmax , fmin = 0.0).T\n mels = librosa.core.power_to_db(mels, ref=np.min(mels))\n mels = mels / np.max(mels)\n\n return mels.T", "def pack_audio_files_to_hdf5(args):\n\n # Arguments & parameters\n dataset_dir = args.dataset_dir\n workspace = args.workspace\n data_type = args.data_type\n mini_data = args.mini_data\n\n sample_rate = config.sample_rate\n audio_length = config.audio_length\n classes_num = config.classes_num\n lb_to_idx = config.lb_to_idx\n frames_per_second = config.frames_per_second\n frames_num = frames_per_second * config.audio_duration\n\n has_strong_target = data_type in ['testing', 'evaluation']\n\n # Paths\n audios_dir = os.path.join(dataset_dir, data_type)\n weak_label_csv_path = os.path.join(dataset_dir, 'metadata', \n get_weak_csv_filename(data_type))\n\n if data_type == 'testing':\n strong_label_csv_path = os.path.join(dataset_dir, 'metadata', \n 'groundtruth_strong_label_testing_set.csv')\n elif data_type == 'evaluation':\n strong_label_csv_path = os.path.join(dataset_dir, 'metadata', \n 'groundtruth_strong_label_evaluation_set.csv')\n\n if mini_data:\n packed_hdf5_path = os.path.join(workspace, 'features', \n 'minidata_{}.waveform.h5'.format(data_type))\n else:\n 
packed_hdf5_path = os.path.join(workspace, 'features', \n '{}.waveform.h5'.format(data_type))\n create_folder(os.path.dirname(packed_hdf5_path))\n\n # Read metadata\n weak_meta_list = read_weak_csv(weak_label_csv_path, data_type)\n\n # Use a small amount of data for debugging\n if mini_data:\n random.seed(1234)\n random.shuffle(weak_meta_list)\n weak_meta_list = weak_meta_list[0 : 100]\n\n audios_num = len(weak_meta_list)\n\n feature_time = time.time()\n with h5py.File(packed_hdf5_path, 'w') as hf:\n hf.create_dataset(\n name='audio_name', \n shape=(audios_num,), \n dtype='S80')\n\n hf.create_dataset(\n name='waveform', \n shape=(audios_num, audio_length), \n dtype=np.int32)\n\n hf.create_dataset(\n name='weak_target', \n shape=(audios_num, classes_num), \n dtype=np.float32)\n\n if has_strong_target:\n strong_meta_dict = read_strong_csv(strong_label_csv_path) \n \n hf.create_dataset(\n name='strong_target', \n shape=(0, frames_num, classes_num), \n maxshape=(None, frames_num, classes_num), \n dtype=np.bool)\n\n for n in range(audios_num):\n print(n)\n weak_meta_dict = weak_meta_list[n]\n audio_name = weak_meta_dict['audio_name']\n audio_path = os.path.join(audios_dir, audio_name)\n (audio, fs) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n audio = pad_truncate_sequence(audio, audio_length)\n\n hf['audio_name'][n] = audio_name.encode()\n hf['waveform'][n] = float32_to_int16(audio)\n hf['weak_target'][n] = weak_target = get_weak_target(\n weak_meta_dict['labels'], lb_to_idx)\n\n if has_strong_target:\n strong_target = get_strong_target(\n weak_meta_dict['audio_name'][1:], strong_meta_dict, \n frames_num, frames_per_second, lb_to_idx)\n \n hf['strong_target'].resize((n + 1, frames_num, classes_num))\n hf['strong_target'][n] = strong_target\n\n print('Write hdf5 to {}'.format(packed_hdf5_path))\n print('Time: {:.3f} s'.format(time.time() - feature_time))", "def youtube_dl_mp3(url, directory=expanduser('~/')):\n outtmpl = directory + '%(id)s.%(ext)s'\n options = {\n 'format': 'bestaudio/best', # choice of quality\n 'extractaudio' : True, # only keep the audio\n 'audioformat' : \"mp3\", # convert to mp3\n 'outtmpl': outtmpl, # name the file the title of the video\n 'noplaylist' : True, # only download single song, not playlist\n }\n\n ydl = youtube_dl.YoutubeDL(options)\n ydl.download([url])\n info = ydl.extract_info(url, download=False)\n\n path = directory + info['id'] + '.' 
+ info['ext']\n print '[crawler] path to downloaded audio: %s' % path\n return {\n 'path': path,\n 'title': info['title']\n }", "def wav_wav(orig, dest, **_kwargs):\n\n # options = kwargs.get(\"tree\").cmd_options.get(\"options\", [])\n\n # first demux it to 16 bit 48khz\n dest_list = []\n for index, orig_elem in enumerate(tools.get_iter(orig)):\n tmp_dest = os.path.join(\n os.path.dirname(dest),\n \"{0}_{1}\".format(index, os.path.basename(dest)))\n cmd = \"ffmpeg -i {orig} -acodec pcm_s16le -ar 48000 {dest}\".format(\n dest=tmp_dest,\n orig=orig_elem)\n logger.debug(cmd)\n try:\n subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n continue\n dest_list.append(tmp_dest)\n\n if len(dest_list) > 1:\n cmd = \"sox {orig} {dest}\".format(\n orig=\" \".join(orig),\n dest=dest)\n logger.debug(cmd)\n try:\n subprocess.check_call(cmd, shell=True)\n except subprocess.CalledProcessError as error:\n logger.error(error)\n logger.error(tools.to_unicode(error.output))\n else:\n os.rename(dest_list[0], dest)\n return dest", "def get_data(path):\n if path.endswith('.mp3'):\n path = prepare_file(path, path.rstrip('mp3')+'wav')\n x, sr = librosa.load(path, duration=30)\n\n else:\n x, sr = librosa.load(path, duration=30)\n directory, file_name = os.path.split(path)\n return x, sr, file_name", "def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = 
utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")", "def change_file(paths, object):\r\n \"\"\" \r\n for x in os.listdir(dirs): print(x) \r\n \"\"\"\r\n check_path = paths + \"\\changer\"\r\n\r\n for x in object:\r\n pass_data = False\r\n name = x.split('.')[0]\r\n for y in os.listdir(check_path):\r\n if name in y:\r\n pass_data = True\r\n\r\n if pass_data:\r\n pass\r\n else:\r\n vid = VideoFileClip(os.path.join(paths, \"\", x))\r\n\r\n vid.audio.write_audiofile(os.path.join(\r\n paths, \"changer\", name+\".mp3\")\r\n )\r\n show_result(check_path)", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def create_folders(CD):\n \n path_recordings = CD['path_recordings']\n path_temp_recordings = CD['path_temp_recordings']\n \n # Create the recordings folder if it does not already exist\n if not os.path.isdir(path_recordings):\n os.mkdir(path_recordings)\n \n # Create the temporary audio files folder or clear it if it already exists\n if not os.path.isdir(path_temp_recordings):\n os.mkdir(path_temp_recordings)\n else:\n for f in os.listdir(path_temp_recordings):\n os.remove(path_temp_recordings+f)", "def audiofile(self, directory=None):\n\n return self.make_path(directory, '.wav')", "def compress_datasets(directory_path: str, holdout: float) -> None:\n\n dataset_path = Path(directory_path)\n sar_sets = get_sar_paths(directory_path)\n make_directory_dataset(directory_path)\n divide_sar_files(dataset_path, sar_sets, holdout)\n remove_subdirectories(directory_path)", "def concatenate_audio_files(input_paths, output_path):\n if not input_paths:\n raise ValueError(\"Empty input paths\")\n pure_path = pathlib.PurePath(input_paths[0])\n audio_seg = pydub.AudioSegment.from_file(pure_path, 
pure_path.suffix[1:])\n for input_path in input_paths[1:]:\n pure_path = pathlib.PurePath(input_path)\n audio_seg += pydub.AudioSegment.from_file(pure_path, pure_path.suffix[1:])\n pure_path = pathlib.PurePath(output_path)\n output_format = pure_path.suffix[1:].lower()\n audio_seg.export(pure_path, output_format)\n if output_format != \"wav\":\n audio_seg.export(pure_path.with_suffix(\".wav\"), \"wav\")\n return len(audio_seg) / 1e3", "def RunData(files, wavelength=None, out='testdata'):\n for i, file in enumerate(files):\n forwardModel(file=file, out='results/%s%i' % (out, i), wavelength=wavelength)", "def create_melspectrogram_dataset(label_folder='electronic_music/Trance_label/Train/', save_folder='song_mel_label_data',\n sr=44100, n_mels=128, n_fft=2048, hop_length=512, song_duration=180.0,\n create_data=False):\n if create_data:\n # get list of all labels\n os.makedirs(save_folder, exist_ok=True)\n labels = [path for path in os.listdir(label_folder) if os.path.isdir(label_folder + path)]\n\n # iterate through all lables, songs and find mel spectrogram\n for label in labels:\n print('{} \\n'.format(label))\n label_path = os.path.join(label_folder, label)\n label_songs = os.listdir(label_path)\n\n for song in label_songs:\n print(song)\n song_path = os.path.join(label_path, song)\n\n # Create mel spectrogram for song_duration in the middle of the song and convert it to the log scale\n audio = MP3(song_path)\n audio_lenght = int(audio.info.length)\n audio_middle = (audio_lenght - int(song_duration))/2\n y, sr = librosa.load(song_path, sr=sr, offset=audio_middle, duration=song_duration)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n data = (label, log_S, song)\n\n # Save each song\n save_name = label + '_%%-%%_' + song\n with open(os.path.join(save_folder, save_name), 'wb') as fp:\n dill.dump(data, fp)", "def convert_urban_pcm24_to_pcm16():\n src_dir = ['/data1/data/UrbanSound8K/audio/fold{:d}'.format(i+1) for i in range(10)]\n dst_dir = ['/data1/data/UrbanSound8K-16bit/audio/fold{:d}'.format(i+1) for i in range(10)]\n converted_wav_paths = []\n for dsrc, ddst in zip(src_dir, dst_dir):\n maybe_create_directory(ddst)\n wav_files = filter(lambda FP: FP if FP.endswith('.wav') else None, \n [FP for FP in os.listdir(dsrc)])\n for wav_file in wav_files:\n src_wav, dst_wav = os.path.join(dsrc, wav_file), os.path.join(ddst, wav_file)\n convert_wav(src_wav, dst_wav, subtype='PCM_16')\n converted_wav_paths.append(dst_wav)\n print('converted count:', len(converted_wav_paths))\n print(converted_wav_paths, len(converted_wav_paths))", "def _process_utterance(pml_dir, wav_dir, index, wav_path, pml_path, hparams):\n try:\n # Load the audio as numpy array\n wav = audio.load_wav(wav_path)\n except FileNotFoundError: # catch missing wav exception\n print('file {} present in csv metadata is not present in wav folder. 
skipping!'.format(\n wav_path))\n return None\n\n # rescale wav\n if hparams.rescale:\n wav = wav / np.abs(wav).max() * hparams.rescaling_max\n\n # Assert all audio is in [-1, 1]\n if (wav > 1.).any() or (wav < -1.).any():\n raise RuntimeError('wav has invalid value: {}'.format(wav_path))\n\n # Mu-law quantize\n if is_mulaw_quantize(hparams.input_type):\n # [0, quantize_channels)\n out = mulaw_quantize(wav, hparams.quantize_channels)\n\n constant_values = mulaw_quantize(0, hparams.quantize_channels)\n out_dtype = np.int16\n\n elif is_mulaw(hparams.input_type):\n # [-1, 1]\n out = mulaw(wav, hparams.quantize_channels)\n constant_values = mulaw(0., hparams.quantize_channels)\n out_dtype = np.float32\n\n else:\n # [-1, 1]\n out = wav\n constant_values = 0.\n out_dtype = np.float32\n\n # Get the PML features from the cmp file\n pml_cmp = np.fromfile(pml_path, dtype=np.float32)\n pml_features = pml_cmp.reshape((-1, hparams.pml_dimension))\n pml_frames = pml_features.shape[0]\n\n if pml_frames > hparams.max_pml_frames and hparams.clip_pmls_length:\n return None\n\n # Find parameters\n n_fft = (hparams.num_freq - 1) * 2\n\n if hparams.use_lws:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n l, r = audio.pad_lr(wav, n_fft, audio.get_hop_size(hparams))\n\n # Zero pad audio signal\n out = np.pad(out, (l, r), mode='constant', constant_values=constant_values)\n else:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n l_pad, r_pad = audio.librosa_pad_lr(wav, n_fft, audio.get_hop_size(hparams))\n\n # Reflect pad audio signal (Just like it's done in Librosa to avoid frame inconsistency)\n out = np.pad(out, (l_pad, r_pad), mode='constant', constant_values=constant_values)\n\n # print(len(out), pml_frames, audio.get_hop_size(hparams), pml_frames * audio.get_hop_size(hparams))\n assert len(out) >= pml_frames * audio.get_hop_size(hparams)\n\n # time resolution adjustment\n # ensure length of raw audio is multiple of hop size so that we can use\n # transposed convolution to upsample\n out = out[:pml_frames * audio.get_hop_size(hparams)]\n assert len(out) % audio.get_hop_size(hparams) == 0\n time_steps = len(out)\n\n # Write the spectrogram and audio to disk\n audio_filename = os.path.join(wav_dir, 'audio-{}.npy'.format(index))\n pml_filename = os.path.join(pml_dir, 'pml-{}.npy'.format(index))\n np.save(audio_filename, out.astype(out_dtype), allow_pickle=False)\n np.save(pml_filename, pml_features, allow_pickle=False)\n\n # global condition features\n if hparams.gin_channels > 0:\n raise RuntimeError('When activating global conditions, please set your speaker_id rules in line 129 of '\n 'datasets/wavenet_preprocessor.py to use them during training')\n else:\n speaker_id = '<no_g>'\n\n # Return a tuple describing this training example\n return audio_filename, pml_path, pml_filename, speaker_id, time_steps, pml_frames", "def create_directory_list(root_dir: str):\n if not os.path.exists(root_dir):\n raise FileNotFoundError(\"Directory {} does not exist\".format(root_dir))\n\n # List all directories associated to different videos.\n recording_path_list = [os.path.join(root_dir, f) for f in os.listdir(root_dir)]\n\n input_data_path = []\n for g in recording_path_list:\n # Append the different directories associated to different video frame intervals.\n input_data_path.extend([os.path.join(g, f) for f in os.listdir(g)])\n\n return input_data_path", "def getAudioFileFromFilelist(audiofiltered):\n for audioFile in audiofiltered:\n audioRoot, audioExt = 
os.path.splitext(audioFile)\n if audioExt in ['.wav', '.aiff', '.aif']:\n return audioFile" ]
[ "0.7115496", "0.6985259", "0.67233366", "0.6686839", "0.65235364", "0.64989144", "0.63002807", "0.6277914", "0.6166281", "0.61311907", "0.61195964", "0.606834", "0.6029333", "0.60196835", "0.6009603", "0.5915913", "0.5896318", "0.5880936", "0.58191925", "0.5811102", "0.58104247", "0.58071506", "0.580469", "0.57833445", "0.5747106", "0.5729291", "0.5729148", "0.57266265", "0.57223344", "0.57106704", "0.5680814", "0.5668575", "0.56666255", "0.5663736", "0.5660149", "0.56444526", "0.562979", "0.56191707", "0.5609232", "0.55919087", "0.55834055", "0.5582529", "0.5558983", "0.5558641", "0.5536887", "0.55102676", "0.5509247", "0.5508883", "0.55076224", "0.54948366", "0.5494047", "0.54783463", "0.54548794", "0.5447619", "0.5417633", "0.5409999", "0.540843", "0.5408052", "0.5400204", "0.538617", "0.53786474", "0.53783053", "0.53766274", "0.5375554", "0.537134", "0.53687197", "0.53585094", "0.53552645", "0.5347756", "0.53454417", "0.53405905", "0.5336998", "0.5336375", "0.5335291", "0.5332388", "0.5323127", "0.5315961", "0.5314145", "0.53080946", "0.5306353", "0.5299632", "0.52978134", "0.5291841", "0.5290332", "0.52748454", "0.5264242", "0.52623546", "0.5259665", "0.5249575", "0.5247187", "0.52438635", "0.52359354", "0.5225462", "0.52246875", "0.5217952", "0.5217113", "0.52169544", "0.52102417", "0.5205447", "0.5200849" ]
0.6515147
5
Initialize the SingleImage object
def __init__(self, camera): self.__camera = camera self.__innerOrientationParameters = None self.__isSolved = False self.__exteriorOrientationParameters = np.array([0, 0, 0, 0, 0, 0], 'f') self.__rotationMatrix = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, image):\n self.image = image", "def __init__(self, img):\n self.img = img", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def __init__(self, image: np.ndarray) -> None:\n self.image = image", "def __init__(self, image1, image2):\n self.image1 = image1\n self.image2 = image2", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def __init__(self, ms, srcdict=None):\n Imaging.__init__(self, ms)\n self.srcdict = srcdict", "def initialize(self):\n super(WXImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)", "def __init__(self, img, settings):\r\n self.img_orig = img\r\n self.settings = settings", "def OnInit(self):\r\n self.imageID = self.loadImage()", "def OnInit( self ):\n self.imageID = self.loadImage ()", "def __init__(self):\n super().__init__()\n self._active = False\n # Counter, used in the animation\n self._time = 0\n # Store the current image id, initially it's 'default'\n self._image = 'default'", "def initialize(self):\n super(QtImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)", "def __init__(self) -> None:\n self.registration_method = None\n self.fixed_image = None\n self.moving_image = None\n self.transform = None", "def _init(self):\n # A string of the last image taken\n self.last_image = None\n\n # Number of images captured\n self.image_count = 0\n\n # Duration tracking. 
Set high to cause capture on first load\n self._duration_start = -(60 * 60 * 24)", "def __init__(__self__,\n resource_name: str,\n args: ImageInitArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def initialise(self):\r\n self.set_image(\"wall.png\")\r\n return self", "def init(self):\n imageDim = u.getDimImage(self.length, 0, 0, 78) # 54.5, 42.3, 66.17\n self.imageInfo['ratio'] = u.getRatio(self.imageInfo['shape'],\n imageDim)\n\n self.measuring = pymeasuring.Measuring(self.imageInfo, self.length)\n\n # rospy.loginfo(\"dims of image [mm]: \" + str(imageDim))\n # rospy.loginfo(\"ratios [mm/px]: \" + str(self.imageInfo['ratio']))\n # rospy.loginfo(\"shape [px]: \" + str(self.imageInfo['shape']))\n rospy.loginfo('init of measuring object is complete.')", "def __init__(self, image_size=224):\n super().__init__()\n raster_settings = {'image_size': image_size, 'blur_radius': 0.0, 'faces_per_pixel': 1, 'bin_size': None, 'max_faces_per_bin': None, 'perspective_correct': False}\n raster_settings = dict2obj(raster_settings)\n self.raster_settings = raster_settings", "def __init__(self, image_path, color = 'g'):\n self.image_path = image_path\n self.color = color", "def __init__(self):\n super(ISimpleITKImageMetric, self).__init__()\n self.metric = 'ISimpleITKImageMetric'\n self.ground_truth = None # SimpleITK.Image\n self.segmentation = None # SimpleITK.Image", "def __init__(self, image_size: tuple):\n # todo: Check that the next line gets both the X and Y size. May need two variables.\n self.__screen_size = pyautogui.size()\n self.__ratio = get_cursor_ratio(image_size, self.__screen_size)\n\n self.__enabled = True", "def __init__(self, rect, image):\n self.rect = rect\n self.image = image\n self.state = self.S_ACTIVE", "def __init__(self, version, image):\n self.version = version\n self.major_version = get_major_version(self.version)\n self.image = image", "def __init__(self, width, height):\n self._image = tk.PhotoImage(master=root, width = width, height = height)\n self.fill((0,0,0))", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def __init__(self,imageObject):\n getJsonEnv()\n self.meta=imageObject\n self.imageNames=self.getImageName()\n self.gcsBucket=self.setUpCredentials()\n self.sources = []", "def __init__(self,bits):\n self.image_bits = bits", "def __init__(self, *args, **kwargs):\n super(AscatL2Image, self).__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n if len(args) > 0 and isinstance(args[0], Image):\n # Pretend it is a hs signal, copy axes and metadata\n sdict = args[0]._to_dictionary()\n Image.__init__(self, **sdict)\n else:\n Image.__init__(self, *args, **kwargs)", "def __init__(self):\n self.__img = None\n self.__mask = None\n self.__colorThresholds = ColorFilter.DEFAULT_COLOR_THRESHOLDS", "def setUp(self):\n self.gray_image = np.ndarray((100, 200), dtype=np.uint8)\n self.rgb_image = np.ndarray((100, 200, 3), dtype=np.uint8)", "def __init__(self):\n # Effective batch size\n self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT\n\n # Input image size\n if self.IMAGE_RESIZE_MODE == \"crop\":\n self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM,\n self.IMAGE_CHANNEL_COUNT])\n else:\n self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM,\n self.IMAGE_CHANNEL_COUNT])\n\n # Image meta data length\n # See compose_image_meta() for details\n self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES", "def __init__(self, input):\n try:\n img = 
Image.open(input).resize(ImageVector.size)\n self.pixels = img.getdata()\n except:\n self.pixels = input", "def __init__(self) -> None:\n try:\n # TurboJPEG checks for libturbojpeg\n # when its created, but it imports\n # numpy which may or may not work so\n # we have to guard the import here.\n from turbojpeg import TurboJPEG # pylint: disable=import-outside-toplevel\n\n TurboJPEGSingleton.__instance = TurboJPEG()\n except Exception: # pylint: disable=broad-except\n _LOGGER.exception(\n \"Error loading libturbojpeg; Cameras may impact HomeKit performance\"\n )\n TurboJPEGSingleton.__instance = False", "def __init__(self, *args, **kwargs):\n _gdi_.Bitmap_swiginit(self,_gdi_.new_Bitmap(*args, **kwargs))", "def __init__(self, image_id):\n\n self.image_id = image_id\n self.encoding = None\n self.records = None\n self.img = None\n self.contours = None\n\n logging.info(\"Image id: {}\".format(self.image_id))", "def __init__(self, filename):\n\t\tself.im_raw = sunpy.map.Map(filename)\n\t\ttry:\n\t\t\tself.B0 = self.im_raw.meta['B0']\n\t\texcept KeyError:\n\t\t\tself.B0 = self.im_raw.meta['OBS_B0']\n\t\ttry:\n\t\t\tself.L0 = self.im_raw.meta['L0']\n\t\texcept KeyError:\n\t\t\tself.L0 = self.im_raw.meta['OBS_L0']\n\t\ttry:\n\t\t\tself.X0 = self.im_raw.meta['X0']\n\t\texcept KeyError:\n\t\t\tself.X0 = self.im_raw.meta['IMG_X0']\n\t\ttry:\n\t\t\tself.Y0 = self.im_raw.meta['Y0']\n\t\texcept KeyError:\n\t\t\tself.Y0 = self.im_raw.meta['IMG_Y0']\n\t\tif self.im_raw.detector == 'SPMG':\n\t\t\tself.rsun = self.im_raw.rsun_obs.value / self.im_raw.meta['SCALE']\t\n\t\telse:\n\t\t\tself.rsun = self.im_raw.rsun_obs.value", "def __init__(self, filename=None, image=None):\n self.image = image\n self.filename = filename\n self.metadata = Metadata()\n self.metadata['Convolved'] = False\n if self.image is None and filename is not None and os.path.exists(filename): # read the image from file\n self.from_file(filename)", "def __init__(self, *args):\n _itkRGBAPixelPython.itkRGBAPixelUS_swiginit(self,_itkRGBAPixelPython.new_itkRGBAPixelUS(*args))", "def setUp(self):\n self.image = np.random.randint(\n 0, 256, size=(10, 10, 3)).astype('uint8')", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.camera = Camera.instance()\n\n # define directories and file paths\n date_str = datetime.today().strftime(\"%Y-%m-%d-%H-%M-%S\")\n self.log_dir = f\"{const.Storage.DATA}/{date_str}\"\n self.img_dir = f\"{self.log_dir}/img/\"\n self.log_path = f\"{self.log_dir}/log.csv\"\n self.img_extension = \"npy\"\n\n # ensure that the necessary directories exist\n os.mkdir(self.log_dir)\n os.mkdir(self.img_dir)\n assert os.path.isdir(self.log_dir), \"data directory could not be created\"\n assert os.path.isdir(self.img_dir), \"image directory could not be created\"", "def __init__(self, smallImageUrl=None, largeImageUrl=None):\n default_attr = dict(smallImageUrl=str(),\n largeImageUrl=str())\n self.smallImageUrl = smallImageUrl\n self.largeImageUrl = largeImageUrl\n self._set_default_attr(default_attr)", "def __init__(self):\n self.reader = vtk.vtkImageData()\n\n self.dims = self.reader.GetDimensions()\n self.bounds = self.reader.GetBounds()\n self.spacing = self.reader.GetSpacing()\n self.origin = self.reader.GetOrigin()\n self.value_range = self.reader.GetScalarRange()\n\n # self.plane_widget_x = vtk.vtkImagePlaneWidget()\n # self.plane_widget_y = vtk.vtkImagePlaneWidget()\n # self.plane_widget_z = vtk.vtkImagePlaneWidget()\n\n self.flag_read = False", "def __init__(self, pins, 
image):\n self._pins = pins\n self._image = image", "def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode([c.SCREEN_WIDTH, c.SCREEN_HEIGHT])\n\n # initialise background to grey\n self.screen.fill(pygame.Color(100, 100, 100, 100))\n\n self.car = pygame.image.load(c.CAR_IMAGE).convert()\n self.red_light = pygame.image.load(c.LIGHT_IMAGE_RED).convert()\n self.green_light = pygame.image.load(c.LIGHT_IMAGE_GREEN).convert()", "def __init__(self, data, (x,y)):\n\t\tGameImage.__init__(self, data)\n\t\tself.coords = (x,y)", "def __call__(self):\n Texture()", "def __init__(self, *args):\n _gdi_.NativePixelData_swiginit(self,_gdi_.new_NativePixelData(*args))", "def initImg(self):\n self.img = Image.new('RGBA',(self.width,self.height),color='#' + getConfigPart(self.theme,\"bg\"))\n self.draw = ImageDraw.Draw(self.img)", "def small_image(self):\n pass", "def __init__(self):\n\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.image.load('assets/' + 'singleLaser.png')\n\n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()\n self.rect.center = (settings.SCREEN_WIDTH / 2, settings.SCREEN_HEIGHT / 2)", "def __init__(self, image=None):\n self.openapi_types = {\"image\": ImageInfoSummary}\n\n self.attribute_map = {\"image\": \"image\"}\n\n self._image = image", "def __init__(self, *args, **kwargs):\n if len(args) > 0 and isinstance(args[0], (Image, CImage)):\n # Pretend it is a hs signal, copy axes and metadata\n sdict = args[0]._to_dictionary()\n CImage.__init__(self, **sdict)\n else:\n CImage.__init__(self, *args, **kwargs)", "def __init__(self,f=None,new=False) :\n if new:\n self.matrix=Image.new('1',(1,1))\n else:\n self.matrix=Image.open(f)", "def __init__(self, group, image, x, y, tile_size):\n\t\tsuper().__init__(group, image, x, y, tile_size)", "def init_graphics(self):\n if type(self.image_ref) is Surface:\n # This is the case for the special visual effect\n self.image = self.image_ref\n else:\n image = GLOBAL.img(self.image_ref)\n if type(image) is tuple:\n # for decode purpose\n self.image = Surface(TILESIZE_SCREEN)\n self.image.fill(image)\n elif type(image) is list or type(image) is dict:\n self.animated = True\n self.current_frame = 0\n self.last_update = 0\n if type(image) is list:\n self.list_image = image\n self.image = self.list_image[self.current_frame]\n else:\n self.last_direction = (1, 0)\n self.dict_image = image\n self.image = self.dict_image['E'][self.current_frame]\n else:\n self.image = image\n self._reposition_rect()", "def __init__(self, l_i):\n self.l_i = l_i\n self.reset_img()", "def __init__(self, *args, **kwargs):\n _gdi_.ImageList_swiginit(self,_gdi_.new_ImageList(*args, **kwargs))", "def setUp(self):\n self.new_image = Images(image=\"image.jpg\", image_name=\"roses\", caption=\"live\",\n user_id=1, user='Joy', likes=0, posted_on=\"111-2019\")", "def __init__(self):\n super(HybridImageModel, self).__init__()", "def __init__ (self):\r\n\r\n self.path = 'c:\\\\python22\\\\work\\\\'\r\n self.bfh_vals = (BM, 0, 0, 0, 0)\r\n self.bih_vals = (BIH_SIZE, 0, 0, 1, 32, 0, 0, 0, 0, 0, 0)\r\n self.the_file = None\r\n self.image = []\r\n self.colourmap = []", "def __init__(self, **kwargs):\n super(ImageExporter, self).__init__(**kwargs)", "def load_image(self, 
**kwargs):\n ...", "def __init__(self, parent=None):\n self.image_float_fg = None\n self.image_float_bg = None\n self.image_8bit_fg = None\n self.image_8bit_bg = None\n self.image_unchanged_fg = None\n self.image_unchanged_bg = None\n self.blur = -1\n self.closing = -1\n self.thresh = -1\n self.ready = True", "def __init__(self, *args):\n _itkRGBAPixelPython.itkRGBAPixelF_swiginit(self,_itkRGBAPixelPython.new_itkRGBAPixelF(*args))", "def __init__(self, metadata_folder='./'):\n self.metadata = self.load_metadata(metadata_folder)\n self.prefix = 'data/miap/images/'\n return", "def _setup_new_image(self):\n\n if not self._viewer is None:\n if not self._input_image is None:\n self._viewer.SetInput(self._input_image)\n else:\n self._viewer.SetInput(self._dummy_image_source.GetOutput())\n\n ii = self._viewer.GetInput()\n \n ii.UpdateInformation()\n ii.Update()\n range = ii.GetScalarRange()\n self._viewer.SetColorWindow(range[1] - range[0])\n self._viewer.SetColorLevel(0.5 * (range[1] + range[0]))\n \n icp = self._view_frame._image_control_panel\n icp.slider.SetRange(self._viewer.GetSliceMin(),\n self._viewer.GetSliceMax())\n icp.slider.SetValue(self._viewer.GetSliceMin())\n \n #self._viewer.UpdateDisplayExtent()\n self._viewer.GetRenderer().ResetCamera()", "def __init__(self, *center_point_and_pixmap):\n GraphicsObject.__init__(self, []) # initialize\n if len(center_point_and_pixmap) == 1: # assume image\n self.anchor = None\n self.pixmap = center_point_and_pixmap[0]\n elif len(center_point_and_pixmap) == 2: # assume point, image\n self.anchor = center_point_and_pixmap[0].clone()\n self.pixmap = center_point_and_pixmap[1]\n else:\n raise AttributeError, \"invalid parameters to Image(); need 1 or 2\"\n\n\n self.imageId = Image.idCount # give this image a number\n Image.idCount = Image.idCount + 1 # increment global counter\n\n\n\t#New code by JWS to work with a Picture, Filename, or Pixmap.\n\tif type(self.pixmap) == type(\"\"): # assume a filename\n picture = Picture()\n picture.load(self.pixmap)\n self.pixmap = makePixmap(picture)\n self.img = self.pixmap.image\n\n elif type(self.pixmap) == Picture : #Create from a picture\n self.pixmap = makePixmap(self.pixmap)\n self.img = self.pixmap.image\n\n else: #Otherwise, assume they gave us a valid pixmap!\n self.img = self.pixmap.image", "def __init__(self):\n\n # Call the parent class (sprite) constructor\n super().__init__()\n # Create image of block and fill with color.\n self.image = pygame.Surface([20, 20])\n self.image.fill(BLACK)\n\n # Fetch rectangle object that has dimensions of image. 
Update position of object by setting values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def __init__(self, image, input_size) -> None:\n self.input_image = image\n if isinstance(image, str):\n self.input_image = self._load_image(image)\n self.orig_size = self.input_image.shape[:2]\n self.input_size = input_size\n # 'Resize' is TRUE if input_size is not equal to image size (orig_size).\n self.resize = True if self.input_size != self.orig_size else False\n\n self._image_nd = self._preprocessing(self.input_image)", "def __init__(self, nb_sub_images, window_size, recovery, image_horiz_size):\n self.nb_sub_images = nb_sub_images\n self.window_size = window_size\n self.recovery = recovery\n self.image_horiz_size = image_horiz_size", "def set_default_image(self, image):\n raise NotImplementedError", "def __init__(self, width, height):\n\n self._width = width\n self._height = height\n \n # The images in the cache!\n self._images = {} # {filename : bitmap}\n\n return", "def __init__(__self__,\n resource_name: str,\n args: StreamingImageArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(self, image_prefix=\"\", **kwargs):\n LUIObject.__init__(self)\n self.set_size(\"100%\", \"100%\")\n self._prefix = image_prefix\n self._parts = {}\n for i in self._MODES:\n self._parts[i] = LUISprite(self, \"blank\", \"skin\")\n self._update_layout()\n LUIInitialState.init(self, kwargs)", "def __init__(self):\n self.hmd = None\n self.vr_render_models = None\n self.render_width = 0\n self.render_height = 0", "def __init__(self, image, extent, starting_target_band, band_maps=None,\n source_crs=\"epsg:4326\", extent_crs=\"epsg:4326\"):\n\n self.image = image\n self.extent = extent\n self.starting_target_band = starting_target_band\n self.band_maps = band_maps or self.create_band_maps()\n self.source_crs = source_crs\n self.extent_crs = extent_crs", "def _initialize_src(self):\n\n self._img_list = []\n if type(self._src) is str:\n\n # Parse a directory of png files\n if os.path.isdir(self._src):\n self._img_list = glob.glob(os.path.join(self._src,\"*.png\"))\n self._img_list.sort()\n\n # Parse a video file\n elif os.path.isfile(self._src):\n self._img_list = pyfx.util.video_to_array(self._src)\n\n # Error\n else:\n err = \"source type is not recognized.\\n\"\n raise ValueError(err)\n\n # Parse a list of image files\n elif type(self._src) is list or type(self._src) is tuple:\n\n for x in self._src:\n if not os.path.isfile(x):\n err = \"input file {} not found\\n\".format(x)\n raise FileNotFoundError(err)\n\n self._img_list = copy.copy(self._src)\n\n else:\n err = \"could not parse src of type {}\\n\".format(type(self._src))\n raise ValueError(err)\n\n self._current_time = 0\n self._max_time = len(self._img_list) - 1\n self._shape = pyfx.util.to_array(self._img_list[0],num_channels=1).shape", "def __init__(self, focallength_px=None, focallength_x_px=None, focallength_y_px=None, center_x_px=None,\n center_y_px=None, center=None, focallength_mm=None, image_width_px=None, image_height_px=None,\n sensor_width_mm=None, sensor_height_mm=None, image=None, sensor=None, view_x_deg=None,\n view_y_deg=None):\n # either no image specified\n if image is not None and (image_width_px or image_height_px):\n raise ValueError(\"cannot provide both an image shape tuple and image_width_px or image_height_px\")\n # ... 
or both dimensions of the image specified\n if image is None and (image_width_px is None or image_height_px is None):\n raise ValueError(\"need to provide either parameter 'image' or 'image_width_px' and 'image_height_px'\")\n\n if image is not None:\n try:\n image_height_px, image_width_px = image.shape[:2]\n except AttributeError:\n image_width_px, image_height_px = image\n\n \"\"\" if the image center is specified \"\"\"\n # if the center is specified\n if center and (center_x_px or center_y_px):\n raise ValueError(\"cannot provide center tuple and center_x_px and center_y_px\")\n if center is not None:\n center_x_px, center_y_px = center\n if center_x_px is None:\n center_x_px = image_width_px / 2\n if center_y_px is None:\n center_y_px = image_height_px / 2\n\n \"\"\" different init cases \"\"\"\n # init using focal length and image dimensions\n if focallength_px or focallength_x_px or focallength_y_px:\n # only a generic focal length or individual ones\n if focallength_px and (focallength_x_px or focallength_y_px):\n raise ValueError(\"cannot provide both an focallength_px and focallength_x_px or focallength_y_px\")\n if focallength_mm is not None:\n raise ValueError(\"cannot provide both an focallength_mm and a focallength in px\")\n if focallength_px:\n focallength_x_px = focallength_px\n focallength_y_px = focallength_px\n\n if sensor and (sensor_width_mm or sensor_height_mm):\n raise ValueError(\"cannot provide both an sensor shape tuple and sensor_width_mm or sensor_height_mm\")\n if sensor:\n sensor_width_mm, sensor_height_mm = sensor\n if sensor_width_mm and sensor_height_mm is None:\n sensor_height_mm = sensor_width_mm / image_width_px * image_height_px\n if sensor_width_mm is None and sensor_height_mm:\n sensor_width_mm = sensor_height_mm / image_height_px * image_width_px\n\n if focallength_mm:\n if sensor_width_mm is None or sensor_height_mm is None:\n raise ValueError(\"when using focallength_mm a sensor size has to be provided\")\n focallength_x_px = focallength_mm / sensor_width_mm * image_width_px\n focallength_y_px = focallength_mm / sensor_height_mm * image_height_px\n\n if view_x_deg or view_y_deg:\n self.image_width_px = image_width_px\n self.image_height_px = image_height_px\n if view_x_deg:\n if focallength_x_px is not None:\n raise ValueError(\"cannot set both focallength_x_px and view_x_deg\")\n focallength_x_px = self.focallengthFromFOV(view_x=view_x_deg)\n if not view_y_deg:\n focallength_y_px = focallength_x_px\n if view_y_deg:\n if focallength_y_px:\n raise ValueError(\"cannot set both focallength_y_px and view_y_deg\")\n focallength_y_px = self.focallengthFromFOV(view_y=view_y_deg)\n if not view_x_deg:\n focallength_x_px = focallength_y_px\n\n if focallength_x_px is None or focallength_y_px is None:\n raise ValueError(\"Either provide a focal length or a field of view angle.\")\n\n self.parameters = ParameterSet(\n # the intrinsic parameters\n focallength_x_px=Parameter(focallength_x_px, type=TYPE_INTRINSIC), # the focal length in px\n focallength_y_px=Parameter(focallength_y_px, type=TYPE_INTRINSIC), # the focal length in px\n center_x_px=Parameter(center_x_px, default=0, type=TYPE_INTRINSIC), # the focal length in mm\n center_y_px=Parameter(center_y_px, default=0, type=TYPE_INTRINSIC), # the focal length in mm\n image_height_px=Parameter(image_height_px, type=TYPE_INTRINSIC), # the image height in px\n image_width_px=Parameter(image_width_px, type=TYPE_INTRINSIC), # the image width in px\n sensor_height_mm=Parameter(sensor_height_mm, default=13.0, 
type=TYPE_INTRINSIC), # the sensor height in mm\n sensor_width_mm=Parameter(sensor_width_mm, default=17.3, type=TYPE_INTRINSIC), # the sensor width in mm\n )\n\n # add parameter focallength_px that sets x and y simultaneously\n fx = self.parameters.parameters[\"focallength_x_px\"]\n fy = self.parameters.parameters[\"focallength_y_px\"]\n f = Parameter(focallength_x_px, type=TYPE_INTRINSIC)\n\n def callback():\n fx.value = f.value\n if fx.callback is not None:\n fx.callback()\n fy.value = f.value\n if fy.callback is not None:\n fy.callback()\n\n f.callback = callback\n self.parameters.parameters[\"focallength_px\"] = f\n\n if view_x_deg is not None or view_y_deg is not None:\n if sensor_width_mm is None:\n if focallength_mm is not None and self.focallength_x_px and sensor_width_mm is not None:\n self.sensor_width_mm = focallength_mm / self.focallength_x_px * self.image_width_px\n if focallength_mm is not None and self.focallength_px and sensor_height_mm is not None:\n self.sensor_height_mm = focallength_mm / self.focallength_y_px * self.image_height_px", "def __init__(\n self,\n title,\n body,\n img_path_xs,\n img_path_sm,\n img_path_md,\n img_path_lg,\n is_active):\n\n self.title = title\n self.body = body\n self.img_path_xs = img_path_xs\n self.img_path_sm = img_path_sm\n self.img_path_md = img_path_md\n self.img_path_lg = img_path_lg\n self.is_active = is_active", "def __init__(self):\n self.version = (7, 0, 0)\n self.legacy = False\n self.convert_cmd = ['magick']\n self.identify_cmd = ['magick', 'identify']\n self.compare_cmd = ['magick', 'compare']", "def __init__(self, image):\n\n # Call the parent class (Sprite) constructor\n super().__init__()\n\n # Create an image of the block, and fill it with a color.\n # This could also be an image loaded from the disk.\n self.image = pygame.image.load('assets/' + image)\n\n # Fetch the rectangle object that has the dimensions of the image\n # image.\n # Update the position of this object by setting the values\n # of rect.x and rect.y\n self.rect = self.image.get_rect()", "def __init__(self, caption, tag, image_path, scale=.7):\n super(LatexImage, self).__init__(caption, tag)\n self._image_path = image_path\n self._scale = scale", "def __init__(self, center=None, gs=None):\n\t\tself.gs = gs\n\t\tself.image = pygame.image.load(\"imgs/gun.png\")\n\t\tself.orig_image = pygame.image.load(\"imgs/gun.png\")\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = center", "def __init__(self, map_state):\n self.map_state = map_state\n self.image = map_prepare.GFX[\"misc\"][\"interface\"]\n self.make_widgets()", "def __init__(self, folder_path: PathType):\n self.tifffile = _get_tiff_reader()\n self.folder_path = Path(folder_path)\n\n self._ome_tif_files = list(self.folder_path.glob(\"*.ome.tif\"))\n assert self._ome_tif_files, f\"The TIF image files are missing from '{folder_path}'.\"\n\n # load the 'DisplaySettings.json' file that contains the sampling frequency of images\n settings = self._load_settings_json()\n self._sampling_frequency = float(settings[\"PlaybackFPS\"][\"scalar\"])\n\n first_tif = self.tifffile.TiffFile(self._ome_tif_files[0])\n # extract metadata from Micro-Manager\n micromanager_metadata = first_tif.micromanager_metadata\n assert \"Summary\" in micromanager_metadata, \"The 'Summary' field is not found in Micro-Manager metadata.\"\n self.micromanager_metadata = micromanager_metadata\n self._width = self.micromanager_metadata[\"Summary\"][\"Width\"]\n self._height = self.micromanager_metadata[\"Summary\"][\"Height\"]\n 
self._num_channels = self.micromanager_metadata[\"Summary\"][\"Channels\"]\n if self._num_channels > 1:\n raise NotImplementedError(\n f\"The {self.extractor_name}Extractor does not currently support multiple color channels.\"\n )\n self._channel_names = self.micromanager_metadata[\"Summary\"][\"ChNames\"]\n\n # extact metadata from OME-XML specification\n self._ome_metadata = first_tif.ome_metadata\n ome_metadata_root = self._get_ome_xml_root()\n\n schema_name = re.findall(\"\\{(.*)\\}\", ome_metadata_root.tag)[0]\n pixels_element = ome_metadata_root.find(f\"{{{schema_name}}}Image/{{{schema_name}}}Pixels\")\n self._num_frames = int(pixels_element.attrib[\"SizeT\"])\n self._dtype = np.dtype(pixels_element.attrib[\"Type\"])\n\n # all the file names are repeated under the TiffData tag\n # the number of occurences of each file path corresponds to the number of frames for a given TIF file\n tiff_data_elements = pixels_element.findall(f\"{{{schema_name}}}TiffData\")\n file_names = [element[0].attrib[\"FileName\"] for element in tiff_data_elements]\n\n # count the number of occurrences of each file path and their names\n file_counts = Counter(file_names)\n self._check_missing_files_in_folder(expected_list_of_files=list(file_counts.keys()))\n # Initialize the private imaging extractors with the number of frames for each file\n imaging_extractors = []\n for file_path, num_frames_per_file in file_counts.items():\n extractor = _MicroManagerTiffImagingExtractor(self.folder_path / file_path)\n extractor._num_frames = num_frames_per_file\n extractor._image_size = (self._height, self._width)\n imaging_extractors.append(extractor)\n super().__init__(imaging_extractors=imaging_extractors)", "def _initialize(self):\n if not self._is_initialized:\n self.connect(retries=Camera.CONNECTION_RETRIES)\n self.cam.resolution = (self.resolution['x'], self.resolution['y'])\n self.cam.start_preview()\n time.sleep(2)\n self._is_initialized = True", "def __init__(self, *args):\n _gdi_.AlphaPixelData_swiginit(self,_gdi_.new_AlphaPixelData(*args))", "def setup(self):\n self.fname = None\n self.remote = self.camera.get('remote', None)\n self.picture_directory = self.camera.get('directory', Bawt.DEFAULT_DIRECTORY)\n self.resolution = self.camera.get('resolution', Bawt.DEFAULT_RESOLUTION)\n LOG.info(\"Picture directory set to: %s\" % self.picture_directory)\n LOG.info(\"Resolution set to %s\" % self.resolution)\n self.timelapse = self.camera.get('timelapse', None)\n self._is_initialized = False", "def __init__(self, x0 = 0, y0 = 0, num_images = 2, num_rois = 1):\n self.x0 = x0\n self.y0 = y0\n self.num_images = num_images\n\n self.rois = []\n self.set_num_rois(num_rois)\n self.set_num_images(num_images)", "def __init__(self, reNum, imNum):\n self._reNum = reNum\n self._imNum = imNum", "def __init__(self, filename: str):\n self.tif_file = None\n self.byteOrder = 'big'\n self.magic = None\n self.ifds = []\n\n if filename is not None:\n self.tif_file = TiffFileHandler(filename)\n self.load_tiff()", "def __init__(self, images, batch_size, ctx, multisp):\n self.ctx = ctx\n self.batch_size = batch_size\n self.images = []\n self.multisp = multisp\n\n self.images=images\n\n if self.images:\n self.channels, self.imgsize, _ = self._read_img(self.images[0]['data']).shape\n\n logging.info(\"Found a total of {} images\".format(len(self.images)))", "def prepare_single_image(cls, path):\n return T.Compose(cls.base_transforms)(Image.open(path))" ]
[ "0.7476075", "0.72321916", "0.70613927", "0.70613927", "0.70613927", "0.70282173", "0.7005531", "0.7002043", "0.7002043", "0.7002043", "0.7002043", "0.7002043", "0.6960504", "0.69188726", "0.6878795", "0.67705446", "0.6648267", "0.6634296", "0.6630021", "0.6602309", "0.6548551", "0.6508341", "0.64878255", "0.64600325", "0.6447491", "0.63842165", "0.6365384", "0.6344992", "0.63303095", "0.63293946", "0.6320131", "0.63168085", "0.6308815", "0.6307306", "0.6303225", "0.63008344", "0.6287179", "0.62861216", "0.6271759", "0.62699777", "0.62532794", "0.624407", "0.6216058", "0.61946875", "0.6192695", "0.61904675", "0.6180453", "0.6176455", "0.617096", "0.616534", "0.6159425", "0.61447", "0.61423486", "0.6132359", "0.61169386", "0.60947657", "0.6091549", "0.60911554", "0.6089524", "0.60856473", "0.6083063", "0.6082218", "0.60792565", "0.6078861", "0.60772955", "0.6069459", "0.60663956", "0.6064326", "0.60574436", "0.6051899", "0.6036297", "0.6028636", "0.60039616", "0.599828", "0.5989205", "0.59596866", "0.595944", "0.5953319", "0.5952006", "0.59500635", "0.5949157", "0.59443885", "0.5943287", "0.59265345", "0.59263945", "0.59152585", "0.59129846", "0.59069", "0.5890384", "0.58900875", "0.587835", "0.5860964", "0.58547336", "0.585256", "0.58519936", "0.58475053", "0.58471286", "0.58420116", "0.5841782", "0.5838135", "0.58306384" ]
0.0
-1
The camera that took the image
def camera(self): return self.__camera
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def camera(self):\n return self._camera", "def camera(self):\n return self._camera", "def query_camera(self):\n ok, orig_pic = self.vs.read() # Read video stream\n if ok: # If no errors\n orig_pic = imutils.rotate(orig_pic, angle=self.camera_rot)\n curr_pic = imutils.resize(orig_pic, width=self.image_width)\n return curr_pic, orig_pic\n else:\n return None, None", "def current_camera(self):\n n = ct.c_long() # current camera handler\n self.lib.GetCurrentCamera(ct.pointer(n))\n return n.value", "def get_image(self):\n return self.camera.getImage()", "def camera_id(self):\n return self._camera_id", "def camera_id(self):\n return self._camera_id", "def camera_image(self):\n if not self.ezvizService.switchState:\n return \"\"\n\n now = time.time()\n if now < self._last_snapshot_time + self._interval_snapshots:\n return self._last_image\n\n result = self.ezvizService.post('/lapp/device/capture', data={'deviceSerial':self.deviceSerial,'channelNo':1})\n if (result['code']!='200'):\n _LOGGER.error(\"EZVIZ capture image fail:%s\", result)\n return self._last_image\n\n image_path = result['data']['picUrl']\n try:\n response = requests.get(image_path)\n except requests.exceptions.RequestException as error:\n _LOGGER.error(\"EZVIZ getting camera image: %s\", error)\n return self._last_image\n\n self._last_snapshot_time = now\n self._last_image = response.content\n return self._last_image", "def get_image():\n\n # Access the global variable and activate the saving for the last camera's\n # frame\n global _save_image\n _save_image = True", "def snapshot(self):\n return self.camera.snapshot(0)", "def read_camera(self):\n _, frame = self.camera.read()\n return self.mirror(frame)", "def get_image(self):\n self.flush_buffer()\n _, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame", "def camera_entity(self):\n return self._camera_entity", "def camera_image(self):\n return asyncio.run_coroutine_threadsafe(\n self.async_camera_image(), self.hass.loop\n ).result()", "def camera_entity(self):\n return self._camera_entity_id", "def get_camera_metadata(self):\n return self.camera.getHeight(), self.camera.getWidth(), 4 # channels", "def camera_image(self):\n now = utcnow()\n if self._ready_for_snapshot(now) or True:\n image = self._device.camera_get_image(self._uuid, now)\n\n self._next_snapshot_at = now + self._time_between_snapshots\n self._last_image = image\n\n return self._last_image", "def captureimage(self):\n if not self.total_time:\n return self.frames[-1]\n return None", "def grab_image(self):\n _, camera_image = self.camera.read()\n with self.lock:\n self.image = camera_image", "def bspb_getCurrentCam():\n curPanel = pm.windows.getPanel(wf=True)\n if curPanel.startswith('modelPanel'):\n currentCamera = str(pm.windows.modelEditor(curPanel, q=True, camera=True))\n if currentCamera == 'shot_cam' or currentCamera == 'shot_camShape':\n return 'shot_cam'\n else:\n return 'Valid Camera is not selected.'\n else:\n return 'Please Select shot_cam viewport.'", "def _get_camera(self):\n rect = (self._dim[0], self._dim[2], self._dim[1] - self._dim[0],\n self._dim[3] - self._dim[2])\n flip = (False, type(self).__name__ == 'ImageObj', False)\n return scene.cameras.PanZoomCamera(rect=rect, flip=flip)", "def snapFrame(camera):\n return camera.read()[1]", "def get_camera_feed(self):\r\n # get the frame..from cam feed\r\n read_status, self.frame = self.capture.read()\r\n return self.frame", "def get_frame(self):\n BaseCamera.last_access = 
time.time()\n\n # wait for a signal from the camera thread\n BaseCamera.event.wait()\n BaseCamera.event.clear()\n\n return BaseCamera.frame", "def camera(self):\n self.spectrum = self.spectrum", "def capture_image():\n global img_tk\n r, img_cam = cam.read()\n img_pil = Image.fromarray(cv2.cvtColor(img_cam, cv2.COLOR_BGR2RGB))\n img_tk = ImageTk.PhotoImage(img_pil)\n tk_cam.create_image(0, 0, image=img_tk, anchor='nw')\n return img_pil", "def getFrame(self):\n s, image = self.capture.read()\n return image", "def getCameraMatrix(self): # real signature unknown; restored from __doc__\n pass", "def model(self) -> CameraModel:\n pass", "def __get_img(self):\n # Read camera image\n while True:\n # Wait for prediction\n if not self.__predict_start:\n continue\n\n # Get current frame and\n # check for success\n success, self.__img = self.__cap.read()\n if not success:\n continue\n\n self.__img = cv2.resize(self.__img, (self.__size[0], self.__size[1]))", "def _get_camera(self, mode):\n cam_bp = self.blueprint_lib.find(f\"sensor.camera.{mode}\")\n cam_bp.set_attribute(\"image_size_x\", f\"{self.img_x}\")\n cam_bp.set_attribute(\"image_size_y\", f\"{self.img_y}\")\n cam_bp.set_attribute(\"fov\", f\"{self.img_fov}\")\n cam = self.world.spawn_actor(cam_bp, self.transform, attach_to=self.vehicle) # spawing isn't expected to fail\n \n return cam", "def capture(self):\n filename = self.get_new_photo_filename()\n open(self.camid + '/' + filename, 'wb').write(self.fake_shot)\n return filename", "def cameraType(self):\r\n cls = mxs.classof(self._nativePointer)\r\n if cls in (mxs.FreeCamera, mxs.TargetCamera):\r\n return CameraType.Standard\r\n\r\n elif cls == mxs.Physical:\r\n return CameraType.Physical\r\n\r\n elif cls == mxs.VRayPhysicalCamera:\r\n return CameraType.Physical\r\n return 0", "def getCameraID(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALVideoRecorder\")\n return self.proxy.getCameraID()", "def capture():\n stream = BytesIO()\n cam.capture(stream, 'jpeg')\n data = np.fromstring(stream.getvalue(), dtype=np.uint8)\n # \"Decode\" the image preserving color\n img = cv2.imdecode(data, 1)\n # switch BGR order to RGB order\n img = img[:, :, ::-1]\n\n # resize image to match training size\n img = cv2.resize(img, (args.resize, args.resize), interpolation=cv2.INTER_AREA)\n print(\"done resizing\")\n\n# cv2.imshow('image',img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n return img.flatten()", "def get_camera_param(self, imgname) -> dict:\n assert hasattr(self, 'camera_param')\n subj, _, camera = self._parse_h36m_imgname(imgname)\n return self.camera_param[(subj, camera)]", "def get_camera_state(self, parameter):\n return self.opt.getParameter(parameter)", "def get_camera_count():\n return Camera.getNumberOfCameras()", "def capture(self):\n log.info('Image: %d' % self.idx)\n img = cv.imread(self.files[self.idx])\n self.idx = (self.idx + 1) % len(self.files)\n\n img = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n return img", "async def async_camera_image(self):\n last_image = self._nvr.get_snapshot_image(self._uuid)\n self._last_image = last_image\n return self._last_image", "def process_camera():\n\n pic_array = take_picture()\n detections, shapes, descriptors = detect_faces(person_database,pic_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n names.append(name)\n\n return pic_array, names, detections, shapes, descriptors", "def input(self):\n\t\treturn self.image", "def last_camera_image_url(self) -> str:\n return 
self.camera_info[\"cover_path\"]", "def capture_image(self, data={}):\n if self.camera:\n image_name = f'{os.path.join(self.path, self.filename)}.jpg'\n self.camera.capture(image_name)\n self.last_image = os.path.abspath(image_name)\n self.increment_count()\n self.fire({'event': 'ImageCaptured', 'image': image_name})", "def getImage(cam):\n\n return cam.getImage()", "def imageFromCamera(self, points): # pragma: no cover\n # to be overloaded by the child class.\n return None", "def camera_location(self) -> CameraLocationType:\n return self._location", "def get_camera_observation(self, t):\n if not self.enable_cameras:\n warnings.warn(\n \"Cameras are not enabled, so images in the camera observation\"\n \" are not initialized. Create `TriFingerPlatform` with\"\n \" `enable_cameras=True` to get rendered camera images.\"\n )\n\n current_t = self.simfinger._t\n\n if t < 0:\n raise ValueError(\"Cannot access time index less than zero.\")\n elif t == current_t:\n return self._camera_observation_t\n elif t == current_t + 1:\n return self._get_current_camera_observation(t)\n else:\n raise ValueError(\n \"Given time index t has to match with index of the current\"\n \" step or the next one.\"\n )", "def createCamera():\n\n turnCam = cmds.camera()\n lookThruAndFrame(turnCam[0])\n return turnCam", "def get_image_from_camera(camera):\n if camera:\n # if predictor is too slow frames get buffered, this is designed to\n # flush that buffer\n ret, frame = camera.read()\n if not ret:\n raise Exception(\"your capture device is not returning images\")\n return frame\n return None", "def capture_image(self):\n ext = self.image_save_type.lower()\n\n if self.calibrating:\n print('calibrating')\n\n if ext == 'fits':\n self.save_fits()\n self._image_counter += 1\n else:\n img = self.original_image\n path = os.path.join(self.home, 'data')\n name = \"camtrak_frame_{}.png\".format(self._image_counter) \n fn = os.path.join(path, name)\n cv2.imwrite(fn, img)\n\n QtWidgets.QApplication.beep()\n self.statusBar().showMessage(f'Saved image to {fn}')\n self._image_counter += 1", "def obter_caminho(self):\n return self.caminho", "def get_frame(self):\n return opencv.highgui.cvQueryFrame(self.capture)", "def status(cls):\n return {'type': 'Emulated camera'}", "def camera(*args, aspectRatio: Union[float, bool]=0.0, cameraScale: Union[float, bool]=0.0,\n centerOfInterest: Union[float, bool]=0.0, clippingPlanes: bool=True, depthOfField:\n bool=True, displayFieldChart: bool=True, displayFilmGate: bool=True,\n displayFilmOrigin: bool=True, displayFilmPivot: bool=True, displayGateMask:\n bool=True, displayResolution: bool=True, displaySafeAction: bool=True,\n displaySafeTitle: bool=True, fStop: Union[float, bool]=0.0, farClipPlane:\n Union[float, bool]=0.0, farFocusDistance: Union[float, bool]=0.0, filmFit:\n Union[AnyStr, bool]=\"\", filmFitOffset: Union[float, bool]=0.0, filmRollOrder:\n Union[AnyStr, bool]=\"\", filmRollValue: Union[float, bool]=0.0, filmTranslateH:\n Union[float, bool]=0.0, filmTranslateV: Union[float, bool]=0.0, focalLength:\n Union[float, bool]=0.0, focusDistance: Union[float, bool]=0.0, homeCommand:\n Union[AnyStr, bool]=\"\", horizontalFieldOfView: Union[float, bool]=0.0,\n horizontalFilmAperture: Union[float, bool]=0.0, horizontalFilmOffset: Union[float,\n bool]=0.0, horizontalPan: Union[float, bool]=0.0, horizontalRollPivot: Union[float,\n bool]=0.0, horizontalShake: Union[float, bool]=0.0, journalCommand: bool=True,\n lensSqueezeRatio: Union[float, bool]=0.0, lockTransform: bool=True, motionBlur:\n 
bool=True, name: Union[AnyStr, bool]=\"\", nearClipPlane: Union[float, bool]=0.0,\n nearFocusDistance: Union[float, bool]=0.0, orthographic: bool=True,\n orthographicWidth: Union[float, bool]=0.0, overscan: Union[float, bool]=0.0,\n panZoomEnabled: bool=True, position: Union[List[float, float, float], bool]=None,\n postScale: Union[float, bool]=0.0, preScale: Union[float, bool]=0.0, renderPanZoom:\n bool=True, rotation: Union[List[float, float, float], bool]=None, shakeEnabled:\n bool=True, shakeOverscan: Union[float, bool]=0.0, shakeOverscanEnabled: bool=True,\n shutterAngle: Union[float, bool]=0.0, startupCamera: bool=True,\n stereoHorizontalImageTranslate: Union[float, bool]=0.0,\n stereoHorizontalImageTranslateEnabled: bool=True, verticalFieldOfView: Union[float,\n bool]=0.0, verticalFilmAperture: Union[float, bool]=0.0, verticalFilmOffset:\n Union[float, bool]=0.0, verticalLock: bool=True, verticalPan: Union[float, bool]=0.0,\n verticalRollPivot: Union[float, bool]=0.0, verticalShake: Union[float, bool]=0.0,\n worldCenterOfInterest: Union[List[float, float, float], bool]=None, worldUp:\n Union[List[float, float, float], bool]=None, zoom: Union[float, bool]=0.0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr], Any]:\n pass", "def capture(self):\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n self.vid.grab()\n ret, img = self.vid.read()\n cv2.imwrite(\"/tmp/test.png\", img)\n return img", "def get_frame(self, camera: int = 0) -> Tuple[float, np.ndarray]:\n return self.video.read()", "def captureImage(capture):\n cvImg = cv.QueryFrame(capture)\n # cv.CvtColor(cvImg, cvImg, cv.CV_BGR2RGB)\n cvMat = cv.GetMat(cvImg)\n return cv.CloneMat(cvMat)", "def get_pose():\n files = {'file': ('image.jpg', open(\n 'assets/image.jpg', 'rb'), 'images/jpeg')}\n result = requests.post(URL, files=files).json()\n img = cv2.imread('assets/image.jpg')[:, :, ::-1]\n return result, img", "def run(self):\n\n with Camera.instance() as camera:\n try:\n # create log file and write headers\n with open(self.log_path, \"w+\") as log:\n writer = csv.writer(log)\n writer.writerow([\"image\", \"angle\", \"previous_angle\"])\n except OSError:\n raise OSError(\"The log file could not be created.\")\n\n previous_angle = 0.0\n while self.active:\n if camera.image is None: continue # skip loop if no image provided\n\n # save image\n img_filename = datetime.today().strftime(\"%H-%M-%S-%f\") + \".\" + self.img_extension\n np.save(self.img_dir + img_filename, camera.image)\n\n try:\n # write data to csv file\n with open(self.log_path, \"a\") as log:\n writer = csv.writer(log)\n angle = str(round(self.driver.angle, 3))\n previous_angle = str(previous_angle)\n writer.writerow([img_filename, angle, previous_angle])\n except OSError:\n raise OSError(\"The log file could not be opened.\")\n\n previous_angle = angle # update previous angle for next loop\n time.sleep(self.CAPTURE_INTERVAL)", "def get_frame(self, camera: int = 0) -> Tuple[float, np.ndarray]:\n frame_time, self.frame = self.sources[camera].grabFrameNoTimeout(\n image=self.frame\n )\n return frame_time, self.frame", "def __init__(self, camera, cameras, settings):\n\n self.cam = None\n self.jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default\n # check picamera version\n try:\n picamversion = 
require('picamera')[0].version\n except:\n picamversion = '0'\n\n if 'threaded_read' in cameras[camera]: # threaded on non-threaded camera reading\n self.threaded_read = cameras[camera]['threaded_read']\n else:\n self.threaded_read = True\n if 'resolution' in cameras[camera]:\n self.resolution = literal_eval(cameras[camera]['resolution'])\n else:\n self.resolution = (320, 240)\n if 'framerate' in cameras[camera]:\n self.framerate = cameras[camera]['framerate']\n else:\n self.framerate = 32\n if 'vflip' in cameras[camera]:\n self.vflip = cameras[camera]['vflip']\n else:\n self.vflip = False\n if 'resize_width' in cameras[camera]:\n # resize_width is a percentage value\n # width in pixels will be computed later after reading a test image\n self.resize_width = cameras[camera]['resize_width']\n else:\n self.resize_width = None\n if 'viewname' in cameras[camera]:\n self.viewname = cameras[camera]['viewname']\n else:\n self.viewname = ' '\n if 'src' in cameras[camera]:\n self.src = cameras[camera]['src']\n else:\n self.src = 0\n if 'exposure_mode' in cameras[camera]:\n self.exposure_mode = cameras[camera]['exposure_mode']\n else:\n self.exposure_mode = None\n if 'iso' in cameras[camera]:\n self.iso = cameras[camera]['iso']\n else:\n self.iso = 0 # default value\n if 'shutter_speed' in cameras[camera]:\n self.shutter_speed = cameras[camera]['shutter_speed']\n else:\n self.shutter_speed = 0 # default value\n if 'sharpness' in cameras[camera]:\n self.sharpness = cameras[camera]['sharpness']\n else:\n self.sharpness = 0 # default value\n if 'contrast' in cameras[camera]:\n self.contrast = cameras[camera]['contrast']\n else:\n self.contrast = 0 # default value\n if 'brightness' in cameras[camera]:\n self.brightness = cameras[camera]['brightness']\n else:\n self.brightness = 50 # default value\n if 'exposure_compensation' in cameras[camera]:\n self.exposure_compensation = cameras[camera]['exposure_compensation']\n else:\n self.exposure_compensation = 0 # 0 default value, integer value between -25 and 25\n if 'awb_mode' in cameras[camera]:\n self.awb_mode = cameras[camera]['awb_mode']\n else:\n self.awb_mode = 'auto' # default value\n\n self.detectors = []\n if 'detectors' in cameras[camera]: # is there at least one detector\n self.setup_detectors(cameras[camera]['detectors'],\n settings.nodename,\n self.viewname)\n if camera[0].lower() == 'p': # this is a picam\n # start PiCamera and warm up; inherits methods from\n # imutils.VideoStream unless threaded_read is False; then uses class\n # PiCameraUnthreadedStream to read the PiCamera in an unthreaded way\n if self.threaded_read:\n self.cam = VideoStream(usePiCamera=True,\n resolution=self.resolution,\n framerate=self.framerate).start()\n else:\n self.cam = PiCameraUnthreadedStream(resolution=self.resolution,\n framerate=self.framerate)\n\n # if an exposure mode has been set in yaml, set it\n if self.exposure_mode:\n self.cam.camera.exposure_mode = self.exposure_mode\n # if an iso has been set in yaml, set it\n if self.iso:\n self.cam.camera.iso = self.iso\n # if an iso has been set in yaml, set it\n if self.shutter_speed:\n self.cam.camera.shutter_speed = self.shutter_speed\n # if an sharpness has been set in yaml, set it\n if self.sharpness:\n self.cam.camera.sharpness = self.sharpness\n # if an contrast has been set in yaml, set it\n if self.contrast:\n self.cam.camera.contrast = self.contrast\n # if an brightness has been set in yaml, set it\n if self.brightness:\n self.cam.camera.brightness = self.brightness\n # if an exposure_compensation has been 
set in yaml, set it\n if self.exposure_compensation:\n self.cam.camera.exposure_compensation = self.exposure_compensation\n # if an awb_mode has been set in yaml, set it\n if self.awb_mode:\n self.cam.camera.awb_mode = self.awb_mode\n self.cam_type = 'PiCamera'\n else: # this is a webcam (not a picam)\n self.cam = VideoStream(src=0).start()\n self.cam_type = 'webcam'\n sleep(3.0) # allow camera sensor to warm up\n\n # self.text is the text label for images from this camera.\n # Each image that is sent is sent with a text label so the hub can\n # file them by nodename, viewname, and send_type\n # example: JeffOffice Window|jpg\n # Nodename and View name are in one field, separated by a space.\n # send_type is in the next field\n # The 2 field names are separaged by the | character\n node_and_view = ' '.join([settings.nodename, self.viewname]).strip()\n self.text = '|'.join([node_and_view, settings.send_type])\n\n # set up camera image queue\n self.cam_q = deque(maxlen=settings.queuemax)", "def get_camera_transform(self):\r\n if not self.pose:\r\n rospy.loginfo(\"no pose!\")\r\n return None\r\n if self.pose.header.frame_id != self.role_name:\r\n rospy.logwarn(\"Unsupported frame received. Supported {}, received {}\".format(\r\n self.role_name, self.pose.header.frame_id))\r\n return None\r\n sensor_location = carla.Location(x=self.pose.pose.position.x,\r\n y=-self.pose.pose.position.y,\r\n z=self.pose.pose.position.z)\r\n quaternion = (\r\n self.pose.pose.orientation.x,\r\n self.pose.pose.orientation.y,\r\n self.pose.pose.orientation.z,\r\n self.pose.pose.orientation.w\r\n )\r\n roll, pitch, yaw = euler_from_quaternion(quaternion)\r\n # rotate to CARLA\r\n sensor_rotation = carla.Rotation(pitch=math.degrees(roll)-90,\r\n roll=math.degrees(pitch),\r\n yaw=-math.degrees(yaw)-90)\r\n return carla.Transform(sensor_location, sensor_rotation)", "def take_picture(self):\n self.drone.take_picture()", "def take_picture(self):\n self.drone.take_picture()", "def _create_single_camera(self):\n\n # obtian K matrix from cfg #####################################################################################\n for ii in range(0,3):\n for jj in range(0,3):\n self._k_mat[ii,jj] = self._cfg[\"KMatrix\"][ii*3+jj]\n ############################################################################## end of obtian K matrix from cfg #\n\n # create camera\n bpy.ops.object.camera_add()\n\n # get camera object\n # TODO get without object name:\n self._sensor = bpy.data.objects['Camera']\n\n # change name of camera\n self._sensor.name = self._cfg[\"outputBaseName\"] + '_Camera_RGBD'\n\n # use depth of field if requested ##############################################################################\n if \"depthOfField\" in self._cfg:\n\n # activate depth of field\n self._sensor.data.dof.use_dof = True\n\n # set up params ############################################################################################\n if \"distance\" in self._cfg[\"depthOfField\"]:\n self._sensor.data.dof.focus_distance = self._cfg[\"depthOfField\"][\"distance\"]\n else:\n self._sensor.data.dof.focus_distance = 10.0\n\n if \"fStop\" in self._cfg[\"depthOfField\"]:\n self._sensor.data.dof.aperture_fstop = self._cfg[\"depthOfField\"][\"fStop\"]\n else:\n self._sensor.data.dof.aperture_fstop = 1.5\n\n if \"blades\" in self._cfg[\"depthOfField\"]:\n self._sensor.data.dof.aperture_blades = self._cfg[\"depthOfField\"][\"blades\"]\n else:\n self._sensor.data.dof.aperture_blades = 0\n\n if \"rotationDeg\" in self._cfg[\"depthOfField\"]:\n 
self._sensor.data.dof.aperture_rotation = self._cfg[\"depthOfField\"][\"rotationDeg\"]*(math.pi/180.0)\n else:\n self._sensor.data.dof.aperture_rotation = 0\n\n if \"ratio\" in self._cfg[\"depthOfField\"]:\n self._sensor.data.dof.aperture_ratio = self._cfg[\"depthOfField\"][\"ratio\"]\n else:\n self._sensor.data.dof.aperture_ratio = 1.0\n #################################################################################### end of set up params #\n else:\n # deactivate depth of field\n self._sensor.data.dof.use_dof = False\n ####################################################################### end of use depth of field if requested #\n\n # set camera params ############################################################################################\n # based on https://blender.stackexchange.com/a/120063\n\n # get focal lenght and principle point from K matrix\n _f_x = self._k_mat[0,0]\n _f_y = self._k_mat[1,1]\n _c_x = self._k_mat[0,2]\n _c_y = self._k_mat[1,2]\n\n # get image resolution\n _w = self._cfg[\"imageResolution\"][0]\n _h = self._cfg[\"imageResolution\"][1]\n\n # calc field of view\n _fov = 2.0*math.atan(_w/(2*_f_x))\n _fov_deg = _fov*(180./math.pi)\n\n # aspect ratio\n _a_x = 1\n _a_y = 1\n if _f_x > _f_y:\n _a_y = _f_x / _f_y\n elif _f_x < _f_y:\n _a_y = _f_y / _f_x\n\n # calc focal length ratio\n _f_ratio = _f_x / _f_y\n\n # sensor fitting mode according to issue\n if 'AUTO' == self._sensor.data.sensor_fit:\n if _f_x*_w >= _f_y*_h:\n _v = _w\n else:\n _v = pixel_aspect_ratio * _h\n else:\n if 'HORIZONTAL' == cam.sensor_fit:\n _v = _w\n else:\n _v = pixel_aspect_ratio * _h\n\n # Set shift\n self._sensor.data.shift_x = ((_w/2.)-_c_x)/ _v\n self._sensor.data.shift_y = ((_h/2.)-_c_y)/ _v * _f_ratio\n\n # set field of view for camera\n self._sensor.data.lens_unit = 'FOV'\n self._sensor.data.angle = _fov\n\n # set transformation for camera\n self._sensor.rotation_mode = 'QUATERNION' \n self._base_to_sensor = self._cfg[\"transformation\"]\n self._sensor.location = (self._base_to_sensor[0],self._base_to_sensor[1],self._base_to_sensor[2])\n self._sensor.rotation_quaternion = (self._base_to_sensor[3],\n self._base_to_sensor[4],\n self._base_to_sensor[5],\n self._base_to_sensor[6])\n ##################################################################################### end of set camera params #\n\n # set render pass dict #########################################################################################\n self._general_render_pass_dict = {}\n self._general_render_pass_dict[\"name\"] = self._cfg[\"outputBaseName\"]\n self._general_render_pass_dict[\"imageResolution\"] = [int(self._cfg[\"imageResolution\"][0]),\\\n int(self._cfg[\"imageResolution\"][1])]\n ################################################################################## end of set render pass dict #\n\n # config RGBDPass ##############################################################################################\n if 'RGBDPass' in self._cfg[\"renderPasses\"]:\n rgbd_info = {}\n rgbd_info[\"name\"] = self._cfg[\"outputBaseName\"]\n rgbd_info[\"imageResolution\"] = [int(self._cfg[\"imageResolution\"][0]),int(self._cfg[\"imageResolution\"][1])]\n rgbd_info[\"DepthEnabled\"] = self._cfg[\"renderPasses\"][\"RGBDPass\"][\"DepthEnabled\"]\n self._render_pass_dict[\"RGBDPass\"] = rgbd_info\n ####################################################################################### end of config RGBDPass #", "def render(self):\n self.camera.SimulationStep(0.01)\n return self.camera.GetSensorData().imagedata;", "def 
takePicture(self):\n if not PICAM_ENABLED:\n return\n\n try:\n #self.camera.capture(self.imgFile, 'jpeg')\n #self.cnvImg.createImg(self.imgFile)\n\t\t\n self.rgbArray = picamera.array.PiRGBArray(self.camera)\n self.camera.capture(self.rgbArray, 'rgb')\n \n self.gray = rgb2gray(self.rgbArray.array)\n plt.imshow(self.gray, cmap = plt.get_cmap('gray'))\n plt.show()\n print(self.gray.shape, self.gray.ndim)\n print(self.gray[0][2]) \n self.img = Image.fromarray(self.rgbArray.array)\n self.img.save(\"./array.png\")\n self.cnvImg.displayImg(self.img)\n except:\n print(\"Take picture error\")", "def camera_start(self):\n mycam = ONVIFCamera(self.__cam_ip, 80, self.__cam_user, self.__cam_password)\n logging.info('Create media service object')\n media = mycam.create_media_service()\n logging.info('Get target profile')\n media_profile = media.GetProfiles()[0]\n logging.info('Camera working!')\n\n self.mycam = mycam\n self.camera_media_profile = media_profile\n self.camera_media = media\n self.mycam = mycam\n\n return self.mycam", "def get_camera_data_object(evt, src):\n o = evt.get(_psana.Camera.FrameV1, src)\n if o is not None: return o\n\n return None", "def numberOfCamera():\n return numCams", "def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1", "def capture():\n\tcap = cv2.VideoCapture(0)\n\tret, frame = cap.read()\n\tcap.release()\n\tcv2.destroyAllWindows()\n\treturn frame", "def camera_handle(self, index):\n index = ct.c_long(index)\n handle = ct.c_long()\n self.lib.GetCameraHandle(index, ct.pointer(handle))\n return handle.value", "def getcanvas(self):\n return self.cv", "def checkCamera(self):\n #how to check if cam exits\n #https://stackoverflow.com/questions/48049886/how-to-correctly-check-if-a-camera-is-available\n if \"has_cam\" in self.store.keys():\n return self.store[\"has_cam\"]\n\n cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)\n\n if cap is None or not cap.isOpened():\n return False\n #Close the cam\n cap.release()\n cv2.destroyAllWindows()\n return True", "def cam():\n\treturn Response(gen(camera),\n\t\t\t\t\tmimetype='multipart/x-mixed-replace; boundary=frame'), 200", "def camera(self, source):\n self._camera = source\n self.SetActiveCamera(self._camera)\n self.camera_position = CameraPosition(\n scale_point(source, source.position, invert=True),\n scale_point(source, source.focal_point, invert=True),\n source.up,\n )\n self.Modified()\n self.camera_set = True", "def get_frame(cap):\n\n #camera matrix for camera calibration\n mtx = np.array(np.mat(\"588.4525598886621, 0, 301.8008794717551; 0, 588.9763096391521, 242.617026416902; 0, 0, 1\"))\n\n #distrotion coefficients for camera calibration\n dist = np.array(np.mat(\"-0.4351555722591889, 0.2082765081608728, -0.006072767012672472, 0.008139871640987759, 0\"))\n\n #get image frame from the camera\n ret, frame = cap.read()\n\n return frame\n\n h, w = frame.shape[:2]\n\n #get the new optimal camera matrix and the roi which can be used to crop the result\n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h))\n\n #get the undistroted image\n dst = cv2.undistort(frame, mtx, dist, None, newcameramtx)\n\n x,y,w,h = roi\n\n #get the cropped image\n dst = dst[y:y+h, x:x+w]\n h, w = dst.shape[:2]\n\n #furthur crop the image to reduce the size of arena\n dst = dst[int(h/7):int(h*6/7), int(w/7):int(w*6/7)]\n\n #resize the arena to ARENA_SIZE\n dst = cv2.resize(dst, ARENA_SIZE, 
interpolation= cv2.INTER_CUBIC)\n\n return dst", "def get_img():\n\timg = camera.Capture()\n\tarray = jetson.utils.cudaToNumpy(img)\n\n\treturn(array)", "def copy(self):\n return CameraExtrinsic(self.position, self.direction, self.up)", "def capture(camera, image):\n iss.compute() # Get the lat/long values from ephem\n\n # convert the latitude and longitude to EXIF-appropriate representations\n south, exif_latitude = convert(iss.sublat)\n west, exif_longitude = convert(iss.sublong)\n\n # set the EXIF tags specifying the current location\n camera.exif_tags['GPS.GPSLatitude'] = exif_latitude\n camera.exif_tags['GPS.GPSLatitudeRef'] = \"S\" if south else \"N\"\n camera.exif_tags['GPS.GPSLongitude'] = exif_longitude\n camera.exif_tags['GPS.GPSLongitudeRef'] = \"W\" if west else \"E\"\n\n # capture the image to disk\n camera.capture(image)", "def capture(self, *, show=False, keep_showing=False):\n\t\tgen = self.loop(l_img=True, r_img=True)\n\t\tl_img, r_img = next(gen)\n\n\t\tif show:\n\t\t\tdouble_img = np.hstack((l_img, r_img))\n\t\t\tcv2.imshow('Camera Inputs', double_img)\n\t\t\tif not keep_showing:\n\t\t\t\tcv2.waitKey(0)\n\t\t\t\tcv2.destroyWindow('Camera Inputs')\n\n\t\treturn l_img, r_img", "def get_image(\n self, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL\n ) -> np.ndarray:\n (_, _, img, _, _) = self._pybullet_client.getCameraImage(\n width=self._width,\n height=self._height,\n viewMatrix=self._view_matrix,\n projectionMatrix=self._proj_matrix,\n renderer=renderer,\n **self._kwargs,\n )\n # remove the alpha channel\n return img[:, :, :3]", "def cameraCallback(self, data):\n if not self.isReady:\n cvImage, self.imageInfo['shape'] = u.getCVImage(data)\n if self.measuring is not None:\n self.list, cvImage, self.isReady = self.measuring.getListObjects(cvImage)\n # preview topic /see_main\n msg_image = u.getMsgImage(cvImage)\n self.pub_view_main.publish(msg_image)\n else:\n if self.imageInfo['shape'] is not None:\n self.init()\n else:\n rospy.logerr(\"no video stream. 
check camera's topic!\")", "def take_photo(self):\n\n status = self.camera.status()\n if status['mode'] != 'still':\n # place camera in snapshot mode\n self.camera.command('mode', 'still')\n\n photo_successful = self.camera.command('record', 'on')\n\n if photo_successful:\n\n # sleep for two seconds so the camera can process\n # and serve the new photo via http\n\n retrieved = False\n while not retrieved:\n print(\"Waiting for image to be served.\")\n time.sleep(2)\n retrieved = self.get_photos_from_device()\n\n print(\"Image got served.\")\n return True\n\n else:\n return False", "def get_camera_streaming(cam_id, w, h, fps):\n capture = cv2.VideoCapture(cam_id)\n capture.set(cv2.CAP_PROP_FRAME_WIDTH, w)\n capture.set(cv2.CAP_PROP_FRAME_HEIGHT, h)\n capture.set(cv2.CAP_PROP_FPS, fps)\n if not capture:\n print(\"Failed to initialize camera\")\n sys.exit(1)\n return capture", "def test_camera_id() -> None: # pragma: no cover\n cap = cv2.VideoCapture(1)\n if (cap.isOpened() == False):\n print(\"Error opening video stream or file\")\n ret, frame = cap.read()\n cv2.imshow('frame', frame)\n cv2.waitKey(0)\n cap.release()", "def capture(self):\n current_time = time.strftime('%Y%m%d-%H%M%S')\n filepath = f'files/{current_time}.png'\n self.ids.camera.export_to_png(filepath)\n self.manager.current = 'image_screen' # switch to the next screen\n self.manager.current_screen.ids.img.source = filepath # inherit img to the next screen\n return filepath", "def __str__(self):\n return \"F2Camera(%d, %d, %d)\" % (self.width, self.height, len(self.image))", "def capture(self):\n with picamera.PiCamera() as camera:\n # camera setup\n camera.resolution = (frame_width, frame_height)\n camera.framerate = 32\n camera.rotation = 90\n stream = PiRGBArray(camera, size=(frame_width, frame_height))\n\n # let camera warm up\n time.sleep(1)\n avg = None\n\n prev_area = 0\n upload_cnt = 0\n upload_threshold = 75\n motion_frames = []\n frame_cnt = 0\n\n start_time = time.time()\n\n print 'Ready'\n for frame in camera.capture_continuous(stream, 'bgr',\n use_video_port=True):\n\n stream.seek(0)\n image = frame.array\n\n if avg is None:\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, tuple(blur_size), 0)\n avg = gray.copy().astype(\"float\")\n stream.truncate()\n continue\n\n (contours, avg, gray, image) = motion_detect(image, avg)\n\n #print contours\n if isinstance(contours, tuple):\n contours = list(list(contours))\n if len(contours) > 0 and (time.time() - start_time) > 20:\n if upload_cnt < upload_threshold:\n print len(contours)\n print str(datetime.datetime.now())\n sys.stdout.flush()\n time_str = get_time()\n output_filename = path + 'img_' + time_str + '-' + str(frame_cnt) + '.jpg'\n if frame_cnt % 3 == 0:\n motion_frames.append((output_filename, gray, image))\n upload_cnt += 1\n frame_cnt += 1\n else:\n upload_cnt = 0\n if motion_frames:\n if len(motion_frames) > 1:\n self.append_frames(motion_frames)\n motion_frames = []\n frame_cnt = 0\n\n stream.seek(0)\n stream.truncate()", "def capture_image():\n\n endpoint = CAMERA_CAPTURE_URL + \"/camera/capture\"\n if DEBUG:\n print(\"Calling endpoint '%s'\" % endpoint)\n\n response = requests.get(endpoint)\n\n if response.status_code == 200:\n return response.content\n else:\n if DEBUG:\n print(\"Call to endpoint '%s' returned status code %s. Reason: %s\" % (endpoint, str(response.status_code), response.content))\n return None", "def preview_camera(self):\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Previewing the camera. 
Press the Enter key to exit.')\n self.buttonPreview.setText('Press Enter\\nto finish.')\n self.comboCamera.setEnabled(False)\n self.buttonSelectColor.setEnabled(False)\n self.buttonLogSet.setEnabled(False)\n cap = webcam.initiate_camera(self.comboCamera.currentIndex())\n while True:\n _, frame = cap.read()\n frame = np.rot90(frame, self.comboRotation.currentIndex())\n cv2.imshow('Camera Preview', frame)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('\\r'):\n break\n cap.release()\n cv2.destroyAllWindows()\n self.buttonPreview.setText('Preview')\n self.comboCamera.setEnabled(True)\n self.buttonSelectColor.setEnabled(True)\n self.buttonLogSet.setEnabled(True)\n self.statusbar.clearMessage()", "def getFrames():\n\t\tfor cam in Camera.CAMERAS: cam.getFrame()", "def snapshot(self):\n return cv2.resize(cv2.imread(\"../../../images/obstacles_sample.jpeg\"), (1280, 980))", "def startCamera(self):\n if self.video == \"camera\":\n self.cap = cv2.VideoCapture(gstreamer_pipeline(\n capture_width=416, capture_height=416, flip_method=0), cv2.CAP_GSTREAMER)\n else:\n video_path = Path(self.video)\n if not video_path.exists():\n raise Exception(\"Video file not found\")\n self.cap = cv2.VideoCapture(str(video_path))", "def get_frame(self, camera: int = 0) -> Tuple[float, np.ndarray]:\n result = self.video.read()\n if result[0]:\n return result\n else: # If we reach the end of the video, go back to the beginning.\n self.video.set(cv2.CAP_PROP_POS_FRAMES, 0)\n return self.video.read()", "def __init__(self, index = 0, requested_cam_size=(640,480)):\r\n \r\n object.__init__(self)\r\n self.surface = None\r\n self.capture = pygame.camera.Camera(Capture.enumerateDevices()[index][0], requested_cam_size, 'RGB')\r\n self.capture.start()", "def getAllDroneCamera(self):\n return self.__data[\"DroneCamera\"][:]", "def camera_info_callback(self, ros_data):\n self.last_call_back_time = rospy.get_time()\n\n self.lastCameraInfo = ros_data" ]
[ "0.8029401", "0.8029401", "0.7391439", "0.7336508", "0.7180209", "0.711195", "0.711195", "0.71046424", "0.70813036", "0.70528156", "0.69937193", "0.69860065", "0.69809467", "0.68731916", "0.6841381", "0.6825675", "0.68190277", "0.680215", "0.6788457", "0.6687622", "0.66858554", "0.6659736", "0.66563463", "0.6616539", "0.6616073", "0.6568658", "0.65509355", "0.6548425", "0.64725703", "0.6465223", "0.64617294", "0.6431176", "0.6400229", "0.63999254", "0.6398272", "0.63907456", "0.6386727", "0.6358168", "0.6355743", "0.63512343", "0.6344107", "0.632879", "0.6328411", "0.6313454", "0.6302412", "0.6272611", "0.6229247", "0.6219893", "0.6219253", "0.61894315", "0.6187971", "0.61814684", "0.61809415", "0.61675614", "0.61659414", "0.61585677", "0.6151668", "0.61362827", "0.61252654", "0.61220664", "0.61086416", "0.60971844", "0.60704494", "0.6066012", "0.6066012", "0.60593164", "0.6057424", "0.6040087", "0.60203636", "0.60187197", "0.60186464", "0.6007676", "0.6002182", "0.59903574", "0.59824514", "0.5979356", "0.5976008", "0.5971932", "0.59578794", "0.5954808", "0.59503907", "0.5948149", "0.594521", "0.5936102", "0.5935883", "0.5934009", "0.5932952", "0.59316885", "0.5930046", "0.5918972", "0.59131795", "0.58923167", "0.58898044", "0.58877015", "0.58847386", "0.5882145", "0.5876872", "0.58767796", "0.58719295", "0.586624" ]
0.81577206
0
r""" Property for the exterior orientation parameters
def exteriorOrientationParameters(self): return self.__exteriorOrientationParameters
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def orient(self):\n return self.__ph.get('orient', PH_ORIENT_HORZ)", "def get_orientation(self):\r\n return self.__orientation", "def Orientation(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Orientation(self, *args)", "def orientation(self):\n return self._orientation", "def orientation(self):\n return self._orientation", "def getOrientation(self):\r\n return self.orientation", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def give_orientation(pose, orr_array):\n pose.orientation.x = orr_array[0]\n pose.orientation.y = orr_array[1]\n pose.orientation.z = orr_array[2]\n pose.orientation.w = orr_array[3]", "def orientation(self) -> str:\n return self._widget._mgui_get_orientation()", "def get_orientation(self):\n return self._orientation", "def galaxy1_orbital_orientation(self):\n return self._galaxy1_orbital_orientation", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def setup_orientation_annotation(self) :\n \n # Anatomical directions in LPS convention, numpy order\n directions_anatomical = {\n \"L\" : (0,0,+1),\n \"R\" : (0,0,-1),\n \"P\" : (0,+1,0),\n \"A\" : (0,-1,0),\n \"I\" : (-1,0,0),\n \"S\" : (+1,0,0),\n }\n \n # Index directions, numpy order\n directions_index = {\n \"+x\" : (0,0,+1),\n \"-x\" : (0,0,-1),\n \"+y\" : (0,+1,0),\n \"-y\" : (0,-1,0),\n \"+z\" : (-1,0,0),\n \"-z\" : (+1,0,0),\n }\n \n directions = (directions_anatomical \n if self.display_coordinates in [\"physical\", \"nearest_axis_aligned\"]\n else directions_index)\n \n # Window locations\n locations = {\n \"up\" : (1,0),\n \"down\" : (-1,0),\n \"left\" : (0,-1),\n \"right\" : (0,1)\n }\n \n for location, p in locations.items() :\n matrix = self._3d_world_to_slice\n direction = numpy.dot(self._3d_slice_to_world, numpy.hstack((0, p)))\n \n # Find closest in-slice direction based on dot product\n closest = None\n max_distance = -1\n for name, d in directions.items() :\n distance = numpy.dot(d, direction)\n if distance > max_distance :\n max_distance = distance\n closest = name\n \n # Set text\n index = self._orientation_annotation_index[location]\n self._orientation_annotation.SetText(index, closest)", "def __init__(self):\n self.degrees = 60.0\n self.aspect_ratio = 1.0\n self.front_pane = 0.1\n self.back_pane = 100.0", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def GetOrientation(self):\r\n\r\n return self.orientation", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def set_MRI_orientation(self):\n\n if self.has_axes(MRI3Daxes):\n orientation = MRI3Daxes[:]\n if self.has_axis('time'):\n orientation += ['time']\n if self.has_axis('iteration'):\n orientation += ['iteration']\n if self.has_axis('condition'):\n orientation += ['condition']\n\n orientation += sorted(set(self.axes_names).difference(orientation))\n\n self.set_orientation(orientation)", "def GetOrientation(self):\n return self._orient", "def 
orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def set_orientation(self, val):\n self._orientation = val", "def GetToolOrientation(self):\r\n\r\n return self._tool_orientation", "def __init__(self):\n self.rot_axis = 1", "def potential_parameters(cls):\n return [\"k\", \"angle\"]", "def yy(self):\n return self.exterior[:, 1]", "def __init__(self, parent):\n super(Demo5, self).__init__(parent)\n self.angle = 0.0\n self.replication = 1.0\n self.offset = 0.0\n self.deltaRep = 1\n self.revolution = 0\n self.stepsPer90 = 180\n self.stepsLeft = self.stepsPer90\n self.deltaAng = 90.0\n self.deltaOff = 0.15\n self.spin = True\n self.x2yAspect = 1.0\n self.texture = None", "def screen_orientation(self):\n # type: () -> string_types\n return self._screen_orientation", "def relativeRotation(self):\n return self.rotation()", "def SetOrientation(self, a):\r\n\r\n self.orientation = a", "def angle(self):\r\n return self.model.angle", "def orientation(self):\n agents = self.board[self.agent_locs_idx]\n out = (agents & CellTypes.orientation_mask) >> CellTypes.orientation_bit\n return out.astype(np.int64)", "def mortality(self):\n pass", "def integralProps(self):\n return (\"param\", \"lower\", \"upper\", \"binCount\", \"xscale\", \"yweight\",\n \"autoFollow\")", "def orientation(self) -> Orientation:\n # if orientation was passed in, use it\n if self._orientation is not None:\n return convert_to_enum(self._orientation, Orientation)\n\n # replace any dead pixels with median value\n temp_image = self.image.array.copy()\n temp_image[temp_image < np.median(temp_image)] = np.median(temp_image)\n\n # find \"range\" of 80 to 90th percentiles\n row_sum = np.sum(temp_image, 0)\n col_sum = np.sum(temp_image, 1)\n row80, row90 = np.percentile(row_sum, [85, 99])\n col80, col90 = np.percentile(col_sum, [85, 99])\n row_range = row90 - row80\n col_range = col90 - col80\n\n # The true picket side will have a greater difference in\n # percentiles than will the non-picket size.\n if row_range < col_range:\n orientation = Orientation.LEFT_RIGHT\n else:\n orientation = Orientation.UP_DOWN\n return orientation", "def calculateOrientation(self):\n orientation = [\n [Orientation.LANDSCAPE, Orientation.PORTRAIT],\n [Orientation.CW_LANDSCAPE, Orientation.CW_PORTRAIT],\n [Orientation.CCW_LANDSCAPE, Orientation.CCW_PORTRAIT],\n [Orientation.FLIPPED_LANDSCAPE, Orientation.FLIPPED_PORTRAIT],\n ]\n exif = self.getExif()\n if exif and exif['Orientation'] == 3: #180\n rotation = 3\n elif exif and exif['Orientation'] == 6: #90 CCW\n rotation = 2\n elif exif and exif['Orientation'] == 8: #90 CW\n rotation = 1\n else:\n rotation = 0\n\n if self.isLandscape():\n return orientation[rotation][0]\n else:\n return orientation[rotation][1]", "def get_angle_info(self):\n return", "def set_orientation(self, vnorm):\n self.vnorm = vnorm", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def actualize_properties(self):\n\n\t\tself.a_max_coord = np.array((\t\t\t# Computes the maximal coordinates\n\t\t\tmax(self.a_atoms[\"coord_x\"]),\t\t# For the x 
axis\n\t\t\tmax(self.a_atoms[\"coord_y\"]),\t\t# For the y axis\n\t\t\tmax(self.a_atoms[\"coord_z\"])\t\t# For the z axis\n\t\t))\n\t\tself.a_min_coord = np.array((\t\t\t# Computes the minimal coordinates\n\t\t\tmin(self.a_atoms[\"coord_x\"]),\t\t# For the x axis\n\t\t\tmin(self.a_atoms[\"coord_y\"]),\t\t# For the y axis\n\t\t\tmin(self.a_atoms[\"coord_z\"])\t\t# For the z axis\n\t\t))", "def GetOrientation(self):\r\n\r\n return self._orientation", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def change_orientation(self):\n self.shape = self.shape.T", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def orient(self):\n if self.oriented:\n return\n self.substrates = self.substrates.simplify(SHP_EPSILON)\n self.substrates = shapely.ops.orient(self.substrates)\n self.oriented = True", "def _exteriorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n exteriorIDs = numerix.concatenate((numerix.ravel(XYids[..., 0].swapaxes(0, 1)),\n numerix.ravel(XYids[..., -1].swapaxes(0, 1)),\n numerix.ravel(XZids[:, 0,:]),\n numerix.ravel(XZids[:, -1,:]),\n numerix.ravel(YZids[ 0, ...]),\n numerix.ravel(YZids[-1, ...])))\n\n from fipy.variables.faceVariable import FaceVariable\n exteriorFaces = FaceVariable(mesh=self, value=False)\n exteriorFaces[exteriorIDs] = True\n return exteriorFaces", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def right_angle_axes(self):\n return self.container['right_angle_axes']", "def max_front_wheel_angle():", "def _get_orientation_visibility(self) :\n \n return self._orientation_visibility", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def xx(self):\n return self.exterior[:, 0]", "def get_angle(self):\n return self.__angle", "def getSplitOrientation(self):\n return Qt.Vertical", "def angle(self) -> float:\n ...", "def __init__(self, angle = 0, center = (0, 
0)):\n\n self.angle = angle\n self.center = center\n self.size = (2 * 194 + 3, 185)", "def orient(self,Y):\r\n self.orientation[Y]+=1\r\n if self.orientation[Y]>3:\r\n self.orientation[Y]=0\r\n if self.orientation[Y]<0:\r\n self.orientation[Y]=3\r\n self.can.delete(self.image_bateau[Y])\r\n self.image_bateau[Y]=self.create_image(self.img[self.orientation[Y]][Y],0,0)\r\n self.affichage(Y)", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def galaxy1_orbitals_properties(self):\n return self._galaxy1_orbitals_properties", "def do_altangle(self):\n nave = 10000\n x, y, z, angle = cbp.phidget.main(nave)\n current_angle = angle\n #print(current_angle)\n self.altangle = current_angle\n return current_angle", "def setSplitOrientation(self, orientation):\n pass", "def landscape_info(self):\n return self._landscape_info", "def setViewAngles(self, aspect_ratio, max_angle):\n a = math.pi*(max_angle/2.0)/180\n if aspect_ratio < 1:\n self.view_angle_v = a\n self.view_angle_h = math.asin(math.sin(a) * aspect_ratio)\n else:\n self.view_angle_h = a\n self.view_angle_v = math.asin(math.sin(a) / aspect_ratio)", "def test_default_parameters(self):\n\n assert self.test_shape.rotation_angle == 360", "def mpl_patch_arguments(self):\n raise NotImplementedError()", "def get_camera_orientation(self):\n\n # Create the vector from the camera to the robot\n vector_x = self.robot_x - self.camera_x\n vector_y = self.robot_y - self.camera_y\n vector_z = self.robot_z - self.camera_z\n\n # Calculate yaw and pitch from this vector\n yaw = math.atan2(vector_y, vector_x)\n pitch = -math.asin(vector_z)\n\n # Create the quaternion from the euler angles\n self.quaternion = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(0, pitch, yaw))", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 
21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def zaxis ( self ) :\n return self.__zaxis", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def SetOrientation(self, orientation):\r\n\r\n pass", "def VParameters(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_VParameters(self, *args)", "def angle(self):\n return 0", "def renderer_settings(self):\n return {'width': self.width,\n 'height': self.height,\n 'model_matrix': np.eye(4, dtype=np.float32),\n 'view_matrix': self.modelview_matrix,\n 'projection_matrix': self.projection_matrix}", "def angle(self) -> int:", "def base_roll_pitch_yaw(self):\n #raise NotImplementedError('Not yet implemented!')\n return np.asarray([self._robot_state.roll, self._robot_state.pitch, self._robot_state.yaw])", "def getHeight(self):\r\n height = 1\r\n if self.orientation == \"v\":\r\n height = self.size\r\n return height", "def _get_geometric_augmentation_parameter(self, entry: SupervisedKeypointDBEntry) -> (float, float):\n # Not training\n if not self._is_train:\n return 1.0, 0.0\n\n # For scale\n scale = np.clip(np.random.randn(), -1.0, 1.0) * self._config.aug_scale_factor + 1.0\n\n # For rotate:\n if random.random() < self._config.aug_rot_rate and (not entry.on_boundary):\n rotate_rad = np.clip(np.random.randn(), -2.0, 2.0) * self._config.aug_rot_rad_factor\n else:\n rotate_rad = 0.0\n\n # OK\n return scale, rotate_rad", "def rotation_angle(self):\n return self.container['rotation_angle']", "def get_orienationDict(self,orienation='zyx'):\n try:\n _str_func = 'rootShape_update'\n log.debug(cgmGEN.logString_start(_str_func))\n \n _d = {}\n _mOrientation = VALID.simpleOrientation('zyx')#cgmValid.simpleOrientation(str(modules.returnSettingsData('jointOrientation')) or 'zyx')\n _d['str'] = _mOrientation.p_string\n _d['mOrientation'] = _mOrientation\n _d['vectorAim'] = _mOrientation.p_aim.p_vector\n _d['vectorUp'] = _mOrientation.p_up.p_vector\n _d['vectorOut'] = _mOrientation.p_out.p_vector\n \n _d['vectorAimNeg'] = _mOrientation.p_aimNegative.p_vector\n _d['vectorUpNeg'] = _mOrientation.p_upNegative.p_vector\n _d['vectorOutNeg'] = _mOrientation.p_outNegative.p_vector\n \n \n _d['stringAim'] = _mOrientation.p_aim.p_string\n _d['stringUp'] = _mOrientation.p_up.p_string\n _d['stringOut'] = _mOrientation.p_out.p_string\n \n _d['stringAimNeg'] = _mOrientation.p_aimNegative.p_string\n _d['stringUpNeg'] = _mOrientation.p_upNegative.p_string\n _d['stringOutNeg'] = _mOrientation.p_outNegative.p_string \n return _d\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def rotate_ship(self):\n\t\tif self.cur_orient == GameBoard.O_HORIZONTAL:\n\t\t\tself.cur_orient = GameBoard.O_VERTICAL\n\t\telse:\n\t\t\tself.cur_orient = GameBoard.O_HORIZONTAL", "def extent(self):\n ulx, uly, lrx, lry = self.ul_lr\n return ulx, lry, lrx, uly", "def attributes_to_save(self):\r\n return ['initial_pos', 'height', 'width']", "def DEFAULT_MAX_ROTATION(self): 
# real signature unknown; restored from __doc__\n pass", "def initRelativeRotation(self):\n self.__relRotationStartValue = self.rotation()", "def axis(self):\r\n return self._arm.axis", "def getAngles(self) -> tuple:\n return self.getHeading(), self.getPitch(), self.getRoll()", "def orientation(self, point):\n p_x = self.begin.x\n p_y = self.begin.y\n\n q_x = self.end.x\n q_y = self.end.y\n\n r_x = point.x\n r_y = point.y\n\n D = q_x * r_y + p_x * q_y + p_y * r_x - q_x * p_y - r_x * q_y - r_y * p_x\n\n if D > 0:\n return 1\n elif D == 0:\n return 0\n else:\n return -1", "def wheels_properties(self):\n height_wheels = 180.\n radius_wheels = 300.\n width_wheels = 80.\n return height_wheels, radius_wheels, width_wheels", "def getAssemblyPitch(self):\n return self.spatialGrid.pitch", "def set_symmetry(self, symmetry):\n return super(Orientation, self).set_symmetry(C1, symmetry)", "def is_orthogonal(self):\n pass", "def test_default_parameters(self):\n\n assert self.test_shape.rotation_angle == 360\n assert self.test_shape.extrude_both", "def getFinalLarmorAngle(self):\n return np.degrees(self.theta_L_array[-1])" ]
[ "0.61678284", "0.61665916", "0.61640745", "0.60078", "0.60078", "0.5891339", "0.5885623", "0.58542037", "0.5838997", "0.5836636", "0.5822891", "0.58147955", "0.5795377", "0.57695895", "0.57024115", "0.56947505", "0.56763643", "0.5612986", "0.5593346", "0.5587369", "0.5578711", "0.5572987", "0.5562875", "0.55135816", "0.5508962", "0.5501228", "0.5457921", "0.5444679", "0.5438654", "0.5415286", "0.5413526", "0.54106504", "0.53910536", "0.53736496", "0.5358842", "0.5358088", "0.53148633", "0.53044325", "0.52797174", "0.52759147", "0.5275075", "0.5275075", "0.52505237", "0.52501607", "0.5227863", "0.5227863", "0.5217199", "0.5211306", "0.518699", "0.51853096", "0.5175644", "0.5175041", "0.5166521", "0.5141916", "0.5139789", "0.51315725", "0.51292396", "0.5120749", "0.51003945", "0.50986624", "0.50849015", "0.50825304", "0.50808173", "0.5073464", "0.5066875", "0.50608796", "0.5054485", "0.5052833", "0.5038678", "0.5037069", "0.5033595", "0.5029699", "0.5029692", "0.50194746", "0.5015101", "0.50105494", "0.5005656", "0.49994493", "0.4992415", "0.49714738", "0.4968942", "0.49673307", "0.4965917", "0.4965214", "0.49650487", "0.4962", "0.49559554", "0.4954716", "0.49543858", "0.4953452", "0.49497905", "0.49497893", "0.49433884", "0.4943164", "0.49242204", "0.49241677", "0.49232352", "0.49225986", "0.49217114", "0.49185497" ]
0.80023855
0
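The property above exposes the exterior orientation parameter vector, and the next entry's rotationMatrix reads elements 3-5 of it. The sketch below is a minimal illustration of how a 3x3 rotation matrix is commonly composed from those three angles; the [X0, Y0, Z0, omega, phi, kappa] ordering and the Rx·Ry·Rz composition are assumptions made for illustration and may differ from the convention used by the dataset's (unshown) Compute3DRotationMatrix helper.

import numpy as np

def rotation_from_angles(omega, phi, kappa):
    """Compose a 3x3 rotation matrix from omega, phi, kappa (radians).

    Assumed convention: R = Rx(omega) @ Ry(phi) @ Rz(kappa).
    """
    co, so = np.cos(omega), np.sin(omega)
    cp, sp = np.cos(phi), np.sin(phi)
    ck, sk = np.cos(kappa), np.sin(kappa)

    rx = np.array([[1, 0, 0],
                   [0, co, -so],
                   [0, so, co]])
    ry = np.array([[cp, 0, sp],
                   [0, 1, 0],
                   [-sp, 0, cp]])
    rz = np.array([[ck, -sk, 0],
                   [sk, ck, 0],
                   [0, 0, 1]])
    return rx @ ry @ rz

# Assumed parameter ordering [X0, Y0, Z0, omega, phi, kappa]; this is why the
# rotation-matrix entry below indexes elements 3, 4 and 5 of the vector.
params = np.array([10.0, 20.0, 1500.0, 0.01, -0.02, 0.5])
R = rotation_from_angles(*params[3:6])
assert np.allclose(R @ R.T, np.eye(3))  # a rotation matrix is orthonormal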
The rotation matrix of the image. Relates to the exterior orientation.
def rotationMatrix(self):
    # elements 3-5 of the exterior orientation parameters are the three rotation angles
    R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3],
                                self.exteriorOrientationParameters[4],
                                self.exteriorOrientationParameters[5])
    return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rotation_angle(self, image):\n \n # TODO: Make real functionality\n return 0", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def matrix(self):\n return self._rotation", "def rotation(self):\n return self.transform.getRotation() + [0]", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def rotated_image(image):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n return image.rotate(orientation)", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n\n return self._rotation", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. 
- txx - tyy\n\n return rot", "def rotation_angle(self):\n return self.container['rotation_angle']", "def relativeRotation(self):\n return self.rotation()", "def rotate_image(image):\n return tf.image.rot90(image)", "def rotation(self) -> CameraRotationType:\n return self._rotation", "def _get_rotation_matrix(transform):\n # caution: UE4 is using left-hand ortation order\n roll = np.deg2rad(-transform.rotation.roll)\n pitch = np.deg2rad(-transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n sr, cr = np.sin(roll), np.cos(roll)\n sp, cp = np.sin(pitch), np.cos(pitch)\n sy, cy = np.sin(yaw), np.cos(yaw)\n rotation_matrix = np.array([[cy * cp, -sy * sr + cy * sp * sr, cy * sp * cr + sy * sr],\n [sy * cp, cy * sp * sr + cy * sr, -cy * sr + sy * sp * cr],\n [-sp, cp * sr, cp * cr]])\n return rotation_matrix", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def camera_rotation(self) -> CameraRotationType:\n return self._rotation", "def rotation(self) -> float:\n xs, ys = self.xcoords.data, self.ycoords.data\n rot = 0\n if xs.ndim == 2:\n ddx1 = xs[0, -1] - xs[0, 0]\n ddy1 = ys[0, -1] - ys[0, 0]\n if not np.isclose(ddx1, 0):\n rot = math.degrees(math.atan(ddy1 / ddx1))\n else:\n rot = -90\n if ddx1 < 0:\n rot = 180 + rot\n elif ddy1 < 0:\n rot = 360 + rot\n return rot", "def determine_rotation_matrix(self, origin, angle, scale):\n # scaling will be ignored at this step\n rotation_matrix = cv2.getRotationMatrix2D(origin, angle * 180 / np.pi, scale)\n return rotation_matrix", "def rotate(self):\n val = None\n try:\n \"\"\"Get rotation tags\"\"\"\n f = open(self._name, 'rb')\n tags = exifread.process_file(f)\n f.close()\n orientation = tags[\"Image Orientation\"]\n val = orientation.values\n\n except:\n return True\n\n if 3 in val:\n rotation = 180\n\n elif 6 in val:\n rotation = 270\n\n elif 8 in val:\n rotation = 90\n\n else:\n rotation = 0\n\n self._image = pygame.transform.rotate(self._image, rotation)", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def rotateImage(self, img, angle=90):\n if (angle == 90) :\n return(cv2.flip(cv2.transpose(img),flipCode=0))\n elif (angle == -90) :\n return(cv2.flip(cv2.transpose(img),flipCode=1))\n else :\n center = (img.shape[1]/2.0,img.shape[0]/2.0)\n rotate = cv2.getRotationMatrix2D(center, angle, 1.0)\n return cv2.warpAffine(img, rotate, (img.shape[1], img.shape[0]))", "def image_rotation(x):\n rands = tf.truncated_normal([tf.shape(x)[0]], stddev=0.05)\n return images_rotate(x, rands, interpolation='BILINEAR')", "def apply_rotation(image):\n\n\t# Load the image into a new BytesIO\n\tsImg = BytesIO(image)\n\tsNewImg = BytesIO(b'')\n\n\t# Create a new Pillow instance from the raw data\n\toImg = Pillow.open(sImg)\n\n\t# Store the image format\n\tsFormat = oImg.format\n\n\t# Get the proper sequence\n\ttry:\n\t\tlSeq = SEQUENCES[oImg._getexif()[ORIENTATION_TAG] - 1]\n\n\t\t# Transpose the image\n\t\tfor i in lSeq:\n\t\t\toImg = 
oImg.transpose(i)\n\n\t\t# Save the image using the same format as we got it in\n\t\toImg.save(sNewImg, sFormat)\n\n\t\t# Get the raw bytes\n\t\tsRet = sNewImg.getvalue()\n\n\t# If there's no sequence, return the image as is\n\texcept Exception as e:\n\t\tsRet = image\n\n\t# Cleanup\n\toImg.close()\n\tsImg.close()\n\tsNewImg.close()\n\n\t# Return\n\treturn sRet", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def rotate_right_90(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n \r\n #flipping image 90 degrees\r\n newimg = im.transpose(PIL.Image.ROTATE_90)\r\n \r\n return img", "def _rotate_image_90(self, img: ndarray, k: int) -> ndarray:\n if img.shape[0] < img.shape[1]:\n self.y = np.rot90(img, k)\n return self.y\n else:\n return img", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def get_rotation_encryption_angle(self):\n return self.__rotation_encryption_angle", "def rotate(self, image, angle):\n # Get the image size\n # No that's not an error - NumPy stores image matricies backwards\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n\n # Convert the OpenCV 3x2 rotation matrix to 3x3\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n )\n\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n # Shorthand for below calcs\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n\n # Obtain the rotated coordinates of the image corners\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n\n # Find the size of the new image\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n\n # We require a translation matrix to keep the image centred\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]\n ])\n\n # Compute the tranform for the combined rotation and translation\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n # Apply the transform\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR\n )\n\n return result", "def rotation_matrix(yaw, pitch) -> TransformationMatrixType:\n return rotation_matrix_yx(math.radians(yaw + 180), math.radians(pitch))", "def apply_image_rotation_by_exif(image, exif_orientation):\n if exif_orientation == 3:\n result_image = image.rotate(180, expand=True)\n elif exif_orientation == 6:\n result_image = image.rotate(270, 
expand=True)\n elif exif_orientation == 8:\n result_image = image.rotate(90, expand=True)\n else:\n result_image = image\n return result_image", "def get_rot(m_obj):\n mfn_obj = oMa.MFnTransform(m_obj)\n\n rot = mfn_obj.rotation()\n\n return rot", "def _reorient_numpy_image(self, image, orientation):\n if len(image.shape) == 2:\n image = np.resize(image, (image.shape[0], image.shape[1], 1))\n if orientation in {\n tifftools.constants.Orientation.LeftTop.value,\n tifftools.constants.Orientation.RightTop.value,\n tifftools.constants.Orientation.LeftBottom.value,\n tifftools.constants.Orientation.RightBottom.value}:\n image = image.transpose(1, 0, 2)\n if orientation in {\n tifftools.constants.Orientation.BottomLeft.value,\n tifftools.constants.Orientation.BottomRight.value,\n tifftools.constants.Orientation.LeftBottom.value,\n tifftools.constants.Orientation.RightBottom.value}:\n image = image[::-1, ::, ::]\n if orientation in {\n tifftools.constants.Orientation.TopRight.value,\n tifftools.constants.Orientation.BottomRight.value,\n tifftools.constants.Orientation.RightTop.value,\n tifftools.constants.Orientation.RightBottom.value}:\n image = image[::, ::-1, ::]\n return image", "def rotation_inv(R: np.array) -> np.array:\n return R.T", "def rotate((x,y)):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n w,h = image_size()\n if orientation == 0: return (x,y)\n if orientation == -90: return (h-y,x)\n if orientation == 90: return (y,w-x)\n if orientation == 180: return (w-x,h-y)\n return (x,y)", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotate(self):\n pass", "def matrix(self):\n\t\t# apply yaw, then pitch, then roll\n\t\treturn Matrix((\n\t\t\t(\t1,\t0,\t\t\t\t\t\t\t0\t\t\t\t\t\t\t),\n\t\t\t(\t0,\tmath.cos(self.roll.val),\t-math.sin(self.roll.val)\t),\n\t\t\t(\t0,\tmath.sin(self.roll.val),\tmath.cos(self.roll.val)\t\t)\n\t\t))* Matrix((\n\t\t\t(\tmath.cos(self.pitch.val),\t0,\tmath.sin(self.pitch.val)\t),\n\t\t\t(\t0,\t\t\t\t\t\t\t1,\t0 \t\t\t\t\t\t\t),\n\t\t\t(\t-math.sin(self.pitch.val),\t0,\tmath.cos(self.pitch.val)\t)\n\t\t)) * Matrix((\n\t\t\t(\tmath.cos(self.yaw.val),\t-math.sin(self.yaw.val),\t0\t),\n\t\t\t(\tmath.sin(self.yaw.val),\tmath.cos(self.yaw.val),\t\t0\t),\n\t\t\t(\t0,\t\t\t\t\t\t0,\t\t\t\t\t\t\t1\t)\n\t\t))", "def matrix(self):\n m = Matrix.identity(4, 4)\n\n m[:3, :3] = self.rotation.matrix.data\n m[:3, 3:4] = self.translation.matrix.data\n\n return m", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n 
self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))", "def calculateOrientation(self):\n orientation = [\n [Orientation.LANDSCAPE, Orientation.PORTRAIT],\n [Orientation.CW_LANDSCAPE, Orientation.CW_PORTRAIT],\n [Orientation.CCW_LANDSCAPE, Orientation.CCW_PORTRAIT],\n [Orientation.FLIPPED_LANDSCAPE, Orientation.FLIPPED_PORTRAIT],\n ]\n exif = self.getExif()\n if exif and exif['Orientation'] == 3: #180\n rotation = 3\n elif exif and exif['Orientation'] == 6: #90 CCW\n rotation = 2\n elif exif and exif['Orientation'] == 8: #90 CW\n rotation = 1\n else:\n rotation = 0\n\n if self.isLandscape():\n return orientation[rotation][0]\n else:\n return orientation[rotation][1]", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach() * 0\n ones = zeros.detach() + 1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).view(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).view(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).view(B, 3, 3)\n\n # rotMat = xmat.bmm(ymat).bmm(zmat)\n # changed to match opencv and conversion euler->mat/mat->euler\n rotMat = torch.bmm(zmat, torch.bmm(ymat, xmat))\n\n return rotMat", "def __cv2_rotate(image, deg):\n num_rows, num_cols = image.shape[:2]\n\n rotation_matrix = cv.getRotationMatrix2D((num_cols / 2, num_rows / 2), deg, 1)\n image = cv.warpAffine(np.float32(image), rotation_matrix, (num_cols, num_rows))\n # In case of only one channel, warpAffine removes channel dimension.\"\"\"\n return image", "def rotate_and_wrap_image(self, image, degree_of_rotation):\n\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, degree_of_rotation, 1.0)\n # borderMode (constant) and borderValue are important for maintaiing consistency \n ri = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR, borderMode = cv2.BORDER_CONSTANT,borderValue = (255,255,255))\n return ri", "def rotate(image_path):\n try:\n with Image.open(image_path) as img:\n img = ImageOps.exif_transpose(img)\n img.save(image_path, format=img.format, quality=95)\n except Exception as e:\n log.warn(f'Cannot rotate input image: [{e}]')", "def rotate(self, *args, **kwargs):\n return _image.image_rotate(self, *args, **kwargs)", "def rotateImage(image, angle):\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_NEAREST)\n return result", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def 
im_rotate(im_path, rotate_angle):\n image = cv.imread(str(im_path))\n image_r = cv.rotate(image, rotate_angle)\n\n return image_r", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def im_rotate(img, angle):\n rows, cols = img.shape\n rotM = cv2.getRotationMatrix2D((cols/2-0.5, rows/2-0.5), angle, 1)\n imrotated = cv2.warpAffine(img, rotM, (cols, rows))\n\n return imrotated", "def get_rot_from_mat(m_mat):\n trans_matrix = oMa.MTransformationMatrix(m_mat)\n rot = trans_matrix.rotation()\n\n return rot", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def rotate(im: Image) -> Image:\n return im.rotate(random.randint(0, 360))", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def rotate_3D(image, angle, axes=(1, 2)):\n rotated_image = scipy.ndimage.interpolation.rotate(\n image, angle, axes, reshape=False)\n return rotated_image", "def compute_rotation(self):\n if self.predictions[self.iteration][0] == 90.0 or self.predictions[self.iteration][0] == 270.0:\n self.rotation = 20\n self.initial_adjust = True\n return\n\n if self.iteration == 0 or (self.iteration == 1 and self.initial_adjust):\n self.rotation = rotate.get_90_deg_rotation(self.predictions[self.iteration])\n elif self.iteration == 1 or (self.iteration == 2 and self.initial_adjust):\n self.rotation = rotate.get_45_deg_rotation(self.predictions, self.current_position)\n elif self.iteration >= 2 or (self.iteration > 2 and self.initial_adjust):\n self.rotation = rotate.get_fine_rotation(self.iteration)", "def deg_mat(self):\n return self._deg_mat", "def rotate(img_path):\r\n\timg = cv2.imread(img_path)\r\n\timgcorrect = ImgCorrect(img)\r\n\tlines_img = imgcorrect.img_lines()\r\n\tif lines_img is None:\r\n\t rotated = imgcorrect.rotate_image(0)\r\n\telse:\r\n\t degree = imgcorrect.search_lines()\r\n\t rotated = imgcorrect.rotate_image(degree)\r\n\tcv2.imwrite(img_path, rotated)", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "def rotate_img(image, degrees):\n if degrees == 90:\n return cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)\n elif degrees == 180:\n return cv2.rotate(image, cv2.ROTATE_180)\n elif degrees == 270:\n return cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)\n elif degrees == 0:\n return image\n else:\n print(\"DEGREE 
= \", degrees)", "def rotateImage(self):\n self.cnvImgOrig.rotate(\"./images/origPic.tiff\")\n self.cnvImgTest.rotate(\"./images/testPic.tiff\")", "def inverse(self):\n return Rotation(self.matrix.transposed())", "def galaxy1_orbital_orientation(self):\n return self._galaxy1_orbital_orientation", "def get_rot(self) -> WAQuaternion:\n pass", "def detect_orientation(image):\n custom_oem_psm_config = r'--oem 1--psm 7'\n newdata = pytesseract.image_to_osd(image,config= custom_oem_psm_config)\n rotation = int(re.search('(?<=Rotate: )\\\\d+', newdata).group(0))\n # print(\"Rotation degrees : \", rotation)\n return rotate_img(image, rotation)", "def rotation_matrix(angle) -> np.array:\n return np.array([\n [np.cos(angle), np.sin(angle)],\n [-np.sin(angle), np.cos(angle)]])", "def _rotation_from_gradient(self,m):\n\t\ttheta = -np.arctan(m)\n\t\tself.current_theta = theta\n\t\treturn self._rotation_from_angle(theta)", "def rotate(img, angle):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n aug = iaa.Affine(rotate=angle)\n return aug.augment_image(img)", "def _r270(self,m):\n return np.rot90(m,3)", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rot_mat2rot_angle(rot_mat):\n return np.arctan2(rot_mat[1, 0], rot_mat[0, 0])", "def rotationMatrix_RzRyRz(self):\n\n R = Compute3DRotationMatrix_RzRyRz(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def eulerAnglesToRotationMatrix(self, theta):\n\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def rotate_image(image, angle):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(array(image_size) / 2)\n\n rot_mat = vstack([cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]])\n trans_mat = identity(3)\n\n w2 = image_size[0] * 0.5\n h2 = image_size[1] * 0.5\n\n rot_mat_notranslate = matrix(rot_mat[0:2, 0:2])\n\n tl = (array([-w2, h2]) * rot_mat_notranslate).A[0]\n tr = (array([w2, h2]) * rot_mat_notranslate).A[0]\n bl = (array([-w2, -h2]) * rot_mat_notranslate).A[0]\n br = (array([w2, -h2]) * rot_mat_notranslate).A[0]\n\n x_coords = [pt[0] for pt in [tl, tr, bl, br]]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in [tl, tr, bl, br]]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = 
[y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n new_image_size = (new_w, new_h)\n\n new_midx = new_w * 0.5\n new_midy = new_h * 0.5\n\n dx = int(new_midx - w2)\n dy = int(new_midy - h2)\n\n trans_mat = getTranslationMatrix2d(dx, dy)\n affine_mat = (matrix(trans_mat) * matrix(rot_mat))[0:2, :]\n result = cv2.warpAffine(image, affine_mat, new_image_size, flags=cv2.INTER_LINEAR)\n\n return result", "def get_rotation_back_angle(w, r):\n # now let us compute the image of the vector (r,0), that is h(r,0)\n\n zeros = torch.zeros_like(r)\n\n origin_circle = torch.stack([r, zeros], dim=-1) # create (B1, B2, .., B_N, r, 0)\n\n h_orig_circle = h(origin_circle, w, r)\n\n return T_2(h_orig_circle)", "def rotate_image(image, angle):\r\n\r\n # Get the image size\r\n # No that's not an error - NumPy stores image matricies backwards\r\n image_size = (image.shape[1], image.shape[0])\r\n image_center = tuple(np.array(image_size) / 2)\r\n\r\n # Convert the OpenCV 3x2 rotation matrix to 3x3\r\n rot_mat = np.vstack(\r\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\r\n )\r\n\r\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\r\n\r\n # Shorthand for below calcs\r\n image_w2 = image_size[0] * 0.5\r\n image_h2 = image_size[1] * 0.5\r\n\r\n # Obtain the rotated coordinates of the image corners\r\n rotated_coords = [\r\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]\r\n ]\r\n\r\n # Find the size of the new image\r\n x_coords = [pt[0] for pt in rotated_coords]\r\n x_pos = [x for x in x_coords if x > 0]\r\n x_neg = [x for x in x_coords if x < 0]\r\n\r\n y_coords = [pt[1] for pt in rotated_coords]\r\n y_pos = [y for y in y_coords if y > 0]\r\n y_neg = [y for y in y_coords if y < 0]\r\n\r\n right_bound = max(x_pos)\r\n left_bound = min(x_neg)\r\n top_bound = max(y_pos)\r\n bot_bound = min(y_neg)\r\n\r\n new_w = int(abs(right_bound - left_bound))\r\n new_h = int(abs(top_bound - bot_bound))\r\n\r\n # We require a translation matrix to keep the image centred\r\n trans_mat = np.matrix([\r\n [1, 0, int(new_w * 0.5 - image_w2)],\r\n [0, 1, int(new_h * 0.5 - image_h2)],\r\n [0, 0, 1]\r\n ])\r\n\r\n # Compute the tranform for the combined rotation and translation\r\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\r\n\r\n # Apply the transform\r\n result = cv2.warpAffine(\r\n image,\r\n affine_mat,\r\n (new_w, new_h),\r\n flags=cv2.INTER_LINEAR\r\n )\r\n\r\n return result", "def getOrientation(self):\r\n return self.orientation", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot" ]
[ "0.7759922", "0.77279866", "0.7530842", "0.7502491", "0.7206852", "0.71904266", "0.7162689", "0.71563435", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7005358", "0.6981667", "0.6911321", "0.68793875", "0.68270576", "0.679283", "0.67912734", "0.6790262", "0.67638195", "0.67603487", "0.67526007", "0.6750879", "0.6747828", "0.6702097", "0.670177", "0.66791886", "0.6669619", "0.66346693", "0.6599657", "0.652011", "0.6489357", "0.6478114", "0.644607", "0.64343905", "0.6429481", "0.6428973", "0.63882273", "0.63699096", "0.6350216", "0.63384175", "0.6327105", "0.632621", "0.6318789", "0.6299089", "0.62917876", "0.62702763", "0.62631404", "0.6260796", "0.6255669", "0.62380916", "0.6237384", "0.6231948", "0.62276775", "0.6197764", "0.6192595", "0.61874574", "0.61820245", "0.6176092", "0.61707777", "0.6165205", "0.6163984", "0.6160134", "0.6158438", "0.6157506", "0.6155815", "0.6143829", "0.6143023", "0.6142756", "0.61397237", "0.61145645", "0.61103886", "0.6110211", "0.61100966", "0.61041766", "0.6096376", "0.6094306", "0.60888606", "0.60831213", "0.60743773", "0.60718304", "0.60659623", "0.6064851", "0.60636824", "0.6058148", "0.6057082", "0.6056926", "0.6055493", "0.605416", "0.60535264", "0.60492706", "0.6048952" ]
0.77768993
0
The rotation matrix of the image. Relates to the exterior orientation
def rotationMatrix_RzRyRz(self):

        R = Compute3DRotationMatrix_RzRyRz(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],
                                           self.exteriorOrientationParameters[5])

        return R
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def get_rotation_angle(self, image):\n \n # TODO: Make real functionality\n return 0", "def rotation_matrix(self):\n return self.affine_matrix[0:3][:, 0:3]", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def matrix(self):\n return self._rotation", "def rotation(self):\n return self.transform.getRotation() + [0]", "def rotation_matrix(self):\n self._normalise()\n product_matrix = np.dot(self._q_matrix(), self._q_bar_matrix().conj().transpose())\n return product_matrix[1:][:,1:]", "def transformation_matrix(self):\n t = np.array([[0.0], [0.0], [0.0]])\n Rt = np.hstack([self.rotation_matrix, t])\n return np.vstack([Rt, np.array([0.0, 0.0, 0.0, 1.0])])", "def rotated_image(image):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n return image.rotate(orientation)", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n return self._rotation", "def rotation(self):\n\n return self._rotation", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. 
- txx - tyy\n\n return rot", "def rotation_angle(self):\n return self.container['rotation_angle']", "def relativeRotation(self):\n return self.rotation()", "def rotate_image(image):\n return tf.image.rot90(image)", "def rotation(self) -> CameraRotationType:\n return self._rotation", "def _get_rotation_matrix(transform):\n # caution: UE4 is using left-hand ortation order\n roll = np.deg2rad(-transform.rotation.roll)\n pitch = np.deg2rad(-transform.rotation.pitch)\n yaw = np.deg2rad(transform.rotation.yaw)\n sr, cr = np.sin(roll), np.cos(roll)\n sp, cp = np.sin(pitch), np.cos(pitch)\n sy, cy = np.sin(yaw), np.cos(yaw)\n rotation_matrix = np.array([[cy * cp, -sy * sr + cy * sp * sr, cy * sp * cr + sy * sr],\n [sy * cp, cy * sp * sr + cy * sr, -cy * sr + sy * sp * cr],\n [-sp, cp * sr, cp * cr]])\n return rotation_matrix", "def rotate(mat,angle):\n return np.dot(Mueller.rotator(angle), np.dot(mat, Mueller.rotator(-angle)))", "def camera_rotation(self) -> CameraRotationType:\n return self._rotation", "def rotation(self) -> float:\n xs, ys = self.xcoords.data, self.ycoords.data\n rot = 0\n if xs.ndim == 2:\n ddx1 = xs[0, -1] - xs[0, 0]\n ddy1 = ys[0, -1] - ys[0, 0]\n if not np.isclose(ddx1, 0):\n rot = math.degrees(math.atan(ddy1 / ddx1))\n else:\n rot = -90\n if ddx1 < 0:\n rot = 180 + rot\n elif ddy1 < 0:\n rot = 360 + rot\n return rot", "def determine_rotation_matrix(self, origin, angle, scale):\n # scaling will be ignored at this step\n rotation_matrix = cv2.getRotationMatrix2D(origin, angle * 180 / np.pi, scale)\n return rotation_matrix", "def rotate(self):\n val = None\n try:\n \"\"\"Get rotation tags\"\"\"\n f = open(self._name, 'rb')\n tags = exifread.process_file(f)\n f.close()\n orientation = tags[\"Image Orientation\"]\n val = orientation.values\n\n except:\n return True\n\n if 3 in val:\n rotation = 180\n\n elif 6 in val:\n rotation = 270\n\n elif 8 in val:\n rotation = 90\n\n else:\n rotation = 0\n\n self._image = pygame.transform.rotate(self._image, rotation)", "def rotate(mat,angle):\n return np.dot(Jones.rotator(angle), np.dot(mat, Jones.rotator(-angle)))", "def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation", "def rotateImage(self, img, angle=90):\n if (angle == 90) :\n return(cv2.flip(cv2.transpose(img),flipCode=0))\n elif (angle == -90) :\n return(cv2.flip(cv2.transpose(img),flipCode=1))\n else :\n center = (img.shape[1]/2.0,img.shape[0]/2.0)\n rotate = cv2.getRotationMatrix2D(center, angle, 1.0)\n return cv2.warpAffine(img, rotate, (img.shape[1], img.shape[0]))", "def image_rotation(x):\n rands = tf.truncated_normal([tf.shape(x)[0]], stddev=0.05)\n return images_rotate(x, rands, interpolation='BILINEAR')", "def apply_rotation(image):\n\n\t# Load the image into a new BytesIO\n\tsImg = BytesIO(image)\n\tsNewImg = BytesIO(b'')\n\n\t# Create a new Pillow instance from the raw data\n\toImg = Pillow.open(sImg)\n\n\t# Store the image format\n\tsFormat = oImg.format\n\n\t# Get the proper sequence\n\ttry:\n\t\tlSeq = SEQUENCES[oImg._getexif()[ORIENTATION_TAG] - 1]\n\n\t\t# Transpose the image\n\t\tfor i in lSeq:\n\t\t\toImg = 
oImg.transpose(i)\n\n\t\t# Save the image using the same format as we got it in\n\t\toImg.save(sNewImg, sFormat)\n\n\t\t# Get the raw bytes\n\t\tsRet = sNewImg.getvalue()\n\n\t# If there's no sequence, return the image as is\n\texcept Exception as e:\n\t\tsRet = image\n\n\t# Cleanup\n\toImg.close()\n\tsImg.close()\n\tsNewImg.close()\n\n\t# Return\n\treturn sRet", "def _rotationMatrix(self, n_dim, theta):\n i = np.identity(n_dim)\n c, s = np.cos(theta)*i, np.sin(theta)*i\n rotation = np.bmat([[c, s], [-s, c]])\n return rotation", "def rotate_right_90(img):\r\n #reading image\r\n im = Image.open(\"filename\")\r\n \r\n #flipping image 90 degrees\r\n newimg = im.transpose(PIL.Image.ROTATE_90)\r\n \r\n return img", "def _rotate_image_90(self, img: ndarray, k: int) -> ndarray:\n if img.shape[0] < img.shape[1]:\n self.y = np.rot90(img, k)\n return self.y\n else:\n return img", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def get_rotation_encryption_angle(self):\n return self.__rotation_encryption_angle", "def rotate(self, image, angle):\n # Get the image size\n # No that's not an error - NumPy stores image matricies backwards\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n\n # Convert the OpenCV 3x2 rotation matrix to 3x3\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n )\n\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n # Shorthand for below calcs\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n\n # Obtain the rotated coordinates of the image corners\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n\n # Find the size of the new image\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n\n # We require a translation matrix to keep the image centred\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]\n ])\n\n # Compute the tranform for the combined rotation and translation\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n # Apply the transform\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR\n )\n\n return result", "def rotation_matrix(yaw, pitch) -> TransformationMatrixType:\n return rotation_matrix_yx(math.radians(yaw + 180), math.radians(pitch))", "def apply_image_rotation_by_exif(image, exif_orientation):\n if exif_orientation == 3:\n result_image = image.rotate(180, expand=True)\n elif exif_orientation == 6:\n result_image = image.rotate(270, 
expand=True)\n elif exif_orientation == 8:\n result_image = image.rotate(90, expand=True)\n else:\n result_image = image\n return result_image", "def get_rot(m_obj):\n mfn_obj = oMa.MFnTransform(m_obj)\n\n rot = mfn_obj.rotation()\n\n return rot", "def _reorient_numpy_image(self, image, orientation):\n if len(image.shape) == 2:\n image = np.resize(image, (image.shape[0], image.shape[1], 1))\n if orientation in {\n tifftools.constants.Orientation.LeftTop.value,\n tifftools.constants.Orientation.RightTop.value,\n tifftools.constants.Orientation.LeftBottom.value,\n tifftools.constants.Orientation.RightBottom.value}:\n image = image.transpose(1, 0, 2)\n if orientation in {\n tifftools.constants.Orientation.BottomLeft.value,\n tifftools.constants.Orientation.BottomRight.value,\n tifftools.constants.Orientation.LeftBottom.value,\n tifftools.constants.Orientation.RightBottom.value}:\n image = image[::-1, ::, ::]\n if orientation in {\n tifftools.constants.Orientation.TopRight.value,\n tifftools.constants.Orientation.BottomRight.value,\n tifftools.constants.Orientation.RightTop.value,\n tifftools.constants.Orientation.RightBottom.value}:\n image = image[::, ::-1, ::]\n return image", "def rotation_inv(R: np.array) -> np.array:\n return R.T", "def rotate((x,y)):\n orientation = parameter('Orientation',90) # in degrees counter-clockwise\n if orientation == None: orienation = 0\n w,h = image_size()\n if orientation == 0: return (x,y)\n if orientation == -90: return (h-y,x)\n if orientation == 90: return (y,w-x)\n if orientation == 180: return (w-x,h-y)\n return (x,y)", "def _rotate(self, affine):\n dims = affine.shape[0]\n if not np.isscalar(self.rotation):\n raise Exception('this class requires exactly one entry for rotation!')\n theta = (self.deformrandomstate.rand() - 0.5) * 2 * self.rotation\n if dims == 4:\n\n # sample unit vector:\n u = np.random.random(3)\n u /= np.sqrt(np.sum([uu ** 2 for uu in u]) + 1e-8)\n ct = np.cos(theta)\n st = np.sin(theta)\n rot = np.eye(4)\n rot[:3, :3] = [\n [ct + u[0] ** 2 * (1 - ct), u[0] * u[1] * (1 - ct) - u[2] * st, u[0] * u[2] * (1 - ct) + u[2] * st],\n [u[1] * u[0] * (1 - ct) + u[2] * st, ct + u[1] ** 2 * (1 - ct), u[1] * u[2] * (1 - ct) - u[0] * st],\n [u[2] * u[0] * (1 - ct) - u[1] * st, u[2] * u[1] * (1 - ct) + u[0] * st, ct + u[2] ** 2 * (1 - ct)]]\n\n elif dims == 3:\n rot = np.eye(3)\n rot[:2, :2] = np.asarray([[np.cos(theta), np.sin(theta)], [-np.sin(theta), np.cos(theta)]])\n else:\n raise Exception(\n 'implement this for each dimension, since not yet implemented for dimension {}'.format(dims))\n\n return np.matmul(rot, affine)", "def rotate(self):\n pass", "def matrix(self):\n\t\t# apply yaw, then pitch, then roll\n\t\treturn Matrix((\n\t\t\t(\t1,\t0,\t\t\t\t\t\t\t0\t\t\t\t\t\t\t),\n\t\t\t(\t0,\tmath.cos(self.roll.val),\t-math.sin(self.roll.val)\t),\n\t\t\t(\t0,\tmath.sin(self.roll.val),\tmath.cos(self.roll.val)\t\t)\n\t\t))* Matrix((\n\t\t\t(\tmath.cos(self.pitch.val),\t0,\tmath.sin(self.pitch.val)\t),\n\t\t\t(\t0,\t\t\t\t\t\t\t1,\t0 \t\t\t\t\t\t\t),\n\t\t\t(\t-math.sin(self.pitch.val),\t0,\tmath.cos(self.pitch.val)\t)\n\t\t)) * Matrix((\n\t\t\t(\tmath.cos(self.yaw.val),\t-math.sin(self.yaw.val),\t0\t),\n\t\t\t(\tmath.sin(self.yaw.val),\tmath.cos(self.yaw.val),\t\t0\t),\n\t\t\t(\t0,\t\t\t\t\t\t0,\t\t\t\t\t\t\t1\t)\n\t\t))", "def matrix(self):\n m = Matrix.identity(4, 4)\n\n m[:3, :3] = self.rotation.matrix.data\n m[:3, 3:4] = self.translation.matrix.data\n\n return m", "def __rotate_model(self):\n self.__model_matrix = self.__get_rotation_matrix(\n 
self.__face.position_cartesian,\n (1 + self.__face.position[2]) * 0.5)", "def _rotation_matrix(theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.array(((c, -s), (s, c)))", "def calculateOrientation(self):\n orientation = [\n [Orientation.LANDSCAPE, Orientation.PORTRAIT],\n [Orientation.CW_LANDSCAPE, Orientation.CW_PORTRAIT],\n [Orientation.CCW_LANDSCAPE, Orientation.CCW_PORTRAIT],\n [Orientation.FLIPPED_LANDSCAPE, Orientation.FLIPPED_PORTRAIT],\n ]\n exif = self.getExif()\n if exif and exif['Orientation'] == 3: #180\n rotation = 3\n elif exif and exif['Orientation'] == 6: #90 CCW\n rotation = 2\n elif exif and exif['Orientation'] == 8: #90 CW\n rotation = 1\n else:\n rotation = 0\n\n if self.isLandscape():\n return orientation[rotation][0]\n else:\n return orientation[rotation][1]", "def euler2mat(angle):\n B = angle.size(0)\n x, y, z = angle[:, 0], angle[:, 1], angle[:, 2]\n\n cosz = torch.cos(z)\n sinz = torch.sin(z)\n\n zeros = z.detach() * 0\n ones = zeros.detach() + 1\n zmat = torch.stack([cosz, -sinz, zeros,\n sinz, cosz, zeros,\n zeros, zeros, ones], dim=1).view(B, 3, 3)\n\n cosy = torch.cos(y)\n siny = torch.sin(y)\n\n ymat = torch.stack([cosy, zeros, siny,\n zeros, ones, zeros,\n -siny, zeros, cosy], dim=1).view(B, 3, 3)\n\n cosx = torch.cos(x)\n sinx = torch.sin(x)\n\n xmat = torch.stack([ones, zeros, zeros,\n zeros, cosx, -sinx,\n zeros, sinx, cosx], dim=1).view(B, 3, 3)\n\n # rotMat = xmat.bmm(ymat).bmm(zmat)\n # changed to match opencv and conversion euler->mat/mat->euler\n rotMat = torch.bmm(zmat, torch.bmm(ymat, xmat))\n\n return rotMat", "def __cv2_rotate(image, deg):\n num_rows, num_cols = image.shape[:2]\n\n rotation_matrix = cv.getRotationMatrix2D((num_cols / 2, num_rows / 2), deg, 1)\n image = cv.warpAffine(np.float32(image), rotation_matrix, (num_cols, num_rows))\n # In case of only one channel, warpAffine removes channel dimension.\"\"\"\n return image", "def rotate_and_wrap_image(self, image, degree_of_rotation):\n\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, degree_of_rotation, 1.0)\n # borderMode (constant) and borderValue are important for maintaiing consistency \n ri = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR, borderMode = cv2.BORDER_CONSTANT,borderValue = (255,255,255))\n return ri", "def rotate(image_path):\n try:\n with Image.open(image_path) as img:\n img = ImageOps.exif_transpose(img)\n img.save(image_path, format=img.format, quality=95)\n except Exception as e:\n log.warn(f'Cannot rotate input image: [{e}]')", "def rotate(self, *args, **kwargs):\n return _image.image_rotate(self, *args, **kwargs)", "def rotateImage(image, angle):\n image_center = tuple(np.array(image.shape[1::-1]) / 2)\n rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)\n result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_NEAREST)\n return result", "def _cubelet_rotation_matrix(self, cubelet_meta_info, qpos_array):\n euler_angles = qpos_array[cubelet_meta_info[\"euler_qpos\"]]\n return rotation.euler2mat(euler_angles)", "def rotate(img, angle, resample=False, expand=False, center=None):\r\n \r\n return img.rotate(angle, resample, expand, center)", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def align(image):\n angle = find_angle(image)\n image = rotate(image, angle)\n return image", "def 
im_rotate(im_path, rotate_angle):\n image = cv.imread(str(im_path))\n image_r = cv.rotate(image, rotate_angle)\n\n return image_r", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotate(self):\n\n last_center = self.rect.center\n self.image = pg.transform.rotate(self.image_copy,self.angle)\n self.rect = self.image.get_rect()\n self.rect.center = last_center\n self.angle +=self.rotate_by", "def im_rotate(img, angle):\n rows, cols = img.shape\n rotM = cv2.getRotationMatrix2D((cols/2-0.5, rows/2-0.5), angle, 1)\n imrotated = cv2.warpAffine(img, rotM, (cols, rows))\n\n return imrotated", "def get_rot_from_mat(m_mat):\n trans_matrix = oMa.MTransformationMatrix(m_mat)\n rot = trans_matrix.rotation()\n\n return rot", "def rotation_matrix(rotate):\n tx, ty, tz = rotate\n Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])\n Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])\n Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])\n return np.dot(Rx, np.dot(Ry, Rz))", "def rotate(im: Image) -> Image:\n return im.rotate(random.randint(0, 360))", "def angle_to_rotation_matrix(angle: torch.Tensor) -> torch.Tensor:\n ang_rad = deg2rad(angle)\n cos_a: torch.Tensor = torch.cos(ang_rad)\n sin_a: torch.Tensor = torch.sin(ang_rad)\n return torch.stack([cos_a, sin_a, -sin_a, cos_a], dim=-1).view(*angle.shape, 2, 2)", "def rotate_3D(image, angle, axes=(1, 2)):\n rotated_image = scipy.ndimage.interpolation.rotate(\n image, angle, axes, reshape=False)\n return rotated_image", "def compute_rotation(self):\n if self.predictions[self.iteration][0] == 90.0 or self.predictions[self.iteration][0] == 270.0:\n self.rotation = 20\n self.initial_adjust = True\n return\n\n if self.iteration == 0 or (self.iteration == 1 and self.initial_adjust):\n self.rotation = rotate.get_90_deg_rotation(self.predictions[self.iteration])\n elif self.iteration == 1 or (self.iteration == 2 and self.initial_adjust):\n self.rotation = rotate.get_45_deg_rotation(self.predictions, self.current_position)\n elif self.iteration >= 2 or (self.iteration > 2 and self.initial_adjust):\n self.rotation = rotate.get_fine_rotation(self.iteration)", "def deg_mat(self):\n return self._deg_mat", "def rotate(img_path):\r\n\timg = cv2.imread(img_path)\r\n\timgcorrect = ImgCorrect(img)\r\n\tlines_img = imgcorrect.img_lines()\r\n\tif lines_img is None:\r\n\t rotated = imgcorrect.rotate_image(0)\r\n\telse:\r\n\t degree = imgcorrect.search_lines()\r\n\t rotated = imgcorrect.rotate_image(degree)\r\n\tcv2.imwrite(img_path, rotated)", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= angle", "def rotate_img(image, degrees):\n if degrees == 90:\n return cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)\n elif degrees == 180:\n return cv2.rotate(image, cv2.ROTATE_180)\n elif degrees == 270:\n return cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)\n elif degrees == 0:\n return image\n else:\n print(\"DEGREE 
= \", degrees)", "def rotateImage(self):\n self.cnvImgOrig.rotate(\"./images/origPic.tiff\")\n self.cnvImgTest.rotate(\"./images/testPic.tiff\")", "def inverse(self):\n return Rotation(self.matrix.transposed())", "def galaxy1_orbital_orientation(self):\n return self._galaxy1_orbital_orientation", "def get_rot(self) -> WAQuaternion:\n pass", "def detect_orientation(image):\n custom_oem_psm_config = r'--oem 1--psm 7'\n newdata = pytesseract.image_to_osd(image,config= custom_oem_psm_config)\n rotation = int(re.search('(?<=Rotate: )\\\\d+', newdata).group(0))\n # print(\"Rotation degrees : \", rotation)\n return rotate_img(image, rotation)", "def rotation_matrix(angle) -> np.array:\n return np.array([\n [np.cos(angle), np.sin(angle)],\n [-np.sin(angle), np.cos(angle)]])", "def _rotation_from_gradient(self,m):\n\t\ttheta = -np.arctan(m)\n\t\tself.current_theta = theta\n\t\treturn self._rotation_from_angle(theta)", "def rotate(img, angle):\n if not _is_numpy(img):\n raise TypeError('img should be Numpy Image. Got {}'.format(type(img)))\n\n aug = iaa.Affine(rotate=angle)\n return aug.augment_image(img)", "def _r270(self,m):\n return np.rot90(m,3)", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rot_mat2rot_angle(rot_mat):\n return np.arctan2(rot_mat[1, 0], rot_mat[0, 0])", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def eulerAnglesToRotationMatrix(self, theta):\n\n R_x = np.array([[1, 0, 0],\n [0, math.cos(theta[0]), -math.sin(theta[0])],\n [0, math.sin(theta[0]), math.cos(theta[0])]\n ])\n\n R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1])],\n [0, 1, 0],\n [-math.sin(theta[1]), 0, math.cos(theta[1])]\n ])\n\n R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0],\n [math.sin(theta[2]), math.cos(theta[2]), 0],\n [0, 0, 1]\n ])\n\n R = np.dot(R_z, np.dot(R_y, R_x))\n\n return R", "def rotate_image(image, angle):\n\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(array(image_size) / 2)\n\n rot_mat = vstack([cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]])\n trans_mat = identity(3)\n\n w2 = image_size[0] * 0.5\n h2 = image_size[1] * 0.5\n\n rot_mat_notranslate = matrix(rot_mat[0:2, 0:2])\n\n tl = (array([-w2, h2]) * rot_mat_notranslate).A[0]\n tr = (array([w2, h2]) * rot_mat_notranslate).A[0]\n bl = (array([-w2, -h2]) * rot_mat_notranslate).A[0]\n br = (array([w2, -h2]) * rot_mat_notranslate).A[0]\n\n x_coords = [pt[0] for pt in [tl, tr, bl, br]]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n\n y_coords = [pt[1] for pt in [tl, tr, bl, br]]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n\n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n\n new_w = int(abs(right_bound - left_bound))\n new_h = 
int(abs(top_bound - bot_bound))\n new_image_size = (new_w, new_h)\n\n new_midx = new_w * 0.5\n new_midy = new_h * 0.5\n\n dx = int(new_midx - w2)\n dy = int(new_midy - h2)\n\n trans_mat = getTranslationMatrix2d(dx, dy)\n affine_mat = (matrix(trans_mat) * matrix(rot_mat))[0:2, :]\n result = cv2.warpAffine(image, affine_mat, new_image_size, flags=cv2.INTER_LINEAR)\n\n return result", "def get_rotation_back_angle(w, r):\n # now let us compute the image of the vector (r,0), that is h(r,0)\n\n zeros = torch.zeros_like(r)\n\n origin_circle = torch.stack([r, zeros], dim=-1) # create (B1, B2, .., B_N, r, 0)\n\n h_orig_circle = h(origin_circle, w, r)\n\n return T_2(h_orig_circle)", "def rotate_image(image, angle):\r\n\r\n # Get the image size\r\n # No that's not an error - NumPy stores image matricies backwards\r\n image_size = (image.shape[1], image.shape[0])\r\n image_center = tuple(np.array(image_size) / 2)\r\n\r\n # Convert the OpenCV 3x2 rotation matrix to 3x3\r\n rot_mat = np.vstack(\r\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\r\n )\r\n\r\n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\r\n\r\n # Shorthand for below calcs\r\n image_w2 = image_size[0] * 0.5\r\n image_h2 = image_size[1] * 0.5\r\n\r\n # Obtain the rotated coordinates of the image corners\r\n rotated_coords = [\r\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\r\n (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]\r\n ]\r\n\r\n # Find the size of the new image\r\n x_coords = [pt[0] for pt in rotated_coords]\r\n x_pos = [x for x in x_coords if x > 0]\r\n x_neg = [x for x in x_coords if x < 0]\r\n\r\n y_coords = [pt[1] for pt in rotated_coords]\r\n y_pos = [y for y in y_coords if y > 0]\r\n y_neg = [y for y in y_coords if y < 0]\r\n\r\n right_bound = max(x_pos)\r\n left_bound = min(x_neg)\r\n top_bound = max(y_pos)\r\n bot_bound = min(y_neg)\r\n\r\n new_w = int(abs(right_bound - left_bound))\r\n new_h = int(abs(top_bound - bot_bound))\r\n\r\n # We require a translation matrix to keep the image centred\r\n trans_mat = np.matrix([\r\n [1, 0, int(new_w * 0.5 - image_w2)],\r\n [0, 1, int(new_h * 0.5 - image_h2)],\r\n [0, 0, 1]\r\n ])\r\n\r\n # Compute the tranform for the combined rotation and translation\r\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\r\n\r\n # Apply the transform\r\n result = cv2.warpAffine(\r\n image,\r\n affine_mat,\r\n (new_w, new_h),\r\n flags=cv2.INTER_LINEAR\r\n )\r\n\r\n return result", "def getOrientation(self):\r\n return self.orientation", "def rotmat(p, q):\n rot = numpy.dot(refmat(q, -p), refmat(p, -p))\n return rot" ]
[ "0.77768993", "0.7759922", "0.77279866", "0.7530842", "0.7502491", "0.7206852", "0.71904266", "0.7162689", "0.71563435", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7058655", "0.7005358", "0.6981667", "0.6911321", "0.68793875", "0.68270576", "0.679283", "0.67912734", "0.6790262", "0.67638195", "0.67603487", "0.67526007", "0.6750879", "0.6747828", "0.6702097", "0.670177", "0.66791886", "0.6669619", "0.66346693", "0.6599657", "0.652011", "0.6489357", "0.6478114", "0.644607", "0.64343905", "0.6429481", "0.6428973", "0.63882273", "0.63699096", "0.6350216", "0.63384175", "0.6327105", "0.632621", "0.6318789", "0.6299089", "0.62917876", "0.62702763", "0.62631404", "0.6260796", "0.6255669", "0.62380916", "0.6237384", "0.6231948", "0.62276775", "0.6197764", "0.6192595", "0.61874574", "0.61820245", "0.6176092", "0.61707777", "0.6165205", "0.6163984", "0.6160134", "0.6158438", "0.6157506", "0.6155815", "0.6143829", "0.6143023", "0.6142756", "0.61397237", "0.61145645", "0.61103886", "0.6110211", "0.61100966", "0.61041766", "0.6096376", "0.6094306", "0.60888606", "0.60831213", "0.60743773", "0.60718304", "0.60659623", "0.6064851", "0.60636824", "0.6057082", "0.6056926", "0.6055493", "0.605416", "0.60535264", "0.60492706", "0.6048952" ]
0.6058148
93
True if the exterior orientation is solved
def isSolved(self):
        return self.__isSolved
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_versor(self) -> bool:\n return np.isclose(np.linalg.norm(self.A), 1.0)", "def is_hom_alt(self) -> bool:\n return self.is_hom() and (self.allele1 > 0 or self.allele2 > 0)", "def solved(self):\n return all(cell == 1 for row in self.faces for cell in row) or all(cell == 0 for row in self.faces for cell in row)", "def isVersor(self) -> bool:\n\n Vhat = self.gradeInvol()\n Vrev = ~self\n Vinv = Vrev/(self*Vrev)[0]\n\n gpres = grades_present(Vhat*Vinv, 0.000001)\n if len(gpres) == 1:\n if gpres[0] == 0:\n if np.sum(np.abs((Vhat*Vinv).value - (Vinv*Vhat).value)) < 0.0001:\n for e in basis_vectors(self.layout).values():\n gpres = grades_present(Vhat*e*Vrev, 0.000001)\n if not (len(gpres) == 1 and gpres[0] == 1):\n return False\n gpres = grades_present(self, 0.000001)\n if len(gpres) == 1:\n return False\n else:\n return True\n return False", "def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid", "def is_hom(self) -> bool:\n if self.is_null():\n return False\n if self.allele1 == -1 or self.allele2 == -1:\n return True\n return self.allele1 == self.allele2", "def is_orientation_ok(image,k=2,is_first=True):\n\n mid_x, mid_y = int(0.5*image.shape[1]), int(0.5*image.shape[0])\n\n # Get moment for first body half \n image_0 = np.array(image)\n image_0[:,:int(mid_x)] = 0\n image_0 = image_0[:,int(mid_x):]\n moment_0 = get_moment(image_0,k)\n\n # Get moment for second body half\n image_1 = np.array(image)\n image_1[:,int(mid_x):] = 0\n image_1 = np.fliplr(image_1)\n image_1 = image_1[:,int(mid_x):]\n moment_1 = get_moment(image_1,k)\n\n # Compute descriminant and flip flag\n discrim = (moment_0 - moment_1)/(moment_0 + moment_1)\n if discrim < 0:\n ok = False\n else:\n ok = True \n return ok, discrim", "def is_orthogonal(self):\n pass", "def isInvertible(self):\n return bool(self.isSquare() and self.determinant())", "def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col", "def isinvertible(self):\n if np.all(np.abs(self.maroots) > 1):\n return True\n else:\n return False", "def is_full_dimensional(self):\n\n return self.affine_dimension() == self.space_dimension()", "def __bool__(self):\n return _osgAnimation.mapVertexInfluence___bool__(self)", "def is_solution(self):\n # Only need to check the length because the configuration expansion assesses the feasibility.\n return len(self._path) == self._N", "def is_simply_laced(self):\n return self._info['simply_laced']", "def check_for_residue_existance(self):\n if not self.pose.total_residue():\n print \"No pose Loaded.\"\n return False\n\n if not self.current_chain.get() or not self.current_residue.get():\n print \"Chain or residue not set\"\n return False\n\n current_region = self.current_residue.get().split(\":\")\n if len(current_region)>1:\n ResStart = int(current_region[0]); ResEnd = int(self.current_region[1])\n\n\n if self.pose.pdb_info().pdb2pose(self.current_chain.get(), ResStart)==0 or self.pose.pdb_info().pdb2pose(self.current_chain.get(), 
ResEnd)==0:\n print \"Region not found in pose\"\n return False\n else:\n if self.pose.pdb_info().pdb2pose(self.current_chain.get(), int(self.current_residue.get())) ==0:\n\n print \"Residue not found in pose\"\n return False\n\n #If everythig is good then:\n return True", "def is_conjugate(self):\n return self.is_dagger and bool(self.z)", "def check_hermitian(self):\n adjoint = self.mat.conj().T\n return np.allclose(self.mat, adjoint)", "def is_simplex(self):\n return self.affine_dimension()+1==self.n_vertices()", "def is_versor(self) -> np.ndarray:\n return np.isclose(np.linalg.norm(self.array, axis=1), 1.0)", "def is_done(self):\n\n x, y = self.position\n\n if x <= -CAR_LENGTH \\\n and (self.spawn_position[0] > 0 or y != self.spawn_position[1]) \\\n and self.is_horizontal:\n return True\n elif x >= MAP_SIZE \\\n and (self.spawn_position[0] < MAP_SIZE or y != self.spawn_position[1]) \\\n and self.is_horizontal:\n return True\n elif y <= -CAR_LENGTH \\\n and (self.spawn_position[1] > 0 or x != self.spawn_position[0]) \\\n and not self.is_horizontal:\n return True\n elif y >= MAP_SIZE \\\n and (self.spawn_position[1] < MAP_SIZE or x != self.spawn_position[0]) \\\n and not self.is_horizontal:\n return True\n else:\n return False", "def is_vr(self):\n\n if self.max_grid_size:\n return self.max_grid_size != self.min_grid_size\n else:\n return False", "def complete_level(self):\n if self.ycor() == self.finish_line:\n return True", "def _infer_direction(self):\n data = self.get_data(None)\n if data is not None:\n # Infer the direction from the data\n if data._size > 1:\n data = data[0:2].array\n return bool(\n data.item(\n 0,\n )\n < data.item(\n 1,\n )\n )\n # --- End: if\n\n # Still here?\n data = self.get_bounds_data(None)\n if data is not None:\n # Infer the direction from the bounds\n b = data[(0,) * (data.ndim - 1)].array\n return bool(\n b.item(\n 0,\n )\n < b.item(\n 1,\n )\n )\n\n # Still here? Then infer the direction from the units.\n return not self.Units.ispressure", "def has_solution(self) -> bool:\n pass", "def is_achromatic(self) -> bool:\n\n value = self._space.is_achromatic(self.coords(nans=False))\n if value is None:\n xyz = self.convert('xyz-d65')\n return bool(xyz._space.is_achromatic(xyz[:-1]))\n return value", "def check_orientation(self) -> None:\n if self.compute_volume() > 0:\n raise (\n BaseException(\n \"The volume within the surface is negative. 
It seems that you faces\"\n \"are not oriented correctly according to the clockwise flag\"\n )\n )", "def e_ortogonal(self, other):\n if self.pi(other) == 0:\n return True\n else:\n return False", "def isGoal(self):\n for index in range(self.DIM):\n if not self.values('r',index).count(0) is 0:\n return False\n if not self.isValid():\n return False\n return True", "def is_solved(self):\n return self.to_grid == self.from_grid", "def is_equivalence(self) -> bool:", "def is_exterior(self):\r\n exterior_types = [\"ExteriorWall\", \"Roof\", \"InteriorWall\", \"UndergroundWall\", \"RaisedFloor\"]\r\n return self.obj_type in exterior_types", "def is_solved(self):\n return (self.from_grid == self.to_grid)", "def is_hom_ref(self) -> bool:\n return self.is_hom() and (self.allele1 == 0 or self.allele2 == 0)", "def isscalar(self):\n return not self.axes", "def is_double_faced(self) -> bool:\n return self.layout in (\"transform\", \"meld\", \"modal_dfc\")", "def is_origin(self) -> bool:\n return self.x == 0 and self.y == 0", "def is_vertical(self):\n return self.slope == float(\"+inf\")", "def is_complete_multipartite(self):\n if self._.d != 2:\n return False\n if not self._has(\"p\"):\n self.pTable()\n return any(self._.p[0, i, i] == self._.p[j, i, i]\n for i, j in [(1, 2), (2, 1)])", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def flexible(self) -> bool:\n return self.ratio is not None", "def is_equation(self): \n return False", "def _validate_mesh(self):\n if not (np.abs(self.axis_u.dot(self.axis_v) < 1e-6) and #pylint: disable=no-member\n np.abs(self.axis_v.dot(self.axis_w) < 1e-6) and #pylint: disable=no-member\n np.abs(self.axis_w.dot(self.axis_u) < 1e-6)): #pylint: disable=no-member\n raise ValueError('axis_u, axis_v, and axis_w must be orthogonal')\n return True", "def check_boundary(self):\n turtle_position = self.turtle.position()\n if turtle_position[0] > self.screen_width/2 - 40 and int(self.turtle.heading()) == 0:\n return False\n if turtle_position[0] < -self.screen_width/2 + 40 and int(self.turtle.heading()) == 180:\n return False\n if turtle_position[1] > self.screen_height/2 - 40 and int(self.turtle.heading()) == 90:\n return False\n if turtle_position[1] < -self.screen_height/2 + 40 and int(self.turtle.heading()) == 270:\n return False\n return True", "def did_solve(self):\n return self._solution[\"status\"] == \"optimal\"", "def IsHermitian(self):\n \n Hermitian=True\n for Ind in self.IndList():\n Q=tuple(-x for x in Ind)\n \n X = self[Ind].conj().T-self[Q]\n\n A=amax(list(abs(X.flatten())))\n\n \n if A > 1e-9:\n Hermitian=False\n \n return Hermitian", "def finished(self) -> bool:\n p1_count = 0\n p2_count = 0\n ley_line_total = (self.side_length + 1) * 3\n for itype in self.current_ley_lines:\n for line in itype:\n if line[0] == '1':\n p1_count += 1\n if line[0] == '2':\n p2_count += 1\n return p1_count >= ley_line_total / 2 or p2_count >= ley_line_total / 2", "def is_in_torelli(self):\n mat = self.action_on_homology()\n return np.array_equal(mat, np.identity(mat.shape[0], dtype=object))", "def solved(self):\n return GOAL_VEHICLE in self.vehicles", "def colision(self, X, Y):\n #ESTE IF COMPROBARA MEDIANTE LAS POSICIONES EN EL EJE SI HAN GOLPEADO AL SUBDITO\n if X <= self.x + self.width and X >= self.x:\n if Y <= self.y + self.height and Y >=self.y:\n return True\n return False", "def has_intermediate_table(self):\r\n if 
(self.intermediate_spherical_right is not None or self.intermediate_cylinder_right is not None\r\n or self.intermediate_axis_right is not None or self.intermediate_av_right is not None or\r\n self.intermediate_dnp_right is not None\r\n or self.intermediate_spherical_left is not None or self.intermediate_cylinder_left is not None\r\n or self.intermediate_axis_left is not None or self.intermediate_av_left is not None or\r\n self.intermediate_dnp_left is not None):\r\n return True\r\n return False", "def is_wide(self) -> bool:\n return self.layout == \"planar\"", "def independent_components(self) -> bool:\n return bool(self.GetIndependentComponents())", "def is_equation(self):\n return False", "def is_solvable(self) -> bool:\r\n inv_count = 0\r\n arr = self.current_state.flatten()\r\n for i in range(0, 9):\r\n for j in range(i + 1, 9):\r\n if arr[j] and arr[i] and arr[i] > arr[j]:\r\n inv_count += 1\r\n return inv_count % 2 == 0", "def is_equation(self):\n return True", "def is_equation(self):\n return True", "def is_contradiction_(transition):\n is_contr = False\n\n # check implications of lower left corner\n if np.argmax(transition[0]) == 0:\n if np.argmax(transition[2]) == 2 or np.argmax(transition[2]) == 3:\n is_contr = True\n elif np.argmax(transition[0]) == 1:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 2:\n is_contr = True\n if np.argmax(transition[2]) != 1:\n is_contr = True\n elif np.argmax(transition[0]) == 2:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 1:\n is_contr = True\n elif np.argmax(transition[0]) == 3:\n if np.argmax(transition[1]) != 3:\n is_contr = True\n if np.argmax(transition[2]) == 0 or np.argmax(transition[2]) == 2:\n is_contr = True\n\n # check implicatiosn of upper right corner\n if np.argmax(transition[2]) == 0:\n if np.argmax(transition[0]) == 1 or np.argmax(transition[0]) == 3:\n is_contr = True\n elif np.argmax(transition[2]) == 1:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 2:\n is_contr = True\n elif np.argmax(transition[2]) == 2:\n if np.argmax(transition[0]) != 2:\n is_contr = True\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 1:\n is_contr = True\n elif np.argmax(transition[2]) == 3:\n if np.argmax(transition[1]) != 3:\n is_contr = True\n if np.argmax(transition[0]) == 0 or np.argmax(transition[0]) == 1:\n is_contr = True\n\n return is_contr", "def is_lattice(self):\n return hasattr(self,\"uc\") and len(self.maximals())==1 and type(self.get_meet())!=str and type(self.get_join())!=str", "def __can_calculation_be_performed(self) -> bool:\n return self.__matrix.shape[0] == self.__matrix.shape[1] \\\n and len(self.__output_state) == len(self.__input_state) \\\n and len(self.__output_state) == self.__matrix.shape[0]", "def isscalar(self):\n return not bool(self.shape)", "def independent(self):\n return True", "def _validate_axes(self):\n if not (\n np.abs(self.axis_u.dot(self.axis_v) < 1e-6)\n and np.abs(self.axis_v.dot(self.axis_w) < 1e-6)\n and np.abs(self.axis_w.dot(self.axis_u) < 1e-6)\n ):\n raise ValueError(\"axis_u, axis_v, and axis_w must be orthogonal\")\n return True", "def _is_h_contrained(self, m):\n available_area = self._get_render_area_size()\n map_aspect = m.envelope().width() / m.envelope().height()\n page_aspect = available_area[0] / available_area[1]\n\n return map_aspect > page_aspect", "def is_full(self):\n return self.name and self.variables and self.assumptions and self.guarantees", "def did_solve(self) -> bool:\n pass", "def orientation(p, q, r):\n val 
= (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def semileaf(self):\n if self._leftchild and not self._rightchild:\n return True\n if self._rightchild and not self._leftchild:\n return True\n return False", "def converged(self):\n if len(self.rundir) >= 2:\n if io.ionic_steps(self.rundir[-1]) <= 3:\n return True\n if self.settings[\"nrg_convergence\"] != None:\n if io.job_complete(self.rundir[-1]) and io.job_complete(self.rundir[-2]):\n o1 = io.Oszicar(os.path.join(self.rundir[-1],\"OSZICAR\"))\n o2 = io.Oszicar(os.path.join(self.rundir[-2],\"OSZICAR\"))\n if abs( o1.E[-1] - o2.E[-1]) < self.settings[\"nrg_convergence\"]:\n return True\n\n return False", "def is_solved(self):\n self.solved = self.current_pos == self.finish_pos\n return self.solved", "def _is_valid_pose(self):\n contacts = self.gc.getRobot().robot.contacts\n n_object_contacts = 0\n is_thumb_in_contact = False\n for contact in contacts:\n if contact.body1 == self.object_name:\n n_object_contacts += 1\n if contact.body2 == '_chain4_link2':\n is_thumb_in_contact = True\n elif contact.body2 == self.object_name:\n n_object_contacts += 1\n if contact.body1 == '_chain4_link2':\n is_thumb_in_contact = True\n\n is_valid = n_object_contacts >= 2\n return is_valid", "def drone_has_flipped(self, current_orientation):\n has_flipped = True\n\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n\n rospy.logwarn(\"#### HAS FLIPPED? ########\")\n rospy.logwarn(\"RPY current_orientation\"+str(current_orientation))\n rospy.logwarn(\"max_roll\"+str(self.max_roll) +\n \",min_roll=\"+str(-1*self.max_roll))\n rospy.logwarn(\"max_pitch\"+str(self.max_pitch) +\n \",min_pitch=\"+str(-1*self.max_pitch))\n rospy.logwarn(\"############\")\n\n if current_orientation.x > -1*self.max_roll and current_orientation.x <= self.max_roll:\n if current_orientation.y > -1*self.max_pitch and current_orientation.y <= self.max_pitch:\n has_flipped = False\n\n return has_flipped", "def check_directionality_viable(self):\n\n direction_viable = True\n nose_cords, ear_left_cords, ear_right_cords = [], [], []\n for animal_name in self.animal_bp_dict.keys():\n for bp_cord in [\"X_bps\", \"Y_bps\"]:\n bp_list = self.animal_bp_dict[animal_name][bp_cord]\n for bp_name in bp_list:\n bp_name_components = bp_name.split(\"_\")\n bp_name_components = [x.lower() for x in bp_name_components]\n if \"nose\" in bp_name_components:\n nose_cords.append(bp_name)\n elif (\"ear\" in bp_name_components) and (\n \"left\" in bp_name_components\n ):\n ear_left_cords.append(bp_name)\n elif (\"ear\" in bp_name_components) and (\n \"right\" in bp_name_components\n ):\n ear_right_cords.append(bp_name)\n else:\n pass\n\n for cord in [nose_cords, ear_left_cords, ear_right_cords]:\n if len(cord) != len(self.animal_bp_dict.keys()) * 2:\n direction_viable = False\n\n if direction_viable:\n nose_cords = [\n nose_cords[i * 2 : (i + 1) * 2]\n for i in range((len(nose_cords) + 2 - 1) // 2)\n ]\n ear_left_cords = [\n ear_left_cords[i * 2 : (i + 1) * 2]\n for i in range((len(ear_left_cords) + 2 - 1) // 2)\n ]\n ear_right_cords = [\n ear_right_cords[i * 2 : (i + 1) * 2]\n for i in range((len(ear_right_cords) + 2 - 1) // 2)\n ]\n\n return direction_viable, nose_cords, ear_left_cords, ear_right_cords", "def is_stable(self, position):\n A = self.A_matrix + self.A_matrix.T\n eigvals, eigvecs = np.linalg.eig(A)\n\n if all(eigvals < 0):\n return True\n else:\n return False", 
"def is_center(self):\n if self.pupils_located:\n return self.is_right() is not True and self.is_left() is not True", "def invertible(self):\n a = self._data\n return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]", "def is_valid(self,):\r\n return self.g > 0 and self.l > 0 and self.m1 > 0 and self.m2 > 0 and self.m3 > 0 and self.r1 > 0 and self.r2 > 0 and self.tau > 0 and self.theta1 > 0 and self.theta2 > 0 and self.theta3 > 0", "def checkintercambio(self,pos):\n\t\tif not Cost().existe('intercambio'):\n\t\t\treturn False\n\t\tif pos < len(self.objective):\n\t\t\t\"\"\"Evaluo caso de intercambio\"\"\"\n\t\t\tb1 = self.verBase()\n\t\t\tb2= self.verSigBase()\n\t\t\to1 = self.objective[pos]\n\t\t\to2 = self.verSigObj(pos)\n\t\t\tif (not b2 is None and not o2 is None) and (b1 == o2) and (b2 == o1):\n\t\t\t\treturn True\n\t\treturn False", "def IsRotationInertialToBody(self):\n return _gmat_py.CCSDSAEMSegment_IsRotationInertialToBody(self)", "def get_is_valid(self):\n if not super().get_is_valid():\n return False\n if fabs(self.left_height - self.right_height) < 2:\n return True\n return False", "def is_modern(self):\n G = self.poset().hasse_diagram()\n for x in G:\n nx = list(G.neighbors_in(x))\n nx.append(x)\n if min(nx) < x and max(nx) > x:\n return False\n return True", "def is_right(self):\n if self.pupils_located:\n return self.horizontal_ratio() <= 0.35", "def is_diagonal(self):\n return self.is_upper() and self.is_lower()", "def guard_occupy_transition(self):\n if not self.get_free_positions:\n return True", "def verify_legal_rotation(self, direction):\n test_figure = None\n if direction == \"CW\":\n test_figure = self.get_block_positions(self.active_piece.get_cw_rotation())\n elif direction == \"CCW\":\n test_figure = self.get_block_positions(self.active_piece.get_ccw_rotation())\n\n for b_x, b_y in test_figure:\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def converged(self) -> bool:\n assert self._coords is not None\n\n if self._converged_translation:\n logger.info(\n \"Converged purely based on translation of the \"\n \"dimer midpoint\"\n )\n return True\n\n rms_g0 = np.sqrt(np.mean(np.square(self._coords.g0)))\n return self.iteration > 0 and rms_g0 < self.gtol", "def isScalene(self):\n\t\treturn self.a != self.b != self.c", "def row1_invariant(self, target_col):\n result = True\n if self._grid[1][target_col] != 0:\n result = False\n for row in range(2, self._height):\n for col in range(self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n for row in (0, 1):\n for col in range(target_col+1, self._width):\n solved_value = (col + self._width * row)\n if solved_value != self._grid[row][col]:\n result = False\n return result", "def inpointgroup(self, Q):\n # if Q is not an orthogonal matrix, return false\n try:\n Q = np.array(Q)\n except Exception:\n return False\n if not _in_O3(Q):\n return False\n if len(Q) != self.__N:\n return False\n return self == Lattice(np.dot(Q, self.__E))", "def is_solvable(self):\n if self._is_solvable is None:\n if self.order() % 2 != 0:\n return True\n ds = self.derived_series()\n terminator = ds[len(ds) - 1]\n gens = terminator.generators\n degree = self.degree\n identity = _af_new(list(range(degree)))\n if all(g == identity for g in gens):\n self._is_solvable = True\n return True\n else:\n self._is_solvable = False\n return False\n else:\n return 
self._is_solvable", "def is_solved(self) -> bool:\n return set(self.boxes) == set(self.storage_locations)", "def is_posdef(X):\n return np.min(np.linalg.eigvals(X)) > 0", "def verif_victoire(self):\n\n if self._mot_en_cours == self._mot_a_trouver :\n return True\n else :\n return False", "def is_V(self):\n return True", "def is_V(self):\n return True", "def isFim(self):\r\n return self.sair", "def is_algebraically_stable(self):\n import numpy as np\n\n if np.any(self.b < 0):\n return False\n\n # Check the eigenvalues for positive definiteness.\n #\n # We could also check whether the Cholesky factorization fails (=> not pos. def.).\n # However, since we are interested in positive SEMIdefinite matrices, we have to\n # regularize M by adding a small multiple of the identity matrix. Since \"small\"\n # depends on the floating point type and this is probably not a bottleneck, using\n # the eigenvalues seems to be fine.\n rk = self.__num__()\n B = np.diag(rk.b)\n M = B.dot(rk.A) + rk.A.T.dot(B) - np.outer(rk.b, rk.b)\n isposdef = np.all(np.linalg.eigvals(M) >= -10 * np.finfo(M.dtype).eps)\n\n return isposdef", "def is_valid(self):\n posit1 = (self.mean_v > 0) & (self.kappa_y > 0) & (self.eta_y > 0)\n posit2 = (self.kappa_s > 0) & (self.eta_s > 0)\n return posit1 & posit2 & self.feller()", "def rotated(self):\n return self.pol_lat != 90." ]
[ "0.645655", "0.63614464", "0.62675583", "0.62624866", "0.62122506", "0.6185279", "0.6101881", "0.60985523", "0.605189", "0.6042637", "0.6027129", "0.6021935", "0.6013572", "0.6010895", "0.59998566", "0.5995052", "0.5970132", "0.5969842", "0.5943013", "0.59369075", "0.59358853", "0.5905908", "0.5879452", "0.5864619", "0.5854801", "0.58522236", "0.58227694", "0.5819711", "0.58190584", "0.58095956", "0.5807328", "0.5802393", "0.57982206", "0.577554", "0.5775032", "0.5768606", "0.5757377", "0.57493585", "0.5741816", "0.57408637", "0.57408637", "0.57408637", "0.5732908", "0.57138336", "0.5710352", "0.5707986", "0.5706481", "0.5706223", "0.5693618", "0.56875473", "0.5683492", "0.568223", "0.5672227", "0.5670107", "0.5668354", "0.566534", "0.5664041", "0.5657477", "0.5657477", "0.56547487", "0.5654581", "0.5652573", "0.5651842", "0.5643994", "0.5630903", "0.56254077", "0.5620079", "0.56167305", "0.5614748", "0.55935884", "0.5587621", "0.5586883", "0.5582738", "0.5581747", "0.55801296", "0.55797654", "0.55772877", "0.5575282", "0.55729544", "0.5572534", "0.5572134", "0.5568247", "0.5568117", "0.55609906", "0.55553037", "0.5554625", "0.55515826", "0.55472773", "0.5547173", "0.55465657", "0.5543263", "0.55403405", "0.5540269", "0.55351704", "0.55347836", "0.55210257", "0.55210257", "0.5518431", "0.5515334", "0.55136025", "0.5512986" ]
0.0
-1
r""" Compute inner orientation parameters
def ComputeInnerOrientation(self, imagePoints):
    # implementing observation vectors
    imagePoints = imagePoints.reshape(np.size(imagePoints), 1)

    fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)

    n = int(len(imagePoints))  # number of observations
    u = 6  # 6 orientation parameters

    A = np.zeros((n, u))  # A matrix (n,u)

    j = 0
    for i in range(len(imagePoints)):
        if i % 2 == 0:
            A[i, 0] = 1
            A[i, 1] = 0
            A[i, 2] = fMarks[j]
            A[i, 3] = fMarks[j + 1]
            A[i, 4] = 0
            A[i, 5] = 0
        else:
            A[i, 0] = 0
            A[i, 1] = 1
            A[i, 2] = 0
            A[i, 3] = 0
            A[i, 4] = fMarks[j]
            A[i, 5] = fMarks[j + 1]
            j += 2

    X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))
    v = np.dot(A, X) - imagePoints

    adjustment_results = {"params": X, "residuals": v, "N": np.dot(np.transpose(A), A)}

    self.__innerOrientationParameters = X  # updating the inner orientation params

    return adjustment_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def calc_main_axis(self):\n #Clarify why the above step has been 
done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def P(self):\n self.eigenmatrix()", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def orientation(a:tuple, b:tuple, c:tuple)->int:\n d = direction(a, b, c)\n if d == 0:\n return 0\n elif d > 0:\n return 1\n else:\n return -1", "def outer_rad(self):\n return self._outer_rad", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def _sector_orientation(self, vertices):\n if not vertices[0] == vertices[-1]:\n vertices.append(vertices[0])\n xy = np.transpose(np.array(vertices))\n x, y = xy[0], xy[1]\n return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0, vertices", "def orient_2d(p,q,r):\n return (q[0]-p[0])*(r[1]-p[1]) - (r[0]-p[0])*(q[1]-p[1])", "def init_axis(self):\n # Shorthand:\n nphi = self.nphi\n nfp = self.nfp\n\n phi = np.linspace(0, 2 * np.pi / nfp, nphi, endpoint=False)\n d_phi = phi[1] - phi[0]\n R0 = np.zeros(nphi)\n Z0 = np.zeros(nphi)\n R0p = np.zeros(nphi)\n Z0p = np.zeros(nphi)\n R0pp = np.zeros(nphi)\n Z0pp = np.zeros(nphi)\n R0ppp = np.zeros(nphi)\n Z0ppp = np.zeros(nphi)\n for jn in range(0, 
self.nfourier):\n n = jn * nfp\n sinangle = np.sin(n * phi)\n cosangle = np.cos(n * phi)\n R0 += self.rc[jn] * cosangle + self.rs[jn] * sinangle\n Z0 += self.zc[jn] * cosangle + self.zs[jn] * sinangle\n R0p += self.rc[jn] * (-n * sinangle) + self.rs[jn] * (n * cosangle)\n Z0p += self.zc[jn] * (-n * sinangle) + self.zs[jn] * (n * cosangle)\n R0pp += self.rc[jn] * (-n * n * cosangle) + self.rs[jn] * (-n * n * sinangle)\n Z0pp += self.zc[jn] * (-n * n * cosangle) + self.zs[jn] * (-n * n * sinangle)\n R0ppp += self.rc[jn] * (n * n * n * sinangle) + self.rs[jn] * (-n * n * n * cosangle)\n Z0ppp += self.zc[jn] * (n * n * n * sinangle) + self.zs[jn] * (-n * n * n * cosangle)\n\n d_l_d_phi = np.sqrt(R0 * R0 + R0p * R0p + Z0p * Z0p)\n d2_l_d_phi2 = (R0 * R0p + R0p * R0pp + Z0p * Z0pp) / d_l_d_phi\n B0_over_abs_G0 = nphi / np.sum(d_l_d_phi)\n abs_G0_over_B0 = 1 / B0_over_abs_G0\n self.d_l_d_varphi = abs_G0_over_B0\n G0 = self.sG * abs_G0_over_B0 * self.B0\n\n # For these next arrays, the first dimension is phi, and the 2nd dimension is (R, phi, Z).\n d_r_d_phi_cylindrical = np.array([R0p, R0, Z0p]).transpose()\n d2_r_d_phi2_cylindrical = np.array([R0pp - R0, 2 * R0p, Z0pp]).transpose()\n d3_r_d_phi3_cylindrical = np.array([R0ppp - 3 * R0p, 3 * R0pp - R0, Z0ppp]).transpose()\n\n tangent_cylindrical = np.zeros((nphi, 3))\n d_tangent_d_l_cylindrical = np.zeros((nphi, 3))\n for j in range(3):\n tangent_cylindrical[:,j] = d_r_d_phi_cylindrical[:,j] / d_l_d_phi\n d_tangent_d_l_cylindrical[:,j] = (-d_r_d_phi_cylindrical[:,j] * d2_l_d_phi2 / d_l_d_phi \\\n + d2_r_d_phi2_cylindrical[:,j]) / (d_l_d_phi * d_l_d_phi)\n\n curvature = np.sqrt(d_tangent_d_l_cylindrical[:,0] * d_tangent_d_l_cylindrical[:,0] + \\\n d_tangent_d_l_cylindrical[:,1] * d_tangent_d_l_cylindrical[:,1] + \\\n d_tangent_d_l_cylindrical[:,2] * d_tangent_d_l_cylindrical[:,2])\n\n axis_length = np.sum(d_l_d_phi) * d_phi * nfp\n rms_curvature = np.sqrt((np.sum(curvature * curvature * d_l_d_phi) * d_phi * nfp) / axis_length)\n mean_of_R = np.sum(R0 * d_l_d_phi) * d_phi * nfp / axis_length\n mean_of_Z = np.sum(Z0 * d_l_d_phi) * d_phi * nfp / axis_length\n standard_deviation_of_R = np.sqrt(np.sum((R0 - mean_of_R) ** 2 * d_l_d_phi) * d_phi * nfp / axis_length)\n standard_deviation_of_Z = np.sqrt(np.sum((Z0 - mean_of_Z) ** 2 * d_l_d_phi) * d_phi * nfp / axis_length)\n\n normal_cylindrical = np.zeros((nphi, 3))\n for j in range(3):\n normal_cylindrical[:,j] = d_tangent_d_l_cylindrical[:,j] / curvature\n self.normal_cylindrical = normal_cylindrical\n self._determine_helicity()\n\n # b = t x n\n binormal_cylindrical = np.zeros((nphi, 3))\n binormal_cylindrical[:,0] = tangent_cylindrical[:,1] * normal_cylindrical[:,2] - tangent_cylindrical[:,2] * normal_cylindrical[:,1]\n binormal_cylindrical[:,1] = tangent_cylindrical[:,2] * normal_cylindrical[:,0] - tangent_cylindrical[:,0] * normal_cylindrical[:,2]\n binormal_cylindrical[:,2] = tangent_cylindrical[:,0] * normal_cylindrical[:,1] - tangent_cylindrical[:,1] * normal_cylindrical[:,0]\n\n # We use the same sign convention for torsion as the\n # Landreman-Sengupta-Plunk paper, wikipedia, and\n # mathworld.wolfram.com/Torsion.html. 
This sign convention is\n # opposite to Garren & Boozer's sign convention!\n torsion_numerator = (d_r_d_phi_cylindrical[:,0] * (d2_r_d_phi2_cylindrical[:,1] * d3_r_d_phi3_cylindrical[:,2] - d2_r_d_phi2_cylindrical[:,2] * d3_r_d_phi3_cylindrical[:,1]) \\\n + d_r_d_phi_cylindrical[:,1] * (d2_r_d_phi2_cylindrical[:,2] * d3_r_d_phi3_cylindrical[:,0] - d2_r_d_phi2_cylindrical[:,0] * d3_r_d_phi3_cylindrical[:,2]) \n + d_r_d_phi_cylindrical[:,2] * (d2_r_d_phi2_cylindrical[:,0] * d3_r_d_phi3_cylindrical[:,1] - d2_r_d_phi2_cylindrical[:,1] * d3_r_d_phi3_cylindrical[:,0]))\n\n torsion_denominator = (d_r_d_phi_cylindrical[:,1] * d2_r_d_phi2_cylindrical[:,2] - d_r_d_phi_cylindrical[:,2] * d2_r_d_phi2_cylindrical[:,1]) ** 2 \\\n + (d_r_d_phi_cylindrical[:,2] * d2_r_d_phi2_cylindrical[:,0] - d_r_d_phi_cylindrical[:,0] * d2_r_d_phi2_cylindrical[:,2]) ** 2 \\\n + (d_r_d_phi_cylindrical[:,0] * d2_r_d_phi2_cylindrical[:,1] - d_r_d_phi_cylindrical[:,1] * d2_r_d_phi2_cylindrical[:,0]) ** 2\n\n torsion = torsion_numerator / torsion_denominator\n\n self.etabar_squared_over_curvature_squared = self.etabar * self.etabar / (curvature * curvature)\n\n self.d_d_phi = spectral_diff_matrix(self.nphi, xmax=2 * np.pi / self.nfp)\n self.d_varphi_d_phi = B0_over_abs_G0 * d_l_d_phi\n self.d_d_varphi = np.zeros((nphi, nphi))\n for j in range(nphi):\n self.d_d_varphi[j,:] = self.d_d_phi[j,:] / self.d_varphi_d_phi[j]\n\n # Compute the Boozer toroidal angle:\n self.varphi = np.zeros(nphi)\n for j in range(1, nphi):\n # To get toroidal angle on the full mesh, we need d_l_d_phi on the half mesh.\n self.varphi[j] = self.varphi[j-1] + (d_l_d_phi[j-1] + d_l_d_phi[j])\n self.varphi = self.varphi * (0.5 * d_phi * 2 * np.pi / axis_length)\n\n # Add all results to self:\n self.phi = phi\n self.d_phi = d_phi\n self.R0 = R0\n self.Z0 = Z0\n self.R0p = R0p\n self.Z0p = Z0p\n self.R0pp = R0pp\n self.Z0pp = Z0pp\n self.R0ppp = R0ppp\n self.Z0ppp = Z0ppp\n self.G0 = G0\n self.d_l_d_phi = d_l_d_phi\n self.axis_length = axis_length\n self.curvature = curvature\n self.torsion = torsion\n self.X1s = np.zeros(nphi)\n self.X1c = self.etabar / curvature\n self.min_R0 = fourier_minimum(self.R0)\n self.tangent_cylindrical = tangent_cylindrical\n self.normal_cylindrical = normal_cylindrical \n self.binormal_cylindrical = binormal_cylindrical\n self.Bbar = self.spsi * self.B0\n self.abs_G0_over_B0 = abs_G0_over_B0\n\n # The output is not stellarator-symmetric if (1) R0s is nonzero,\n # (2) Z0c is nonzero, (3) sigma_initial is nonzero, or (B2s is\n # nonzero and order != 'r1')\n self.lasym = np.max(np.abs(self.rs)) > 0 or np.max(np.abs(self.zc)) > 0 \\\n or self.sigma0 != 0 or (self.order != 'r1' and self.B2s != 0)\n\n # Functions that converts a toroidal angle phi0 on the axis to the axis radial and vertical coordinates\n self.R0_func = self.convert_to_spline(sum([self.rc[i]*np.cos(i*self.nfp*self.phi) +\\\n self.rs[i]*np.sin(i*self.nfp*self.phi) \\\n for i in range(len(self.rc))]))\n self.Z0_func = self.convert_to_spline(sum([self.zc[i]*np.cos(i*self.nfp*self.phi) +\\\n self.zs[i]*np.sin(i*self.nfp*self.phi) \\\n for i in range(len(self.zs))]))\n\n # Spline interpolants for the cylindrical components of the Frenet-Serret frame:\n self.normal_R_spline = self.convert_to_spline(self.normal_cylindrical[:,0])\n self.normal_phi_spline = self.convert_to_spline(self.normal_cylindrical[:,1])\n self.normal_z_spline = self.convert_to_spline(self.normal_cylindrical[:,2])\n self.binormal_R_spline = self.convert_to_spline(self.binormal_cylindrical[:,0])\n 
self.binormal_phi_spline = self.convert_to_spline(self.binormal_cylindrical[:,1])\n self.binormal_z_spline = self.convert_to_spline(self.binormal_cylindrical[:,2])\n self.tangent_R_spline = self.convert_to_spline(self.tangent_cylindrical[:,0])\n self.tangent_phi_spline = self.convert_to_spline(self.tangent_cylindrical[:,1])\n self.tangent_z_spline = self.convert_to_spline(self.tangent_cylindrical[:,2])\n\n # Spline interpolant for nu = varphi - phi, used for plotting\n self.nu_spline = self.convert_to_spline(self.varphi - self.phi)", "def inner(Ax, Ay, Bx, By):\n return (Ax*Bx + Ay*By) / (Ax**2+Ay**2)**0.5 / (Bx**2+By**2)**0.5", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity \n\n return eps, mu", "def get_params(img, output_size):\n c, h, w = img.shape\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = (h - th)//2\n j = (w - tw)//2\n return i, j, th, tw", "def test_array_orientation_consistency_tilt():\n samples = 128\n p = FringeZernike(Z2=1000, samples=samples)\n ps = PSF.from_pupil(p, 1)\n idx_y, idx_x = np.unravel_index(ps.data.argmax(), ps.data.shape) # row-major y, x\n assert idx_x == ps.center_x\n assert idx_y > ps.center_y", "def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec", "def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec", "def thetaInner(self):\n if self.theta in range(1, len(self.ThRZmesh.getPositions(label=\"Th\"))):\n Th = self.ThRZmesh.getUpper(label=\"Th\", n=(self.theta - 1))\n else:\n runLog.warning(\n \"Error: Azimuthal Index ({0}) location not INSIDE mesh \".format(\n self.theta\n )\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"Th\"))\n Th = None\n return Th", "def theta(self):\n self.eigenvalues()", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)", "def polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def axialInner(self):\n if self.axial in range(0, len(self.ThRZmesh.getPositions(label=\"Z\"))):\n Z = self.ThRZmesh.getUpper(label=\"Z\", n=self.axial)\n else:\n Z = None\n return Z", "def photons(self, depth=1):\n self.dx[:,:self.W-1] = self.z[:,1:] - self.z[:,:self.W-1]\n self.dy[:self.H-1,:] = self.z[1:,:] - self.z[:self.H-1,:]\n px = self.xv - self.dx*depth\n py = self.yv - self.dy*depth\n return px,py", "def sivina(self):\n return (self.r + self.g + self.b) / 3", "def get_alignment_params(self, s, 
w):\n\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # 
print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def orientation(point_p, point_q, point_r):\n # Set https://www.geeksforgeeks.org/orientation-3-ordered-points/\n # for details of below formula.\n r = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -\n (point_q.x - point_p.x) * (point_r.y - point_q.y))\n if r == 0:\n return 0\n return 1 if r > 0 else 2", "def _get_params(self):\r\n return np.hstack((self.variance,self.lengthscale))", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def principal(sigma):\n vals, vecs = np.linalg.eigh(sigma)\n vals = np.abs(vals)\n order = vals.argsort()[::-1]\n vecs = vecs[:, order]\n vals = vals[order]\n\n func = mplstereonet.vector2plunge_bearing\n vec_orientations = [func(*item) for item in vecs.T]\n\n \"\"\"\n val1, val2, val3 = vals\n print (val2-val3) / (val1 - val3)\n print 'sigma1', val1 \n print 'sigma2', val2\n print 'sigma3', val3\n \"\"\"\n return vals, vec_orientations", "def axialOuter(self):\n if (self.axial + 1) in range(1, len(self.ThRZmesh.getPositions(label=\"Z\"))):\n Z = self.ThRZmesh.getUpper(label=\"Z\", n=(self.axial + 1))\n else:\n Z = None\n return Z", "def get_alignment_params(self, s, w):\n\n X1 = s.__get_X(w)\n X2 = self.__get_X(w)\n Y1 = s.__get_Y(w)\n Y2 = self.__get_Y(w)\n Z = self.__get_Z(w)\n W = sum(w)\n C1 = self.__get_C1(w, s)\n C2 = self.__get_C2(w, s)\n\n a = np.array([[ X2, -Y2, W, 0],\n [ Y2, X2, 0, W],\n [ Z, 0, X2, Y2],\n [ 0, Z, -Y2, X2]])\n\n b = np.array([X1, Y1, C1, C2])\n # Solve equations\n # result is [ax, ay, tx, ty]\n return np.linalg.solve(a, b)", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) 
~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def vae_encoder(self):\n return stax.serial(\n stax.Dense(self.hidden_dims[0], W_init=stax.randn()),\n stax.Relu,\n stax.Dense(self.hidden_dims[1], W_init=stax.randn()),\n stax.Relu,\n stax.FanOut(2),\n stax.parallel(\n stax.Dense(self.z_dim, W_init=stax.randn()), # mean\n stax.serial(stax.Dense(self.z_dim, W_init=stax.randn()), stax.Exp), # std -- i.e. 
diagonal covariance\n ),\n )", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def _get_params(self):\r\n return np.hstack((self.variance, self.lengthscale))", "def contract_tenors(self):\n\n\tself.r_outer_r[:,:,0,1,:] = self.r_outer_r[:,:,0,1,:]/(1. - self.k_dot_r[0,1,:])\n\tself.r_outer_r[:,:,0,2,:] = self.r_outer_r[:,:,0,2,:]/(1. - self.k_dot_r[0,2,:])\n\t\n\tself.r_outer_r[:,:,1,0,:] = self.r_outer_r[:,:,1,0,:]/(1. - self.k_dot_r[1,0,:])\n\tself.r_outer_r[:,:,1,2,:] = self.r_outer_r[:,:,1,2,:]/(1. - self.k_dot_r[1,2,:])\n\t\n\tself.r_outer_r[:,:,2,0,:] = self.r_outer_r[:,:,2,0,:]/(1. - self.k_dot_r[2,0,:])\n\tself.r_outer_r[:,:,2,1,:] = self.r_outer_r[:,:,2,1,:]/(1. - self.k_dot_r[2,1,:])\n\n\tself.delta_l = np.zeros((3,3,self.N),dtype=np.complex_)\n \n\tself.delta_l[0,1,:] = get_l(self,0,1)\n\tself.delta_l[1,0,:] = get_l(self,1,0)\n\t\n\tself.delta_l[0,2,:] = get_l(self,0,2)\n\tself.delta_l[2,0,:] = get_l(self,2,0)\n\t\n\tself.delta_l[1,2,:] = get_l(self,1,2)\n\tself.delta_l[2,1,:] = get_l(self,2,1)\n \n\treturn", "def get_orientation_vector(self, xyz):\n if self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def point_orientation(a, b, c):\n return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y) >= 0", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def GetParametricCoords(self):\n ...", "def CreateBiPennate2():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n Vectors1 = LongaxisOrtho(Vectors1)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/Project_Gastro/workflows/Cesim/musc_mod_v2/OutputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((np.shape(Vectors1)[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = -30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = 30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(211,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,1)\r\n\r\n ax2 = fig.add_subplot(212,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,1)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = 
\\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres.dat\",Vectors2,header = header,comments='')", "def get_params(pic, output_size):\n\n w, h, c = pic.shape\n th, tw = output_size\n\n i = int(round((h - th) / 2.))\n j = int(round((w - tw) / 2.))\n\n return i, j, th, tw", "def Orientation(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Orientation(self, *args)", "def get_params(pic, output_size):\n\n c, w, h = pic.shape\n th, tw = output_size\n\n i = int(round((h - th) / 2.))\n j = int(round((w - tw) / 2.))\n\n return i, j, th, tw", "def vp_from_ke(m):\n return (m[0, 0]/m[2,0], m[1,0]/m[2,0])", "def _corner_orientations(*args, **kwargs): # real signature unknown\n pass", "def get_orienationDict(self,orienation='zyx'):\n try:\n _str_func = 'rootShape_update'\n log.debug(cgmGEN.logString_start(_str_func))\n \n _d = {}\n _mOrientation = VALID.simpleOrientation('zyx')#cgmValid.simpleOrientation(str(modules.returnSettingsData('jointOrientation')) or 'zyx')\n _d['str'] = _mOrientation.p_string\n _d['mOrientation'] = _mOrientation\n _d['vectorAim'] = _mOrientation.p_aim.p_vector\n _d['vectorUp'] = _mOrientation.p_up.p_vector\n _d['vectorOut'] = _mOrientation.p_out.p_vector\n \n _d['vectorAimNeg'] = _mOrientation.p_aimNegative.p_vector\n _d['vectorUpNeg'] = _mOrientation.p_upNegative.p_vector\n _d['vectorOutNeg'] = _mOrientation.p_outNegative.p_vector\n \n \n _d['stringAim'] = _mOrientation.p_aim.p_string\n _d['stringUp'] = _mOrientation.p_up.p_string\n _d['stringOut'] = _mOrientation.p_out.p_string\n \n _d['stringAimNeg'] = _mOrientation.p_aimNegative.p_string\n _d['stringUpNeg'] = _mOrientation.p_upNegative.p_string\n _d['stringOutNeg'] = _mOrientation.p_outNegative.p_string \n return _d\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def orientation(self, point):\n p_x = self.begin.x\n p_y = self.begin.y\n\n q_x = self.end.x\n q_y = self.end.y\n\n r_x = point.x\n r_y = point.y\n\n D = q_x * r_y + p_x * q_y + p_y * r_x - q_x * p_y - r_x * q_y - r_y * p_x\n\n if D > 0:\n return 1\n elif D == 0:\n return 0\n else:\n return -1", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def res(self) -> tuple[float, float]:\n xs, ys = self.xcoords.data, self.ycoords.data\n dx, dy = 0, 0\n if xs.ndim == 1:\n dx = xs[1] - xs[0]\n dy = ys[1] - ys[0]\n elif xs.ndim == 2:\n ddx0 = xs[1, 0] - xs[0, 0]\n ddy0 = ys[1, 0] - ys[0, 0]\n ddx1 = xs[0, 1] - xs[0, 0]\n ddy1 = ys[0, 1] - ys[0, 0]\n dx = math.hypot(ddx1, ddy1) # always positive!\n dy = math.hypot(ddx0, ddy0)\n rot = self.rotation\n acos = math.cos(math.radians(rot))\n # find grid top-down orientation\n if (\n (acos < 0 and ddy0 > 0)\n or (acos > 0 and ddy0 < 0)\n or (\n ddy0 == 0\n and (np.isclose(rot, 270) and ddx0 < 0)\n or (np.isclose(rot, 90) and ddx0 > 0)\n )\n ):\n dy = -1 * dy\n return dx, dy", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def naturalAspectRatio(self):\n return math.sin(self.view_angle_h) / math.sin(self.view_angle_v)", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = 
np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def get_iperparams(self):\n\t\treturn (self.D, self.K)", "def _r_inv(self):\n # [output_dim, output_dim]\n return tf.linalg.cholesky_solve(\n self._chol_obs_covariance,\n tf.eye(self.emission.output_dim, dtype=self._chol_obs_covariance.dtype),\n )", "def rotate(p,q,A,V): \n n = A.shape[0]\n App, Aqq, Apq = A[p,p], A[q,q], A[p,q] #Initial values\n phi = 0.5*math.atan2(2*Apq, Aqq-App) #Find the rotation value\n c, s = math.cos(phi), math.sin(phi) #Calculate sin and cos\n\n #Update the matrix diagonal elements\n A[p,p] = c*c*App + s*s*Aqq - 2*s*c*Apq \n A[q,q] = s*s*App + c*c*Aqq + 2*s*c*Apq\n A[p,q] = 0 #This is zero by construction\n \n \n #Iterate over and update remaining off-diagonal elements\n for i in range(p):\n Aip, Aiq = A[i,p], A[i,q]\n A[i,p] = c*Aip - s*Aiq\n A[i,q] = c*Aiq + s*Aip\n \n for i in range(p+1,q):\n Api, Aiq = A[p,i], A[i,q]\n A[p,i] = c*Api - s*Aiq\n A[i,q] = c*Aiq + s*Api\n \n for i in range(q+1,n):\n Api, Aqi = A[p,i], A[q,i]\n A[p,i] = c*Api - s*Aqi\n A[q,i] = c*Aqi + s*Api\n \n #Update eigenvectors in matrix V\n for i in range(n):\n Vip, Viq = V[i,p], V[i,q]\n V[i,p] = c*Vip - s*Viq\n V[i,q] = s*Vip + c*Viq\n \n return A, V", "def _prepare_outer_matrix(self):\n self._mat_plane = numpy.array([\n self._scaling[0], 0, 0, 0,\n 0, self._scaling[1], 0, 0,\n 0, 0, 1, 0,\n self.i_border[0], -self.i_border[1], 0, 1\n ], dtype=numpy.float32)", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def orthopyroxene():\n\n rho = 3304.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 236.9; C[0,1] = 79.6; C[0,2] = 63.2; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 180.5; C[1,2] = 56.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 84.3; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 79.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 80.1\n\n return C, rho", "def implicit_surface(self, F, y):\n y = y[:, :, None].expand(-1, -1, self.n_primitives, -1)\n y_latent, ldj = self.invertible_network.inverse(F, y)\n norm = torch.sqrt((y_latent**2).sum(-1))\n\n # <0 denotes internal points\n # >0 denotes external points\n # 0 is the boundary hence our primitive\n return norm - self.radius, ldj", "def parameters(conv, orthogonal):\n nrm = operator_one_norm(conv.weight).detach().cpu().numpy()\n\n if nrm > 15:\n print('Overflow likely, norm={}'.format(nrm))\n\n m = np.arange(1, len(THETA) + 1)\n vals = m * np.ceil(nrm / THETA)\n mstar = min(1 + np.argmin(vals), 56)\n s = int(np.ceil(nrm / THETA[mstar - 1]))\n\n return mstar, s", "def CreateBiPennate1():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final 
Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((Vectors1.shape[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = 30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = -30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres30.dat\",Vectors2,header = header,comments='')", "def bivariate_variance(x, mu_x, y, mu_y, pdf):\n\n if pdf.shape[0] != x.shape[0] or pdf.shape[1] != y.shape[0]:\n print(\"Error, mesh size does not match x and y\")\n n_x = x.shape[0]\n n_y = y.shape[0]\n var_int_x, var_int_y = 0.0, 0.0\n p_of_x, p_of_y = np.zeros(n_x), np.zeros(n_y)\n for i in range(0, n_x):\n for j in range(1, n_y):\n delta_y = y[j] - y[j - 1]\n p_of_x[i] += delta_y / 2.0 * (pdf[i, j] + pdf[i, j - 1])\n if i > 0:\n delta_x = x[i] - x[i - 1]\n var_int_x += (\n delta_x\n / 2.0\n * (\n (x[i] - mu_x) ** 2.0 * p_of_x[i]\n + (x[i - 1] - mu_x) ** 2.0 * p_of_x[i - 1]\n )\n )\n\n for j in range(0, n_y):\n for i in range(1, n_x):\n delta_x = x[i] - x[i - 1]\n p_of_y[j] += delta_x / 2.0 * (pdf[i, j] + pdf[i - 1, j])\n if j > 0:\n delta_y = y[j] - y[j - 1]\n var_int_y += (\n delta_y\n / 2.0\n * (\n (y[j] - mu_y) ** 2.0 * p_of_y[j]\n + (y[j - 1] - mu_y) ** 2.0 * p_of_y[j - 1]\n )\n )\n\n return var_int_x, var_int_y", "def diriv(x, params):\n return np.array([x,1])", "def _standardize_pose(self):\n self.mesh_.center_vertices_bb()\n vertex_array_cent = np.array(self.mesh_.vertices())\n\n # find principal axes\n pca = sklearn.decomposition.PCA(n_components = 3)\n pca.fit(vertex_array_cent)\n\n # count num vertices on side of origin wrt principal axes\n comp_array = pca.components_\n norm_proj = vertex_array_cent.dot(comp_array.T)\n opposite_aligned = np.sum(norm_proj < 0, axis = 0)\n same_aligned = np.sum(norm_proj >= 0, axis = 0)\n pos_oriented 
= 1 * (same_aligned > opposite_aligned) # trick to turn logical to int\n neg_oriented = 1 - pos_oriented\n\n # create rotation from principal axes to standard basis\n target_array = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]) # Z+, Y+, X+\n target_array = target_array * pos_oriented + -1 * target_array * neg_oriented\n R = np.linalg.solve(comp_array, target_array)\n R = R.T\n\n # rotate vertices, normals and reassign to the mesh\n vertex_array_rot = R.dot(vertex_array_cent.T)\n vertex_array_rot = vertex_array_rot.T\n self.mesh_.set_vertices(vertex_array_rot.tolist())\n self.mesh_.center_vertices_bb()\n\n if self.mesh_.normals() is not None:\n normals_array = np.array(self.mesh_.normals_)\n normals_array_rot = R.dot(normals_array.T)\n self.mesh_.set_normals(normals_array_rot.tolist())", "def thetaOuter(self):\n if self.theta in range(1, len(self.ThRZmesh.getPositions(label=\"Th\"))):\n Th = self.ThRZmesh.getUpper(label=\"Th\", n=(self.theta))\n else:\n runLog.warning(\n \"Error: Azimuthal Index ({}) location not INSIDE mesh \".format(\n self.theta\n )\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"Th\"))\n Th = None\n return Th", "def get_bar_yz_transform(v, ihat, eid, xyz1, xyz2, nid1, nid2, i, Li):\n vhat = v / norm(v) # j\n try:\n z = np.cross(ihat, vhat) # k\n except ValueError:\n msg = 'Invalid vector length\\n'\n msg += 'xyz1=%s\\n' % str(xyz1)\n msg += 'xyz2=%s\\n' % str(xyz2)\n msg += 'nid1=%s\\n' % str(nid1)\n msg += 'nid2=%s\\n' % str(nid2)\n msg += 'i =%s\\n' % str(i)\n msg += 'Li =%s\\n' % str(Li)\n msg += 'ihat=%s\\n' % str(ihat)\n msg += 'v =%s\\n' % str(v)\n msg += 'vhat=%s\\n' % str(vhat)\n msg += 'z=cross(ihat, vhat)'\n print(msg)\n raise ValueError(msg)\n\n zhat = z / norm(z)\n yhat = np.cross(zhat, ihat) # j\n\n if norm(ihat) == 0.0 or norm(yhat) == 0.0 or norm(z) == 0.0:\n print(' invalid_orientation - eid=%s yhat=%s zhat=%s v=%s i=%s n%s=%s n%s=%s' % (\n eid, yhat, zhat, v, i, nid1, xyz1, nid2, xyz2))\n elif not np.allclose(norm(yhat), 1.0) or not np.allclose(norm(zhat), 1.0) or Li == 0.0:\n print(' length_error - eid=%s Li=%s Lyhat=%s Lzhat=%s'\n ' v=%s i=%s n%s=%s n%s=%s' % (\n eid, Li, norm(yhat), norm(zhat), v, i, nid1, xyz1, nid2, xyz2))\n return yhat, zhat", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def IK_geometric(dh_params, pose):\n pass", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), 
np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def angle(z):", "def el2rv(mu,a,e,i,capom,om,f):\n\n prec = 1.0e-13 #user can change this if more precision needed (just runs slower)\n\n #compute the unit vector\n u = om + f\n xhat = np.cos(u)*np.cos(capom) - np.cos(i)*np.sin(capom)*np.sin(u)\n yhat = np.cos(u)*np.sin(capom) + np.cos(i)*np.cos(capom)*np.sin(u)\n zhat = np.sin(i)*np.sin(u)\n\n #compute the angular momentum vector (unit vector)\n hx = np.sin(capom)*np.sin(i)\n hy = -np.cos(capom)*np.sin(i)\n hz = np.cos(i)\n\n #assuming not parabolic, here the magnitudes of the vectors\n r = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n h = ( mu*a*(1.0 - e*e) )**0.5\n\n #position vectors\n x = r * xhat\n y = r * yhat\n z = r * zhat\n\n #compute components of vector theta hat\n thx = hy * zhat - hz * yhat\n thy = hz * xhat - hx * zhat\n thz = hx * yhat - hy * xhat\n\n #obtain the velocity vector's components and calculate v\n thdot = h/(r*r)\n rdot = e*mu*np.sin(f)/h\n\n vx = r * thdot * thx + rdot * xhat\n vy = r * thdot * thy + rdot * yhat\n vz = r * thdot * thz + rdot * zhat\n\n return x,y,z", "def aic(self):\n return 2*self.number_of_parameters() - 2*self.ll[-1]", "def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 * np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def stokes_vertical():\n return np.array([1, -1, 0, 0])", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the 
rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot", "def euler_to_rodrigues(X_params):\n data_samples = X_params.shape[0]\n pose_euler = np.array([X_params[:, i:i+3] for i in range(0, 72, 3)])\n #print(pose_euler[0][0])\n #pose_euler = pose_euler.reshape((24, data_samples, 1, 3))\n #print(pose_euler[0][0])\n print(\"pose_euler shape: \" + str(pose_euler.shape))\n #R = np.array([[eulerAnglesToRotationMatrix(vector) for vector in vectors] for vectors in pose_euler])\n #print(\"R shape: \" + str(R.shape))\n #print(R[0][0])\n #R = R.reshape((data_samples, 24, 3, 3))\n\n #pose_params = np.array([[Rot.from_dcm(rot_mat).as_rotvec() for rot_mat in param_rot_mats] for param_rot_mats in R])\n pose_params = np.array([Rot.from_euler('xyz', vectors, degrees=False).as_rotvec() for vectors in pose_euler])\n print(\"pose_params shape: \" + str(pose_params.shape))\n pose_params = pose_params.reshape((data_samples, 72))\n print(\"pose_params shape: \" + str(pose_params.shape))\n print(\"other params shape: \" + str(X_params[:, 72:85].shape))\n X_params = np.concatenate([pose_params, X_params[:, 72:85]], axis=1)\n print(\"X_params shape: \" + str(X_params.shape))\n\n return X_params" ]
[ "0.687762", "0.6436624", "0.58278096", "0.578275", "0.57705504", "0.5755263", "0.5649262", "0.5623161", "0.5620293", "0.5616361", "0.55939484", "0.55838054", "0.5497484", "0.54887134", "0.54881567", "0.54828835", "0.54536176", "0.5438182", "0.5438182", "0.54221433", "0.5411174", "0.54104936", "0.53689116", "0.53688836", "0.53652114", "0.53632826", "0.5338338", "0.53027475", "0.5285858", "0.52843916", "0.5277243", "0.5275149", "0.52661824", "0.52595943", "0.52595943", "0.5252855", "0.5250698", "0.5245457", "0.52365184", "0.52214557", "0.51974505", "0.51797", "0.5167385", "0.51653266", "0.515188", "0.5137323", "0.513427", "0.51262015", "0.51221794", "0.51210374", "0.5119016", "0.5114289", "0.5097322", "0.5082281", "0.50787944", "0.50787944", "0.5069311", "0.50676394", "0.5064282", "0.50564176", "0.5048234", "0.50453955", "0.504501", "0.50447565", "0.5037875", "0.50369227", "0.50270355", "0.50254506", "0.5022939", "0.5016382", "0.50148314", "0.5007597", "0.50057447", "0.50034195", "0.5002838", "0.5000152", "0.49955603", "0.498667", "0.49840796", "0.497041", "0.49607646", "0.49607617", "0.49601522", "0.4954888", "0.49545398", "0.49484786", "0.4947139", "0.49442494", "0.49421576", "0.49409756", "0.49274042", "0.49223745", "0.49204567", "0.49100202", "0.4909333", "0.49066365", "0.49063104", "0.4905511", "0.49017295", "0.4897041" ]
0.7024464
0
Computes the geometric inner orientation parameters
def ComputeGeometricParameters(self):
    # extracting inner orientation params
    a0 = self.innerOrientationParameters[0]
    b0 = self.innerOrientationParameters[1]
    a1 = self.innerOrientationParameters[2]
    a2 = self.innerOrientationParameters[3]
    b1 = self.innerOrientationParameters[4]
    b2 = self.innerOrientationParameters[5]

    # computing algebric params
    tx = a0; ty = b0
    theta = np.arctan(b1 / b2)
    gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))
    sx = a1 * np.cos(theta) - a2 * np.sin(theta)
    sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)

    return {"translationX": tx, "translationY": ty, "rotationAngle": np.rad2deg(theta),
            "scaleFactorX": sx, "scaleFactorY": sy, "shearAngle": np.rad2deg(gamma)}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def get_orientation_vector(self, xyz):\n if self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = 
np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def get_orienationDict(self,orienation='zyx'):\n try:\n _str_func = 'rootShape_update'\n log.debug(cgmGEN.logString_start(_str_func))\n \n _d = {}\n _mOrientation = VALID.simpleOrientation('zyx')#cgmValid.simpleOrientation(str(modules.returnSettingsData('jointOrientation')) or 'zyx')\n _d['str'] = _mOrientation.p_string\n _d['mOrientation'] = _mOrientation\n _d['vectorAim'] = _mOrientation.p_aim.p_vector\n _d['vectorUp'] = _mOrientation.p_up.p_vector\n _d['vectorOut'] = _mOrientation.p_out.p_vector\n \n _d['vectorAimNeg'] = _mOrientation.p_aimNegative.p_vector\n _d['vectorUpNeg'] = _mOrientation.p_upNegative.p_vector\n _d['vectorOutNeg'] = _mOrientation.p_outNegative.p_vector\n \n \n _d['stringAim'] = _mOrientation.p_aim.p_string\n _d['stringUp'] = _mOrientation.p_up.p_string\n _d['stringOut'] = _mOrientation.p_out.p_string\n \n _d['stringAimNeg'] = _mOrientation.p_aimNegative.p_string\n _d['stringUpNeg'] = _mOrientation.p_upNegative.p_string\n _d['stringOutNeg'] = _mOrientation.p_outNegative.p_string \n return _d\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def IK_geometric(dh_params, pose):\n pass", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def 
polar_decomposition(self):\n return self.polar_unit_vector, self.polar_angle", "def greenhouse_orientation():\n \n # NEED TO CHECK THIS WITH COMPASS (OR IPHONE)\n orientation_angle = 90 # angle between east-west line and the length of the greenhouse (0-90 degree)\n orientation_angle = float(orientation_angle)", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def angle(z):", "def gyroi(E, B, mu, Zi,pitch):\n V = ev2vi(E, mu);\n Vperp = V*np.sqrt(1-pitch);\n return mu * mp * Vperp / Zi / eV2J / B;", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def sivina(self):\n return (self.r + self.g + self.b) / 3", "def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec", "def get_angle_and_body_vector(moments):\n body_cov = np.array( [ [moments['mu20'], moments['mu11']], [moments['mu11'], moments['mu02'] ]])\n eig_vals, eig_vecs = np.linalg.eigh(body_cov)\n max_eig_ind = np.argmax(eig_vals**2)\n max_eig_vec = eig_vecs[:,max_eig_ind]\n angle = np.arctan2(max_eig_vec[1], max_eig_vec[0])\n return angle, max_eig_vec", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def orient_2d(p,q,r):\n return (q[0]-p[0])*(r[1]-p[1]) - (r[0]-p[0])*(q[1]-p[1])", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def init_axis(self):\n # Shorthand:\n nphi = self.nphi\n nfp = self.nfp\n\n phi = np.linspace(0, 2 * np.pi / nfp, nphi, endpoint=False)\n d_phi = phi[1] - phi[0]\n R0 = np.zeros(nphi)\n Z0 = np.zeros(nphi)\n R0p = np.zeros(nphi)\n Z0p = np.zeros(nphi)\n R0pp = np.zeros(nphi)\n Z0pp = np.zeros(nphi)\n R0ppp = np.zeros(nphi)\n Z0ppp = np.zeros(nphi)\n for jn in range(0, self.nfourier):\n n = jn * nfp\n sinangle = np.sin(n * phi)\n 
cosangle = np.cos(n * phi)\n R0 += self.rc[jn] * cosangle + self.rs[jn] * sinangle\n Z0 += self.zc[jn] * cosangle + self.zs[jn] * sinangle\n R0p += self.rc[jn] * (-n * sinangle) + self.rs[jn] * (n * cosangle)\n Z0p += self.zc[jn] * (-n * sinangle) + self.zs[jn] * (n * cosangle)\n R0pp += self.rc[jn] * (-n * n * cosangle) + self.rs[jn] * (-n * n * sinangle)\n Z0pp += self.zc[jn] * (-n * n * cosangle) + self.zs[jn] * (-n * n * sinangle)\n R0ppp += self.rc[jn] * (n * n * n * sinangle) + self.rs[jn] * (-n * n * n * cosangle)\n Z0ppp += self.zc[jn] * (n * n * n * sinangle) + self.zs[jn] * (-n * n * n * cosangle)\n\n d_l_d_phi = np.sqrt(R0 * R0 + R0p * R0p + Z0p * Z0p)\n d2_l_d_phi2 = (R0 * R0p + R0p * R0pp + Z0p * Z0pp) / d_l_d_phi\n B0_over_abs_G0 = nphi / np.sum(d_l_d_phi)\n abs_G0_over_B0 = 1 / B0_over_abs_G0\n self.d_l_d_varphi = abs_G0_over_B0\n G0 = self.sG * abs_G0_over_B0 * self.B0\n\n # For these next arrays, the first dimension is phi, and the 2nd dimension is (R, phi, Z).\n d_r_d_phi_cylindrical = np.array([R0p, R0, Z0p]).transpose()\n d2_r_d_phi2_cylindrical = np.array([R0pp - R0, 2 * R0p, Z0pp]).transpose()\n d3_r_d_phi3_cylindrical = np.array([R0ppp - 3 * R0p, 3 * R0pp - R0, Z0ppp]).transpose()\n\n tangent_cylindrical = np.zeros((nphi, 3))\n d_tangent_d_l_cylindrical = np.zeros((nphi, 3))\n for j in range(3):\n tangent_cylindrical[:,j] = d_r_d_phi_cylindrical[:,j] / d_l_d_phi\n d_tangent_d_l_cylindrical[:,j] = (-d_r_d_phi_cylindrical[:,j] * d2_l_d_phi2 / d_l_d_phi \\\n + d2_r_d_phi2_cylindrical[:,j]) / (d_l_d_phi * d_l_d_phi)\n\n curvature = np.sqrt(d_tangent_d_l_cylindrical[:,0] * d_tangent_d_l_cylindrical[:,0] + \\\n d_tangent_d_l_cylindrical[:,1] * d_tangent_d_l_cylindrical[:,1] + \\\n d_tangent_d_l_cylindrical[:,2] * d_tangent_d_l_cylindrical[:,2])\n\n axis_length = np.sum(d_l_d_phi) * d_phi * nfp\n rms_curvature = np.sqrt((np.sum(curvature * curvature * d_l_d_phi) * d_phi * nfp) / axis_length)\n mean_of_R = np.sum(R0 * d_l_d_phi) * d_phi * nfp / axis_length\n mean_of_Z = np.sum(Z0 * d_l_d_phi) * d_phi * nfp / axis_length\n standard_deviation_of_R = np.sqrt(np.sum((R0 - mean_of_R) ** 2 * d_l_d_phi) * d_phi * nfp / axis_length)\n standard_deviation_of_Z = np.sqrt(np.sum((Z0 - mean_of_Z) ** 2 * d_l_d_phi) * d_phi * nfp / axis_length)\n\n normal_cylindrical = np.zeros((nphi, 3))\n for j in range(3):\n normal_cylindrical[:,j] = d_tangent_d_l_cylindrical[:,j] / curvature\n self.normal_cylindrical = normal_cylindrical\n self._determine_helicity()\n\n # b = t x n\n binormal_cylindrical = np.zeros((nphi, 3))\n binormal_cylindrical[:,0] = tangent_cylindrical[:,1] * normal_cylindrical[:,2] - tangent_cylindrical[:,2] * normal_cylindrical[:,1]\n binormal_cylindrical[:,1] = tangent_cylindrical[:,2] * normal_cylindrical[:,0] - tangent_cylindrical[:,0] * normal_cylindrical[:,2]\n binormal_cylindrical[:,2] = tangent_cylindrical[:,0] * normal_cylindrical[:,1] - tangent_cylindrical[:,1] * normal_cylindrical[:,0]\n\n # We use the same sign convention for torsion as the\n # Landreman-Sengupta-Plunk paper, wikipedia, and\n # mathworld.wolfram.com/Torsion.html. 
This sign convention is\n # opposite to Garren & Boozer's sign convention!\n torsion_numerator = (d_r_d_phi_cylindrical[:,0] * (d2_r_d_phi2_cylindrical[:,1] * d3_r_d_phi3_cylindrical[:,2] - d2_r_d_phi2_cylindrical[:,2] * d3_r_d_phi3_cylindrical[:,1]) \\\n + d_r_d_phi_cylindrical[:,1] * (d2_r_d_phi2_cylindrical[:,2] * d3_r_d_phi3_cylindrical[:,0] - d2_r_d_phi2_cylindrical[:,0] * d3_r_d_phi3_cylindrical[:,2]) \n + d_r_d_phi_cylindrical[:,2] * (d2_r_d_phi2_cylindrical[:,0] * d3_r_d_phi3_cylindrical[:,1] - d2_r_d_phi2_cylindrical[:,1] * d3_r_d_phi3_cylindrical[:,0]))\n\n torsion_denominator = (d_r_d_phi_cylindrical[:,1] * d2_r_d_phi2_cylindrical[:,2] - d_r_d_phi_cylindrical[:,2] * d2_r_d_phi2_cylindrical[:,1]) ** 2 \\\n + (d_r_d_phi_cylindrical[:,2] * d2_r_d_phi2_cylindrical[:,0] - d_r_d_phi_cylindrical[:,0] * d2_r_d_phi2_cylindrical[:,2]) ** 2 \\\n + (d_r_d_phi_cylindrical[:,0] * d2_r_d_phi2_cylindrical[:,1] - d_r_d_phi_cylindrical[:,1] * d2_r_d_phi2_cylindrical[:,0]) ** 2\n\n torsion = torsion_numerator / torsion_denominator\n\n self.etabar_squared_over_curvature_squared = self.etabar * self.etabar / (curvature * curvature)\n\n self.d_d_phi = spectral_diff_matrix(self.nphi, xmax=2 * np.pi / self.nfp)\n self.d_varphi_d_phi = B0_over_abs_G0 * d_l_d_phi\n self.d_d_varphi = np.zeros((nphi, nphi))\n for j in range(nphi):\n self.d_d_varphi[j,:] = self.d_d_phi[j,:] / self.d_varphi_d_phi[j]\n\n # Compute the Boozer toroidal angle:\n self.varphi = np.zeros(nphi)\n for j in range(1, nphi):\n # To get toroidal angle on the full mesh, we need d_l_d_phi on the half mesh.\n self.varphi[j] = self.varphi[j-1] + (d_l_d_phi[j-1] + d_l_d_phi[j])\n self.varphi = self.varphi * (0.5 * d_phi * 2 * np.pi / axis_length)\n\n # Add all results to self:\n self.phi = phi\n self.d_phi = d_phi\n self.R0 = R0\n self.Z0 = Z0\n self.R0p = R0p\n self.Z0p = Z0p\n self.R0pp = R0pp\n self.Z0pp = Z0pp\n self.R0ppp = R0ppp\n self.Z0ppp = Z0ppp\n self.G0 = G0\n self.d_l_d_phi = d_l_d_phi\n self.axis_length = axis_length\n self.curvature = curvature\n self.torsion = torsion\n self.X1s = np.zeros(nphi)\n self.X1c = self.etabar / curvature\n self.min_R0 = fourier_minimum(self.R0)\n self.tangent_cylindrical = tangent_cylindrical\n self.normal_cylindrical = normal_cylindrical \n self.binormal_cylindrical = binormal_cylindrical\n self.Bbar = self.spsi * self.B0\n self.abs_G0_over_B0 = abs_G0_over_B0\n\n # The output is not stellarator-symmetric if (1) R0s is nonzero,\n # (2) Z0c is nonzero, (3) sigma_initial is nonzero, or (B2s is\n # nonzero and order != 'r1')\n self.lasym = np.max(np.abs(self.rs)) > 0 or np.max(np.abs(self.zc)) > 0 \\\n or self.sigma0 != 0 or (self.order != 'r1' and self.B2s != 0)\n\n # Functions that converts a toroidal angle phi0 on the axis to the axis radial and vertical coordinates\n self.R0_func = self.convert_to_spline(sum([self.rc[i]*np.cos(i*self.nfp*self.phi) +\\\n self.rs[i]*np.sin(i*self.nfp*self.phi) \\\n for i in range(len(self.rc))]))\n self.Z0_func = self.convert_to_spline(sum([self.zc[i]*np.cos(i*self.nfp*self.phi) +\\\n self.zs[i]*np.sin(i*self.nfp*self.phi) \\\n for i in range(len(self.zs))]))\n\n # Spline interpolants for the cylindrical components of the Frenet-Serret frame:\n self.normal_R_spline = self.convert_to_spline(self.normal_cylindrical[:,0])\n self.normal_phi_spline = self.convert_to_spline(self.normal_cylindrical[:,1])\n self.normal_z_spline = self.convert_to_spline(self.normal_cylindrical[:,2])\n self.binormal_R_spline = self.convert_to_spline(self.binormal_cylindrical[:,0])\n 
self.binormal_phi_spline = self.convert_to_spline(self.binormal_cylindrical[:,1])\n self.binormal_z_spline = self.convert_to_spline(self.binormal_cylindrical[:,2])\n self.tangent_R_spline = self.convert_to_spline(self.tangent_cylindrical[:,0])\n self.tangent_phi_spline = self.convert_to_spline(self.tangent_cylindrical[:,1])\n self.tangent_z_spline = self.convert_to_spline(self.tangent_cylindrical[:,2])\n\n # Spline interpolant for nu = varphi - phi, used for plotting\n self.nu_spline = self.convert_to_spline(self.varphi - self.phi)", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def orthopyroxene():\n\n rho = 3304.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 236.9; C[0,1] = 79.6; C[0,2] = 63.2; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 180.5; C[1,2] = 56.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 84.3; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 79.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 80.1\n\n return C, rho", "def get_orientation_map_tensor(image, filts, rescale_angle=False, max_intensity=220):\n # move to numpy\n image = np.squeeze(image.numpy())\n\n # convolve Gabors and get energy of each\n magnitudes = []\n for filt in filts:\n sin_conv = convolve2d(image, filt[1], mode='same')\n cos_conv = convolve2d(image, filt[0], mode='same')\n\n magnitudes.append(np.sqrt(sin_conv ** 2 + cos_conv ** 2))\n\n orientation_vec = np.array([magnitudes[0] - magnitudes[2],\n magnitudes[1] - magnitudes[3]])\n\n return orientation_vec", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def GetParametricCoords(self):\n ...", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def acquisition_angles(Px,Gx):\n are_two_arrays_equal(Px, Gx)\n\n major_axis,minor_axis = earth_axes()\n Vx = Px - Gx # observation vector\n del 
Px\n Vdist = np.linalg.norm(Vx, axis=1) # make unit length\n Vx = np.einsum('i...,i->i...', Vx, np.divide(1, Vdist))\n del Vdist\n\n e_Z = np.einsum('...i,i->...i', Gx,\n 1 / np.array([major_axis, major_axis, minor_axis]))\n e_E = np.zeros_like(e_Z)\n e_E[..., 0], e_E[..., 1] = -e_Z[:, 1].copy(), e_Z[:, 0].copy()\n e_plan = np.linalg.norm(e_Z[:, :2], axis=1)\n e_E = np.einsum('i...,i->i...', e_E, np.divide(1, e_plan))\n del e_plan\n e_N = np.array([np.multiply(e_Z[:, 1], e_E[:, 2]) -\n np.multiply(e_Z[:, 2], e_E[:, 1]),\n np.multiply(e_Z[:, 2], e_E[:, 0]) -\n np.multiply(e_Z[:, 0], e_E[:, 2]),\n np.multiply(e_Z[:, 0], e_E[:, 1]) -\n np.multiply(e_Z[:, 1], e_E[:, 0])]).T\n\n LoS = np.zeros_like(e_Z)\n LoS[..., 0] = np.einsum('...i,...i->...', Vx, e_E)\n del e_E\n LoS[..., 1] = np.einsum('...i,...i->...', Vx, e_N)\n del e_N\n LoS[..., 2] = np.einsum('...i,...i->...', Vx, e_Z)\n del e_Z\n\n az = np.rad2deg(np.arctan2(LoS[..., 0], LoS[..., 1]))\n zn = np.rad2deg(np.arccos(LoS[...,2]))\n return zn, az", "def theta(self):\n self.eigenvalues()", "def orientation(a:tuple, b:tuple, c:tuple)->int:\n d = direction(a, b, c)\n if d == 0:\n return 0\n elif d > 0:\n return 1\n else:\n return -1", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def mercier(self):\n\n # See Overleaf note \"Mercier criterion near the magnetic axis- detailed notes\".\n # See also \"20200604-02 Checking sign in Mercier DGeod near axis.docx\"\n\n # Shorthand:\n d_l_d_phi = self.d_l_d_phi\n B0 = self.B0\n G0 = self.G0\n p2 = self.p2\n etabar = self.etabar\n curvature = self.curvature\n sigma = self.sigma\n iotaN = self.iotaN\n iota = self.iota\n pi = np.pi\n\n #integrand = d_l_d_phi * (Y1c * Y1c + X1c * (X1c + Y1s)) / (Y1c * Y1c + (X1c + Y1s) * (X1c + Y1s))\n integrand = d_l_d_phi * (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*sigma*sigma + etabar*etabar*curvature*curvature) \\\n / (etabar*etabar*etabar*etabar + curvature*curvature*curvature*curvature*(1+sigma*sigma) + 2*etabar*etabar*curvature*curvature)\n\n integral = np.sum(integrand) * self.d_phi * self.nfp * 2 * pi / self.axis_length\n\n #DGeod_times_r2 = -(2 * sG * spsi * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar &\n self.DGeod_times_r2 = -(2 * mu0 * mu0 * p2 * p2 * G0 * G0 * G0 * G0 * etabar * etabar \\\n / (pi * pi * pi * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * B0 * iotaN * iotaN)) \\\n * integral\n\n self.d2_volume_d_psi2 = 4*pi*pi*abs(G0)/(B0*B0*B0)*(3*etabar*etabar - 4*self.B20_mean/B0 + 2 * (self.G2 + iota * self.I2)/G0)\n\n self.DWell_times_r2 = (mu0 * p2 * abs(G0) / (8 * pi * pi * pi * pi * B0 * B0 * B0)) * \\\n (self.d2_volume_d_psi2 - 8 * pi * pi * mu0 * p2 * abs(G0) / (B0 * B0 * B0 * B0 * B0))\n\n self.DMerc_times_r2 = self.DWell_times_r2 + self.DGeod_times_r2", "def photons(self, depth=1):\n self.dx[:,:self.W-1] = self.z[:,1:] - self.z[:,:self.W-1]\n self.dy[:self.H-1,:] = self.z[1:,:] - self.z[:self.H-1,:]\n px = self.xv - self.dx*depth\n py = self.yv - self.dy*depth\n return px,py", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def orientate(arrayin,orientation):\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n \r\n if orientation == 1 :\r\n # x,y\r\n y = range(ny)\r\n x = range(nx)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 2 :\r\n # x,-y\r\n y = 
range(ny-2,-1,-1)\r\n y.append(0)\r\n x = range(nx)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 3 :\r\n # -x,y\r\n y = range(ny)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 4 :\r\n # -x,-y\r\n y = range(nx-2,-1,-1)\r\n y.append(0)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 5 :\r\n # x,y\r\n y = range(ny)\r\n x = range(nx)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 6 :\r\n # x,-y\r\n y = range(ny-2,-1,-1)\r\n y.append(0)\r\n x = range(nx)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 7 :\r\n # -x,y\r\n y = range(ny)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 8 :\r\n # -x,-y\r\n y = range(nx-2,-1,-1)\r\n y.append(0)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n y, x = np.meshgrid(x,y)\r\n else :\r\n print 'orientation must be an integer between 1 and 8.'\r\n return np.copy(arrayin[y,x])", "def estimate_rotation(bounding_box):\n # x,y coord of topleft corner\n x,y,w,h = bounding_box\n rotation_arg = np.abs(1 - (h/float(w)))*2\n return rad_to_deg( np.arctan(rotation_arg) )", "def zerodegree_pol(dim):\n\n out = zeros(dim)\n out[0] = 1\n\n return out", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)", "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def res(self) -> tuple[float, float]:\n xs, ys = self.xcoords.data, self.ycoords.data\n dx, dy = 0, 0\n if xs.ndim == 1:\n dx = xs[1] - xs[0]\n dy = ys[1] - ys[0]\n elif xs.ndim == 2:\n ddx0 = xs[1, 0] - xs[0, 0]\n ddy0 = ys[1, 0] - ys[0, 0]\n ddx1 = xs[0, 1] - xs[0, 0]\n ddy1 = ys[0, 1] - ys[0, 0]\n dx = math.hypot(ddx1, ddy1) # always positive!\n dy = math.hypot(ddx0, ddy0)\n rot = self.rotation\n acos = math.cos(math.radians(rot))\n # find grid top-down orientation\n if (\n (acos < 0 and ddy0 > 0)\n or (acos > 0 and ddy0 < 0)\n or (\n ddy0 == 0\n and (np.isclose(rot, 270) and ddx0 < 0)\n or (np.isclose(rot, 90) and ddx0 > 0)\n )\n ):\n dy = -1 * dy\n return dx, dy", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def givens_rotation(v1: float, v2: float) -> Tuple[float, float]:\n t = jnp.sqrt(v1**2 + v2**2)\n cs = v1 / t\n sn = -v2 / t\n return cs, sn", "def test_orientation_vector():\n\topening_angle = geom_instance.source_opening_angle\n\ttest_orientation = o_gen_instance.generate_orientation_vector()\n\tassert test_orientation[0] < np.cos(opening_angle)\n\tassert test_orientation[1] < np.sin(opening_angle)", "def olivine():\n\n rho = 3355.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 320.5; C[0,1] = 68.15; C[0,2] = 71.6; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 196.5; C[1,2] = 76.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 233.5; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 64.; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 77.; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; 
C[5,5] = 78.7\n\n return C, rho", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def photometric_calibration():\n pass", "def scalar_g2r(al, be, ga, lon, lat):\n\n rad = mt.pi / 180\n al = al * rad\n be = be * rad\n ga = ga * rad\n\n rotate_matrix = np.zeros(shape=(3, 3))\n\n rotate_matrix[0, 0] = np.cos(ga) * np.cos(al) - np.sin(ga) * np.cos(be) * np.sin(al)\n rotate_matrix[0, 1] = np.cos(ga) * np.sin(al) + np.sin(ga) * np.cos(be) * np.cos(al)\n rotate_matrix[0, 2] = np.sin(ga) * np.sin(be)\n rotate_matrix[1, 0] = -np.sin(ga) * np.cos(al) - np.cos(ga) * np.cos(be) * np.sin(\n al\n )\n rotate_matrix[1, 1] = -np.sin(ga) * np.sin(al) + np.cos(ga) * np.cos(be) * np.cos(\n al\n )\n rotate_matrix[1, 2] = np.cos(ga) * np.sin(be)\n rotate_matrix[2, 0] = np.sin(be) * np.sin(al)\n rotate_matrix[2, 1] = -np.sin(be) * np.cos(al)\n rotate_matrix[2, 2] = np.cos(be)\n \n #rotate_matrix = np.linalg.pinv(rotate_matrix)\n \n lat = lat * rad\n lon = lon * rad\n\n # geographical Cartesian coordinates:\n xr = np.cos(lat) * np.cos(lon)\n yr = np.cos(lat) * np.sin(lon)\n zr = np.sin(lat)\n\n # rotated Cartesian coordinates:\n xg = rotate_matrix[0, 0] * xr + rotate_matrix[0, 1] * yr + rotate_matrix[0, 2] * zr\n yg = rotate_matrix[1, 0] * xr + rotate_matrix[1, 1] * yr + rotate_matrix[1, 2] * zr\n zg = rotate_matrix[2, 0] * xr + rotate_matrix[2, 1] * yr + rotate_matrix[2, 2] * zr\n\n # rotated coordinates:\n rlat = np.arcsin(zg)\n rlon = np.arctan2(yg, xg)\n\n a = np.where((np.abs(xg) + np.abs(yg)) == 0)\n if a:\n lon[a] = 0\n\n rlat = rlat / rad\n rlon = rlon / rad\n\n return (rlon, rlat)", "def _sector_orientation(self, vertices):\n if not vertices[0] == vertices[-1]:\n vertices.append(vertices[0])\n xy = np.transpose(np.array(vertices))\n x, y = xy[0], xy[1]\n return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0, vertices", "def calculate_stereographic_projection(p):\n # P' = P * (2r / r + z)\n mu = 1 / (1 + p[2])\n x = p[0] * mu\n y = p[1] * mu\n return x, y", "def _corner_orientations(*args, **kwargs): # real signature unknown\n pass", "def trapezoid_area(base_minor, base_major, height):\n return ((base_major + base_minor) / 2 ) * height", "def horizontal_to_cartesian(altitude, azimuth):\n theta = math.pi / 2 - math.radians(altitude)\n phi = math.radians(-azimuth)\n x = math.sin(phi) * math.sin(-theta)\n y = math.sin(theta) * math.cos(phi)\n z = math.cos(theta)\n return x, y, z", "def phi(self):\n return (np.sum(self.diameters**self.ndim)*np.pi / (2*self.ndim))", "def nfw_physical2angle_fromNFWparams(self, rhos, rs, z):\n\n D_d = self.cosmo.D_A_z(z)\n Rs_angle = rs / D_d / self.cosmo.arcsec # Rs in arcsec\n theta_Rs = rhos * (4 * rs ** 2 * (1 + numpy.log(1. 
/ 2.)))\n eps_crit = self.get_sigma_crit_lensing(z, self.z_source)\n\n return Rs_angle, theta_Rs / eps_crit / D_d / self.cosmo.arcsec", "def V(E, g, gl):\n num = 0\n den = 0\n for i in range(len(E)):\n num += E[i][0]*g[i][0] + E[i][1]*g[i][1]\n den += g[i][0] + g[i][1] + gl\n return num / den", "def outer_rad(self):\n return self._outer_rad", "def mw_boundaries(self):\n phi = np.arange(0., 2.0*np.pi, 0.1)\n theta_l = np.ones_like(phi)* 110 * np.pi / 180.\n theta_h = np.ones_like(phi)* 70 * np.pi / 180.\n ra_l, dec_l = self.gc2radec(phi, theta_l)\n ra_h, dec_h = self.gc2radec(phi, theta_h)\n return (ra_h, dec_h), (ra_l, dec_l)", "def point2wgs84_9603(self, datum):\n \"\"\"\n h is the height above the ellipsoid. This is the height value that is \n delivered by GPS satellite observations but is not the gravity-related height \n value which is normally used for national mapping and levelling operations. The\n gravity-related height (H) is usually the height above mean sea level or an \n alternative level reference for the country. If one starts with a gravity-related \n height H, it will be necessary to convert it to an ellipsoid height (h) before \n using the above transformation formulas. See section 4.11.1. For the WGS 84 \n ellipsoid the difference between ellipsoid and mean sea level can vary between \n values of -100m in the Sri Lanka area to +80m in the North Atlantic.)\n \"\"\"\n h=0\n # a is the semi-major axis of the ellipsoid of the given datum.\n a = datum.axis\n\n # f is the flattening of the ellipsoid of the given datum \n # (get_flattening actually returns the inverse flattening).\n f = 1.0/datum.flattening\n \n # dx, dy, dz are the x, y, z offset parameters for the given datum transformation\n # to WGS84\n dx = datum.dx\n dy = datum.dy\n dz = datum.dz\n \n # latr, lngr are the latitude and longitude in radians\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n\n # e is the eccentricity of the ellipsoid\n e_squared = f*(2-f)\n\n # nu is the prime vertical radius of curvature at latr\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n\n X = (nu+h)*math.cos(latr)*math.cos(vlambda)\n Y = (nu+h)*math.cos(latr)*math.sin(vlambda)\n Z = ((1 - math.pow(e,2))*nu + h)*math.sin(phi)\n\n Xwgs84 = X+dx\n Ywgs84 = Y+dy\n Zwgs84 = Z+dz\n\n epsilon = e_squared/(1-e_squared)\n b = a*(1-f)\n p = math.pow(sqr(Xwgs84)+sqr(Ywgs84),0.5)\n q = math.atan2((Zwgs84*a),(p*b))\n\n latrwgs84 = math.atan2( (Zwgs84 + epsilon*b*math.pow(math.sin(q)),3)), \\\n (p - e_squared*a*math.pow(math.cos(q),3) )\n lngrwgs84 = math.atan2(Ywgs84, Xwgs84)\n hwgs84 = (p/math.cos(latrwgs84))-nu\n newlng = lng180(math.degrees(lngrwgs84))\n newlat = math.degrees(latrwgs84)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), float(truncate(newlat,DEGREE_DIGITS)))", "def HOG(img, x, y):\n #TODO: write a HOG descriptor here\n des=[]\n row=0\n sub_image = img[x-8:x+8,y-8:y+8]\n while row < len(sub_image):\n col=0\n while col < len(sub_image[0]):\n temp_vector = [0 for i in range(8)]\n new_subimage = sub_image[row:row+4,col:col+4]\n x_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=1,dy=0)\n y_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=0,dy=1)\n theta = np.empty([x_gradient.shape[0],x_gradient.shape[1]])\n for i in range(len(x_gradient)):\n for j in range(len(x_gradient[0])):\n if x_gradient[i,j] == 0:\n theta[i,j] = 90\n else:\n theta[i,j] = np.arctan(y_gradient[i,j]/x_gradient[i,j])*(180/np.pi)\n theta_iter = theta.flatten() #To avoid nested for loops for 4x4 theta\n for i in range(len(theta_iter)):\n if 
theta_iter[i] < 45:\n temp_vector[0]=temp_vector[0]+1\n elif theta_iter[i] >= 45 and theta_iter[i] < 90:\n temp_vector[1]=temp_vector[1]+1\n elif theta_iter[i] >= 90 and theta_iter[i] < 135:\n temp_vector[2]=temp_vector[2]+1\n elif theta_iter[i] >= 135 and theta_iter[i] < 180:\n temp_vector[3]=temp_vector[3]+1\n elif theta_iter[i] >= 180 and theta_iter[i] < 225:\n temp_vector[4]=temp_vector[4]+1\n elif theta_iter[i] >= 225 and theta_iter[i] < 270:\n temp_vector[5]=temp_vector[5]+1\n elif theta_iter[i] >= 270 and theta_iter[i] < 315:\n temp_vector[6]=temp_vector[6]+1\n elif theta_iter[i] >= 315 and theta_iter[i] < 360:\n temp_vector[7]=temp_vector[7]+1\n des.extend(temp_vector)\n col=col+4\n row=row+4\n return des", "def get_pgeom(aor, e):\n return 1. / (aor * (1 - e*e)) * (aor > 1.0)", "def P(self):\n self.eigenmatrix()", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def phi(self):\n return np.arctan2(np.sqrt(self.x**2 + self.y**2), self.z)", "def voigt(p, E):\n dE = E[1]-E[0]\n return dE * convolve( lorentzian(p,E), gaussian(p,E), 'same' )", "def pto_depth_map(self, velo_points,\n H=64, W=512, C=5, dtheta=np.radians(0.4), dphi=np.radians(90./512.0)):\n\n x, y, z, i = velo_points[:, 0], velo_points[:, 1], velo_points[:, 2], velo_points[:, 3]\n d = np.sqrt(x ** 2 + y ** 2 + z**2)\n r = np.sqrt(x ** 2 + y ** 2)\n d[d==0] = 0.000001\n r[r==0] = 0.000001\n phi = np.radians(45.) - np.arcsin(y/r)\n phi_ = (phi/dphi).astype(int)\n phi_[phi_<0] = 0\n phi_[phi_>=512] = 511\n\n # print(np.min(phi_))\n # print(np.max(phi_))\n #\n # print z\n # print np.radians(2.)\n # print np.arcsin(z/d)\n theta = np.radians(2.) 
- np.arcsin(z/d)\n # print theta\n theta_ = (theta/dtheta).astype(int)\n # print theta_\n theta_[theta_<0] = 0\n theta_[theta_>=64] = 63\n #print theta,phi,theta_.shape,phi_.shape\n # print(np.min((phi/dphi)),np.max((phi/dphi)))\n #np.savetxt('./dump/'+'phi'+\"dump.txt\",(phi_).astype(np.float32), fmt=\"%f\")\n #np.savetxt('./dump/'+'phi_'+\"dump.txt\",(phi/dphi).astype(np.float32), fmt=\"%f\")\n # print(np.min(theta_))\n # print(np.max(theta_))\n\n depth_map = np.zeros((H, W, C))\n # 5 channels according to paper\n if C == 5:\n depth_map[theta_, phi_, 0] = x\n depth_map[theta_, phi_, 1] = y\n depth_map[theta_, phi_, 2] = z\n depth_map[theta_, phi_, 3] = i\n depth_map[theta_, phi_, 4] = d\n else:\n depth_map[theta_, phi_, 0] = i\n return depth_map", "def epidote():\n\n rho = 3465.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 211.5; C[0,1] = 65.6; C[0,2] = 43.2; C[0,3] = 0.; C[0,4] = -6.5; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 239.; C[1,2] = 43.6; C[1,3] = 0.; C[1,4] = -10.4; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 202.1; C[2,3] = 0.; C[2,4] = -20.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.1; C[3,4] = 0.; C[3,5] = -2.3\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 43.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.5\n\n return C, rho", "def orientation(point_p, point_q, point_r):\n # Set https://www.geeksforgeeks.org/orientation-3-ordered-points/\n # for details of below formula.\n r = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -\n (point_q.x - point_p.x) * (point_r.y - point_q.y))\n if r == 0:\n return 0\n return 1 if r > 0 else 2", "def params_3D_2D(phi, theta, psi, s2x, s2y):\n\tif theta > 90.0:\n\t\tmirror = 1\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 540.0-psi, 0, 0, 1.0)\n\telse:\n\t\tmirror = 0\n\t\talpha, sx, sy, scalen = compose_transform2(0, s2x, s2y, 1.0, 360.0-psi, 0, 0, 1.0)\n\treturn alpha, sx, sy, mirror", "def ecef2ocs(xyz, uvw):\n # create rotation matrix towards orbital coordinate system\n Z_o = np.divide(xyz, np.linalg.norm(xyz))\n X_o = np.cross(uvw, xyz)\n X_o = np.divide(X_o, np.linalg.norm(X_o))\n Y_o = np.cross(Z_o, X_o)\n Y_o = np.divide(Y_o, np.linalg.norm(Y_o))\n R = np.column_stack([X_o, Y_o, Z_o]) # (1) in [Ye20]\n return R", "def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = 
np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n # The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = 
np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu", "def eoa(self, *args):\n\n\t\t#Assume coordinate is in center of pixel.\n\t\t#Information on pixel standard is in this article.\n\t\t#http://www.aanda.org/component/article?access=bibcode&bibcode=&bibcode=2002A%2526A...395.1061GFUL\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tlonUL, latUL = self.heliographic(args[0], -.5, -.5)\n\t\t\tlonLL, latLL = self.heliographic(args[0], .5, -.5)\n\t\t\tlonLR, latLR = self.heliographic(args[0], .5, .5)\n\t\t\tlonUR, latUR = self.heliographic(args[0], -.5, .5)\n\t\telse:\n\t\t\tx = args[0]\n\t\t\ty = args[1]\n\t\t\tlonUL, latUL = self.heliographic(x - .5, y - .5)\n\t\t\tlonLL, latLL = self.heliographic(x + .5, y - .5)\n\t\t\tlonLR, latLR = self.heliographic(x + .5, y + .5)\n\t\t\tlonUR, latUR = self.heliographic(x - .5, y + .5)\n\n\t\t# Calculating unit vectors of pixel corners for solid angle.\n\t\tr1 = np.array([np.cos(np.deg2rad(latUL))*np.cos(np.deg2rad(lonUL)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latUL))*np.sin(np.deg2rad(lonUL)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latUL))])\n\n\t\tr2 = np.array([np.cos(np.deg2rad(latLL))*np.cos(np.deg2rad(lonLL)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latLL))*np.sin(np.deg2rad(lonLL)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latLL))])\n\n\t\tr3 = np.array([np.cos(np.deg2rad(latLR))*np.cos(np.deg2rad(lonLR)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latLR))*np.sin(np.deg2rad(lonLR)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latLR))])\n\n\t\tr4 = np.array([np.cos(np.deg2rad(latUR))*np.cos(np.deg2rad(lonUR)),\n\t\t\t\t\t\tnp.cos(np.deg2rad(latUR))*np.sin(np.deg2rad(lonUR)),\n\t\t\t\t\t\tnp.sin(np.deg2rad(latUR))])\n\n\t\t# Calculate solid angle of pixel based on a pyrimid shaped polygon.\n\t\t# See \n\t\tcross1 = np.cross(r1, r2, axis=0)\n\t\tcross2 = np.cross(r3, r4, axis=0)\n\t\tnumerator1 = dot(cross1, r3)\n\t\tnumerator2 = dot(cross2, r1)\n\t\tsolid_angle1 = 2*np.arctan2(numerator1,\n\t\t\t\t\t\t(dot(r1, r2) + dot(r2, r3) + dot(r3, r1) + 1))\n\t\tsolid_angle2 = 2*np.arctan2(numerator2, \n\t\t\t\t\t\t(dot(r3, r4) + dot(r4, r1) + dot(r3, r1) + 1))\n\t\tsolid_angle = solid_angle1 + solid_angle2\n\t\tr = 6.957e10 * u.cm\n\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\tself.area = np.abs((r**2)*solid_angle)\n\t\t\tind = np.where(self.rg > self.rsun)\n\t\t\tself.area[ind] = np.nan\n\t\t\treturn self.area\n\t\telse:\n\t\t\treturn np.abs((r**2)*solid_angle)", "def gyz(xp, yp, zp, prisms):\n if xp.shape != yp.shape != zp.shape:\n raise ValueError(\"Input arrays xp, yp, and zp must have same shape!\")\n res = 0\n for prism in prisms:\n if prism is None or 'density' not in prism.props:\n continue\n density = prism.props['density']\n res += kernelyz(xp, yp, zp, prism)*density\n res *= G * SI2EOTVOS\n return res", "def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * 
np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))", "def angle(self) -> int:", "def get_params(img, output_size):\n c, h, w = img.shape\n th, tw = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = (h - th)//2\n j = (w - tw)//2\n return i, j, th, tw", "def get_political_orientation(newspaper):\n return political_orientations[newspaper]", "def operator(self, params: Tensor) -> Tensor:\n theta, phi = params\n # calculate entries\n a: Tensor = exp(1j * phi) * cos(theta / 2)\n b: Tensor = sin(theta / 2)\n c: Tensor = -b\n d: Tensor = exp(-1j * phi) * cos(theta / 2)\n # construct the rows of the rotation matrix\n r1: Tensor = cat((a.view(1), b.view(1)))\n r2: Tensor = cat((c.view(1), d.view(1)))\n # build and return the rotation matrix\n rot: Tensor = cat((r1, r2)).view(2, 2)\n return rot", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity \n\n return eps, mu", "def point_orientation(a, b, c):\n return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y) >= 0", "def origin() -> Tuple[float, float]:\n return (DIMENSION[0] / 2, DIMENSION[1] / 2)", "def approx_shoulders(upper_body_roi):\n height = upper_body_roi.shape[0]; width = upper_body_roi.shape[1]\n return (int(width / 6), int((height / 4) * 3)), (int((width / 6) * 5), int((height / 4) * 3))", "def polarization_ellipse(self):\n self.ellipse = {}\n self.ellipse['d_lin'] = sqrt(self.Q**2 + self.U**2)/self.I\n self.ellipse['d_cir'] = abs(self.V)/self.I\n self.ellipse['d'] = sqrt(self.Q**2 + self.U**2 + self.V**2)/self.I\n if self.Q:\n self.ellipse['theta'] = 0.5*atan(self.U/self.Q)\n else:\n self.ellipse['theta'] = float('NaN')\n self.logger.debug(\"polarization_ellipse: theta = %f\",\n self.ellipse['theta'])\n\n if (self.Q**2 + self.U**2):\n self.ellipse['beta'] = 0.5*atan(self.V/sqrt(self.Q**2 + self.U**2))\n if self.V:\n self.ellipse['eccen'] = tan(self.ellipse['beta'])\n else:\n self.ellipse['eccen'] = 0.\n else:\n self.ellipse['beta'] = pi/4\n self.ellipse['eccen'] = 1.\n self.logger.debug(\"polarization_ellipse: beta = %f\",\n self.ellipse['beta'])\n self.logger.debug(\"polarization_ellipse: eccen = %f\",\n self.ellipse['eccen'])", "def naturalAspectRatio(self):\n return math.sin(self.view_angle_h) / math.sin(self.view_angle_v)", "def harzburgite():\n\n rho = 3200.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 226.5; C[0,1] = 75.34; C[0,2] = 74.73; C[0,3] = -0.27; C[0,4] = -2.00; C[0,5] = 1.85\n C[1,0] = C[0,1]; C[1,1] = 242.8; C[1,2] = 73.68; C[1,3] = -3.6; C[1,4] = -1.91; C[1,5] = 4.14\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.; C[2,3] = -4.36; C[2,4] = -4.27; C[2,5] = -0.27\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 80.75; C[3,4] = 1.81; C[3,5] = -2.19\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 76.94; C[4,5] = -1.88\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.15\n\n return C, rho", "def 
calculateOrientation(self):\n orientation = [\n [Orientation.LANDSCAPE, Orientation.PORTRAIT],\n [Orientation.CW_LANDSCAPE, Orientation.CW_PORTRAIT],\n [Orientation.CCW_LANDSCAPE, Orientation.CCW_PORTRAIT],\n [Orientation.FLIPPED_LANDSCAPE, Orientation.FLIPPED_PORTRAIT],\n ]\n exif = self.getExif()\n if exif and exif['Orientation'] == 3: #180\n rotation = 3\n elif exif and exif['Orientation'] == 6: #90 CCW\n rotation = 2\n elif exif and exif['Orientation'] == 8: #90 CW\n rotation = 1\n else:\n rotation = 0\n\n if self.isLandscape():\n return orientation[rotation][0]\n else:\n return orientation[rotation][1]", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 * np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def even_pODF_opt_grad(angles, *args):\n\n qpoints = args[0]\n c = args[1]\n N = args[2]\n\n n,m = qpoints.shape\n\n\n theta,phi = angles[0], angles[1]\n omega = np.array([np.sin(theta)*np.cos(phi),np.sin(theta)*np.sin(phi),np.cos(theta)])\n\n #Partial in theta direction\n sum = 0.0\n for i in range(n):\n mu = np.dot(omega,qpoints[i,:])\n mu = np.clip(mu, -1.0, 1.0)\n\n r_i, theta_i, phi_i = car2sph(qpoints[i,0],qpoints[i,1],qpoints[i,2])\n\n sum += c[i]*even_kernel_der(mu, N)*(-np.cos(theta_i)*np.cos(theta) + np.cos(phi - phi_i)*np.cos(theta)*np.sin(theta_i))\n \n p_theta = sum\n\n #Partial in phi direction\n sum = 0.0\n for i in range(n):\n mu = np.dot(omega,qpoints[i,:])\n mu = np.clip(mu, -1.0, 1.0)\n\n r_i, theta_i, phi_i = car2sph(qpoints[i,0],qpoints[i,1],qpoints[i,2])\n\n sum += c[i]*even_kernel_der(mu, N)*( -np.sin(phi - phi_i)*np.sin(theta)*np.sin(theta_i) )\n\n p_phi = sum\n\n\n\n return -(N + 1)**2 * np.array([p_theta,p_phi])", "def Z(phi = None):\n if phi == None:\n return sz\n else:\n return scipy.linalg.expm(-1j * phi / 2 * sz)" ]
[ "0.6358747", "0.6289955", "0.626587", "0.6029137", "0.5908361", "0.5868645", "0.5784191", "0.57483953", "0.5706465", "0.5689375", "0.5624956", "0.5610132", "0.5609869", "0.560806", "0.5597778", "0.5587196", "0.5573857", "0.5553297", "0.55513024", "0.55361223", "0.5525677", "0.5524676", "0.5512775", "0.54997075", "0.54997075", "0.5473307", "0.5463591", "0.54411703", "0.5392926", "0.53810036", "0.5334575", "0.53326035", "0.5332575", "0.53288555", "0.52954066", "0.52948934", "0.5279783", "0.527597", "0.52718157", "0.52653605", "0.5260742", "0.52449775", "0.5236705", "0.52352196", "0.5227551", "0.5225471", "0.5220178", "0.5213081", "0.5210498", "0.5191963", "0.5188079", "0.5186847", "0.5182337", "0.51781994", "0.51754403", "0.51722395", "0.51722395", "0.5169634", "0.5159464", "0.5156836", "0.51489466", "0.5130411", "0.5129928", "0.51196", "0.5103687", "0.5090153", "0.50878483", "0.50751424", "0.50717133", "0.5054782", "0.50501627", "0.5049759", "0.50451624", "0.5042542", "0.50423855", "0.5033244", "0.5024257", "0.5017658", "0.50094086", "0.49985057", "0.49975812", "0.4997444", "0.49908587", "0.4988806", "0.49866837", "0.49838376", "0.49792963", "0.4973685", "0.49730644", "0.49718505", "0.49704415", "0.4967745", "0.49656367", "0.49594072", "0.4958978", "0.4952184", "0.49454445", "0.49415722", "0.4940041", "0.4939701" ]
0.740137
0
Computes the parameters of the inverse inner orientation transformation
def ComputeInverseInnerOrientation(self): a0 = self.innerOrientationParameters[0] b0 = self.innerOrientationParameters[1] a1 = self.innerOrientationParameters[2] a2 = self.innerOrientationParameters[3] b1 = self.innerOrientationParameters[4] b2 = self.innerOrientationParameters[5] mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]]) mat = la.inv(mat) return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def getInverseMatrix(self) -> CMatrix4:\n ...", "def _r_inv(self):\n # [output_dim, output_dim]\n return tf.linalg.cholesky_solve(\n self._chol_obs_covariance,\n tf.eye(self.emission.output_dim, dtype=self._chol_obs_covariance.dtype),\n )", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_GetInverse(self, *args)", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):\n\n\tt = Transform({\"type\":\"2D\",\"alpha\":alpha,\"tx\":tx,\"ty\":ty,\"mirror\":mirror,\"scale\":1.0})\n\tt = t.inverse()\n\tt = t.get_params(\"2D\")\n\treturn t[ \"alpha\" ], t[ \"tx\" ], t[ \"ty\" ], t[ \"mirror\" ]", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = [email protected]\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( [email protected]/self.alpha))", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n # TODO - your code here\n inverse = []\n if self.h == 1:\n temp = []\n temp.append(1/self.g[0][0])\n inverse.append(temp)\n else:\n identity_matrix = identity(self.h)\n det_term = 1/self.determinant()\n trace_term = self.trace()\n # implement intermediate scaling step locally\n # trace_x_I = trace_term * identity_matrix\n trace_x_I = []\n for i in range(len(self.g)):\n temp_row = []\n for j in range(len(self.g[i])):\n temp_row.append(trace_term * identity_matrix[i][j])\n trace_x_I.append(temp_row)\n # implement sub-traction locally\n # sub_term = trace_x_I - self.g\n sub_term = []\n for i in range(len(trace_x_I)):\n temp_row = []\n for j in range(len(trace_x_I[i])):\n temp_row.append(trace_x_I[i][j] 
- self.g[i][j])\n sub_term.append(temp_row)\n # implement final scaling step locally\n # inverse = det_term * sub_term\n inverse = []\n for i in range(len(sub_term)):\n temp_row = []\n for j in range(len(sub_term[i])):\n temp_row.append(det_term * sub_term[i][j])\n inverse.append(temp_row)\n return Matrix(inverse)\n # TODO - your code here", "def get_inverse_affine_param(affine_param,dim=3):\n\n affine_param = affine_param.view(affine_param.shape[0], dim+1, dim)\n inverse_param = torch.zeros_like(affine_param.data).to(affine_param.device)\n for n in range(affine_param.shape[0]):\n tm_inv = torch.inverse(affine_param[n, :dim,:])\n inverse_param[n, :dim, :] = tm_inv\n inverse_param[n, dim, :] = - torch.matmul(tm_inv, affine_param[n, dim, :])\n inverse_param = inverse_param.contiguous().view(affine_param.shape[0], -1)\n return inverse_param", "def inverse(self):\n rotation_matrix = self.pose_mat[:3, :3]\n translation_vector = self.pose_mat[:3, 3]\n\n rot = np.transpose(rotation_matrix)\n trans = - np.matmul(np.transpose(rotation_matrix), translation_vector)\n return Transformation(rot, trans)", "def _inverse(self, y):\n d = self._compute_shared(y=y)\n rely = y - d.y_k # tf.where(d.out_of_bounds, tf.zeros_like(y), y - d.y_k)\n term2 = rely * (d.d_kp1 + d.d_k - 2 * d.s_k)\n # These terms are the a, b, c terms of the quadratic formula.\n a = d.h_k * (d.s_k - d.d_k) + term2\n b = d.h_k * d.d_k - term2\n c = -d.s_k * rely\n # The expression used here has better numerical behavior for small 4*a*c.\n relx = tf.where(\n tf.equal(rely, 0), tf.zeros_like(a),\n (2 * c) / (-b - tf.sqrt(b**2 - 4 * a * c)))\n return relx * d.w_k + d.x_k #tf.where(d.out_of_bounds, y, relx * d.w_k + d.x_k)", "def inverse(self, x, y):", "def posdef_inv_eig(tensor, identity, damping):\n eigenvalues, eigenvectors = tf.self_adjoint_eig(tensor + damping * identity)\n return tf.matmul(eigenvectors / eigenvalues, eigenvectors, transpose_b=True)", "def inverse_transform(self, y: Array2D) -> Array2D:", "def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD3_GetInverse(self, *args)", "def inverse(self, y):\n device = y.device\n return t.einsum('ij,k,kj->ik', y, 1. 
/ t.sqrt(self.eig).to(device), self.rot.to(device))", "def _inverse_affine_matrix(self) -> np.ndarray:\n raise NotImplementedError", "def inverse(self, ys):\n with torch.no_grad():\n xs = torch.matmul(ys, torch.diag(torch.reciprocal(torch.exp(self.scaling_diag))))\n xs = self.layer4.inverse(xs)\n xs = self.layer3.inverse(xs)\n xs = self.layer2.inverse(xs)\n xs = self.layer1.inverse(xs)\n return xs", "def intrinsic_matrix_inv(self) -> np.ndarray:\n\n # determinant of top left of intrinsic matrix\n tldet = self.kx * self.ky\n\n return np.array([[1 / self.kx, -self.kxy / tldet, (self.py * self.kxy - self.ky * self.px) / tldet],\n [0, 1 / self.ky, -self.py / self.ky]])", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def complex_inverse(c1,cr):", "def affine_transform_inverse(np_transform):\n rotation = np_transform[:3, :3]\n translation = np_transform[:3, 3]\n rotation_inv = numpy.linalg.inv(rotation)\n translation_inv = -1 * numpy.dot(rotation_inv, translation)\n result = numpy.identity(4)\n result[:3, :3] = rotation_inv\n result[:3, 3] = translation_inv.flatten()\n return result", "def posdef_inv_matrix_inverse(tensor, identity, damping):\n return tf.matrix_inverse(tensor + damping * identity)", "def inverse(self):\n # find the determinant of the matrix\n determinant = self.determinant()\n # find the matrix of minors of the matrix\n matrix_of_minors = self.matrix_of_minors()\n # find the cofactor of the matrix of minors\n cofactor_matrix = self.cofactor_matrix(matrix_of_minors)\n # find the transpose of the cofactor matrix\n transpose_cofactor_matrix = self.transpose(cofactor_matrix)\n # find the adjugate (inverse) matrix\n inverse_matrix = self.adjugate_matrix(determinant, transpose_cofactor_matrix)\n\n return inverse_matrix", "def inverse(self):\n if not self.is_square():\n raise(ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n inverse = [[1/self.g[0][0]]];\n else:\n a = self.g[0][0];\n b = self.g[0][1];\n c = self.g[1][0];\n d = self.g[1][1];\n if(a*d==b*c):\n raise ValueError('matrix does not have a inverse!');\n else:\n weigh = 1/(a*d-b*c);\n inverse = [[weigh*d,weigh*-1*b],[weigh*-1*c,weigh*a]];\n return Matrix(inverse);", "def inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = numpy.zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def inverse(self):\n # TODO\n # detA\n if not self.is_square():\n raise(\n ValueError, \"Non-square Matrix does not have an inverse.\")\n if self.h > 2:\n raise(\n NotImplementedError, \"inversion not implemented for matrices larger than 2x2.\")\n\n mD = self.determinant()\n if self.h == 1:\n if self.g[0][0] = 0:\n raise(NotImplementedError,\n \"The 1x1 Matrix contains 0 can't inverse\")\n else:\n return [[1 / self.g[0][0]]] \n for i in range(self.h): # Calculates the inverse of a 2x2 Matrix.\n my_Matrix = zeroes(2, 2)\n my_Matrix.g[1][1] = self.g[0][0] / mD\n my_Matrix.g[0][0] = self.g[1][1] / mD\n 
my_Matrix.g[0][1] = - self.g[0][1] / mD\n my_Matrix.g[1][0] = - self.g[1][0] / mD\n return my_Matrix\n\n # trace A\n # 与矩阵TraceA * I identity 单位矩阵", "def get_inverse_2x2(u, v):\n if not is_linearly_independent_2x2(u, v):\n return\n uv = get_uv(u, v)\n iden = get_uv([1, 0],[0, 1])\n a = np.zeros((2, 4))\n for i in range(2):\n for j in range(2):\n a[i][j] = uv[i][j]\n a[i][j+2] = iden[i][j]\n\n q = a[0][1] / a[1][1]\n a[0] = a[0] - q * a[1]\n\n q = a[1][0] / a[0][0]\n a[1] = a[1] - q * a[0]\n\n a[0] /= a[0][0]\n\n a[1] /= a[1][1]\n\n for i in range(2):\n for j in range(2):\n uv[i][j] = a[i][j+2]\n return uv", "def generate_inverse(self, coords):\n\n # Create list\n inv_nets = [False]*self.dpar\n inv_outputs = []\n\n # Iterate through equation parameters\n for pp in range(self.dpar):\n\n # Hidden parameter is a constant\n if self.inverse[pp]=='const':\n cte = keras.layers.Lambda(lambda x: 0*x[:,0:1]+1)(coords)\n ini = keras.initializers.Constant(value=self.eq_params[pp])\n hid = keras.layers.Dense(1,\n kernel_initializer=ini,\n use_bias=False)(cte)\n inv_outputs.append(hid)\n\n # Hidden parameter is a field\n elif self.inverse[pp]:\n if len(self.inverse[pp][0])==1:\n ii = self.inverse[pp][0][0]\n inps = coords[:,ii:ii+1]\n else:\n inps = keras.layers.concatenate(\n [coords[:,ii:ii+1] for ii in self.inverse[pp][0]])\n if self.norm_in:\n inps = keras.layers.Lambda(self.norm)(inps)\n hidden = inps\n for ii in range(self.inverse[pp][1]):\n hidden = keras.layers.Dense(self.inverse[pp][2])(hidden)\n if self.activation=='adaptive_layer':\n self.act_fn = AdaptiveAct()\n hidden = self.act_fn(hidden)\n if self.p_drop:\n hidden = keras.layers.Dropout(self.p_drop)(hidden)\n func = keras.layers.Dense(1)(hidden)\n inv_outputs.append(func)\n return inv_outputs", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def test__inverse_transform_continuous(self):", "def _inverse(self, x):\n alpha, beta = self._get_alpha_beta()\n diff = x - self.x0\n r = tf.linalg.norm(diff, axis=-1, keepdims=True)\n h = 1. 
/ (alpha + r)\n beta_h = beta * h\n return x + beta_h * diff", "def build_inverse_covariance(self):\n return np.linalg.inv(self.cov)", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def ssc.inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)", "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inverseN(self):\r\n result = Matrix(self.rows, self.columns)\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n result.mat[r][c] = self.cofactor(r, c)\r\n result.out()\r\n result = result.transpose()\r\n det = self.determinant()\r\n print(\"1/(\" + str(det) + \")\")\r\n result.out()\r\n return result", "def inverse(self):\n invr = np.linalg.inv(self.affine_matrix)\n return SymmOp(invr)", "def inv(transform_matrix):\n\n r = transform_matrix[0:3, 0:3]\n t = transform_matrix[0:3, 3]\n t_inv = -1 * r.T.dot(t)\n transform_inv = np.eye(4)\n transform_inv[0:3, 0:3] = r.T\n transform_inv[0:3, 3] = t_inv\n\n return transform_inv", "def OAVolterra_inverse(pz,wD,dt,Nt):\n # INITIALIZATION ------------------------------------------------------\n p0 = np.zeros(Nt) # oa signal at detection point\n K0 = wD # oa propagator: K(0,0) \n K1 = wD*np.exp(-wD*dt) # oa propagator: K(1,0) \n K1_K0 = np.exp(-wD*dt) # quotient: K(i+1)/K(i)\n\n # SOLVE INVERSE PROBLEM VIA RECURRENCE RELATION -----------------------\n I = 0 \n p0[0] = pz[0] \n for i in range(1,Nt):\n # USE INFO FROM RECONSTRUCTION STEP i-1 TO COMPUTE p0[i] ----------\n p0[i] = (pz[i] + (I + 0.5*dt*K0*p0[i-1])*K1_K0)/(1.-0.5*dt*K0)\n # ADVANCE DIFFRACTION TERM TO NEXT TIMESTEP -----------------------\n I = I*K1_K0 + 0.5*dt*(K1*p0[i-1] + K0*p0[i])\n return p0", "def inverse(self, u: Tensor, covariates: Tensor) -> Tensor:\n return self.real_nvp.inverse(u, covariates)", "def get_fc_inv(fc):\n return scipy.linalg.pinvh(fc.T @ fc) @ fc.T", "def inverse_cayley_transform(z: torch.Tensor) -> torch.Tensor:\n identity = identity_like(z)\n i_identity = multiply_by_i(identity)\n\n z_minus_id = z - i_identity\n inv_z_plus_id = inverse(z + i_identity)\n return z_minus_id @ inv_z_plus_id", "def inv(self):\n return MoebGen(self._d / self._det, - self._b / self._det, - self._c / self._det, self._a / self._det)", "def __invert__(self):\n a = self.angle\n x, y = Vector.cartesian([1, a])\n return Vector(x, y)", "def test_inverse_transform(self):", "def inverse(self):\n return RZGate(-self.params[0])", "def inverse(self):\n self.check_square()\n\n\n N = self.rows\n\n inverse = make_matrix(N, N)\n\n # Solve on a per-column basis using Ax = b formalism\n for j in range(N):\n b = make_matrix(N, 1)\n b[j, 0] = 1\n\n x = self.solve_linear_system(b)\n\n for i in range(N):\n inverse[i, j] = x[i, 0]\n\n return inverse", "def _r_inv(self):\n raise NotImplementedError", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def inverse(self,mat):\n result = np.linalg.inv(mat)\n self.out = result\n return self.out", "def inverse_rigid_trans(Tr): \n inv_Tr = np.zeros_like(Tr) # 3x4\n inv_Tr[0:3,0:3] = np.transpose(Tr[0:3,0:3])\n inv_Tr[0:3,3] = 
np.dot(-np.transpose(Tr[0:3,0:3]), Tr[0:3,3])\n return inv_Tr", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def invert(self):\n tmp = self.pvt\n self.pvt = self.nvt\n self.nvt = tmp\n tmp = self.pFace\n self.pFace = self.nFace\n self.nFace = tmp", "def __invert__(self):\n \n return Vector(-self.y, self.x)", "def rot_inv(self):\n if not hasattr(self, '_rot_inv'):\n self._rot_inv=np.linalg.inv(self.rot)\n return self._rot_inv", "def convert_pose_inverse_transform(pose):\n translation = np.zeros((4,1))\n translation[0] = -pose.position.x\n translation[1] = -pose.position.y\n translation[2] = -pose.position.z\n translation[3] = 1.0\n\n rotation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n euler_angle = tr.euler_from_quaternion(rotation)\n rotation = np.transpose(tr.rotation_matrix(euler_angle[2], [0,0,1])) # the angle is a yaw\n transformed_translation = rotation.dot(translation)\n\n translation = (transformed_translation[0], transformed_translation[1], transformed_translation[2])\n rotation = tr.quaternion_from_matrix(rotation)\n return (translation, rotation)", "def inverse(self):\n q_vector = np.zeros(4)\n q_vector[:3] = self.imaginary*-1\n q_vector[3] = self.w\n return Quat(q_vector,\"xyzw\")", "def inverse(self):\n return Rotation(self.matrix.transposed())", "def params_2D_3D(alpha, sx, sy, mirror):\n\tphi = 0\n\tpsi = 0\n\ttheta = 0\n\talphan, s2x, s2y, scalen = compose_transform2(0, sx, sy, 1, -alpha, 0, 0, 1)\n\tif mirror > 0:\n\t\tphi = (540.0 + phi)%360.0\n\t\ttheta = 180.0 - theta\n\t\tpsi = (540.0 - psi + alphan)%360.0\n\telse:\n\t\tpsi = (psi + alphan)%360.0\n\treturn phi, theta, psi, s2x, s2y", "def inverse_matrice(T):\n a,b,c,d = T[0][0],T[0][1],T[1][0],T[1][1]\n det = a*d-b*c\n aa,bb,cc,dd = d/det,-b/det,-c/det,a/det\n Tinv = [[aa,bb],[cc,dd]]\n return Tinv", "def dispersion_inverse(self, E):\n if self._ksign is None:\n self._ksign = np.random.choice([-1, 1])\n return np.sqrt(\n 2*self.material.m_star_m * (\n self.material.z + self.ksign*np.sqrt(E**2 - 1)\n )\n ) / self.material.hbar_m", "def inv(X):\n R, t = Rt(X)\n Ri = R.T\n return np.concatenate((\n np.concatenate((Ri, -Ri.dot(t)[:,np.newaxis]), axis=1),\n np.array([[0, 0, 1]])))", "def support_inverse(rho):\n return LA.pinv(rho)", "def inv(self, y):\n pass", "def inverse(\n self, input: Tensor, params: Optional[List[ParamItem]] = None, extra_args: Dict[str, Any] = {}\n ) -> Tensor:\n if params is None:\n if self._params is not None:\n params = self._params\n else:\n raise RuntimeError(\"No valid params to inverse the transformation.\")\n\n return self.inverse_inputs(input, params, extra_args=extra_args)", "def _inverse_edges(edges: np.array) -> np.array:\n inversed_edges = edges.copy()\n inversed_edges[:, [0, 1]] = inversed_edges[:, 
[1, 0]]\n return inversed_edges", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def invert(self, *args, **kwargs):\n with_units = kwargs.pop('with_units', False)\n\n if not utils.isnumerical(args[0]):\n args = self.output_frame.coordinate_to_quantity(*args)\n if self.output_frame.naxes == 1:\n args = [args]\n try:\n if not self.backward_transform.uses_quantity:\n args = utils.get_values(self.output_frame.unit, *args)\n except (NotImplementedError, KeyError):\n args = utils.get_values(self.output_frame.unit, *args)\n\n if 'with_bounding_box' not in kwargs:\n kwargs['with_bounding_box'] = True\n\n if 'fill_value' not in kwargs:\n kwargs['fill_value'] = np.nan\n\n try:\n # remove iterative inverse-specific keyword arguments:\n akwargs = {k: v for k, v in kwargs.items() if k not in _ITER_INV_KWARGS}\n result = self.backward_transform(*args, **akwargs)\n except (NotImplementedError, KeyError):\n result = self.numerical_inverse(*args, **kwargs, with_units=with_units)\n\n if with_units and self.input_frame:\n if self.input_frame.naxes == 1:\n return self.input_frame.coordinates(result)\n else:\n return self.input_frame.coordinates(*result)\n else:\n return result", "def _inv22_vectorized(M):\n assert (M.ndim == 3)\n assert (M.shape[-2:] == (2, 2))\n M_inv = np.empty_like(M)\n delta_inv = np.reciprocal(M[:, 0, 0]*M[:, 1, 1] - M[:, 0, 1]*M[:, 1, 0])\n M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv\n M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv\n M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv\n M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv\n return M_inv", "def numerical_inverse(self, *args, **kwargs):\n tolerance = kwargs.get('tolerance', 1e-5)\n maxiter = kwargs.get('maxiter', 50)\n adaptive = kwargs.get('adaptive', True)\n detect_divergence = kwargs.get('detect_divergence', True)\n quiet = kwargs.get('quiet', True)\n with_bounding_box = kwargs.get('with_bounding_box', True)\n fill_value = kwargs.get('fill_value', np.nan)\n with_units = kwargs.pop('with_units', False)\n\n if not utils.isnumerical(args[0]):\n args = self.output_frame.coordinate_to_quantity(*args)\n if self.output_frame.naxes == 1:\n args = [args]\n args = utils.get_values(self.output_frame.unit, *args)\n\n args_shape = np.shape(args)\n nargs = args_shape[0]\n arg_dim = len(args_shape) - 1\n\n if nargs != self.world_n_dim:\n raise ValueError(\"Number of input coordinates is different from \"\n \"the number of defined world coordinates in the \"\n f\"WCS ({self.world_n_dim:d})\")\n\n if self.world_n_dim != self.pixel_n_dim:\n raise NotImplementedError(\n \"Support for iterative inverse for transformations with \"\n \"different number of inputs and outputs was not implemented.\"\n )\n\n # initial guess:\n if nargs == 2 and self._approx_inverse is None:\n self._calc_approx_inv(max_inv_pix_error=5, inv_degree=None)\n\n if self._approx_inverse is None:\n if self.bounding_box is None:\n x0 = np.ones(self.pixel_n_dim)\n else:\n x0 = np.mean(self.bounding_box, axis=-1)\n\n if arg_dim == 0:\n argsi = args\n\n if nargs == 2 and self._approx_inverse is not None:\n x0 = self._approx_inverse(*argsi)\n if not np.all(np.isfinite(x0)):\n return [np.array(np.nan) for _ in range(nargs)]\n\n result = 
tuple(self._vectorized_fixed_point(\n x0, argsi,\n tolerance=tolerance,\n maxiter=maxiter,\n adaptive=adaptive,\n detect_divergence=detect_divergence,\n quiet=quiet,\n with_bounding_box=with_bounding_box,\n fill_value=fill_value\n ).T.ravel().tolist())\n\n else:\n arg_shape = args_shape[1:]\n nelem = np.prod(arg_shape)\n\n args = np.reshape(args, (nargs, nelem))\n\n if self._approx_inverse is None:\n x0 = np.full((nelem, nargs), x0)\n else:\n x0 = np.array(self._approx_inverse(*args)).T\n\n result = self._vectorized_fixed_point(\n x0, args.T,\n tolerance=tolerance,\n maxiter=maxiter,\n adaptive=adaptive,\n detect_divergence=detect_divergence,\n quiet=quiet,\n with_bounding_box=with_bounding_box,\n fill_value=fill_value\n ).T\n\n result = tuple(np.reshape(result, args_shape))\n\n if with_units and self.input_frame:\n if self.input_frame.naxes == 1:\n return self.input_frame.coordinates(result)\n else:\n return self.input_frame.coordinates(*result)\n else:\n return result", "def evert(self):\n for e in self.edges:\n self.invert()\n for f in self.faces:\n f.invert()", "def inverse(im): \t \n x,y = np.shape(im)\n img = np.zeros([x,y])\n\t\n for i in range(x):\n for j in range(y):\n img[i,j] = 255 - im[i,j]\n return img", "def rotation_inv(R: np.array) -> np.array:\n return R.T", "def inverse_transform(self, matrix):\n\n x = matrix.shape[0]\n y = matrix.shape[1]\n N = x\n\n # Inverse Fourier Transform matrix:\n ift = np.zeros([x, y], complex)\n\n for i in range(0, x):\n for j in range(0, y):\n sum_ift = 0\n for u in range(0, x):\n for v in range(0, y):\n sum_ift = sum_ift + matrix[u, v] * (np.cos(((2 * np.pi) / N) * (u * i + v * j)) + 1j * np.sin(((2 * np.pi) / N) * (u * i + v * j)))\n\n ift[i, j] = sum_ift\n\n\n return ift/(x*x)", "def inv(self):\n\t\tdeterminant = self.det()\n\t\tif determinant:\n\t\t\treturn self.adj() / determinant\n\t\telse:\n\t\t\traise ValueError(\"Not Invertible\")", "def inverse_transform(v):\n v, k = divmod(v - 1, N)\n v, j = divmod(v, N)\n v, i = divmod(v, N)\n return i, j, k", "def toeplitz_inverse_multiplication_prep(T_column):\n \n phi=1\n psi=2\n assert phi != 0\n assert psi != 0\n assert phi != psi\n \n n = len(T_column)\n \n x = levinson(T_column, np.concatenate( (np.array([1]), np.zeros((n-1,))) ) )\n y = levinson(T_column, np.concatenate( (np.zeros((n-1,)), np.array([1])) ) )\n\n \n \n x_0 = x[0]\n \n D_phi = (phi**(1/n))**np.arange(0,n)\n D_psi = (psi**(1/n))**np.arange(0,n)\n\n Lambda_1 = fft(D_psi*x)\n Lambda_2 = fft(D_phi*np.concatenate(([phi*y[-1]], y[0:-1])))\n Lambda_3 = fft(D_psi*np.concatenate(([psi*y[-1]], y[0:-1])))\n Lambda_4 = fft(D_phi*x)\n \n return (x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4)", "def __truediv__(self, o): \n return self * o.inv()", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing 
clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def __invert__(self):\n from sage.matrix.constructor import matrix\n from .comp import Components\n if self._is_identity:\n return self\n if self._inverse is None:\n if self._name is None:\n inv_name = None\n else:\n inv_name = self._name + '^(-1)'\n if self._latex_name is None:\n inv_latex_name = None\n else:\n inv_latex_name = self._latex_name + r'^{-1}'\n fmodule = self._fmodule\n si = fmodule._sindex\n nsi = fmodule._rank + si\n self._inverse = self.__class__(fmodule, inv_name, inv_latex_name)\n for basis in self._components:\n try:\n mat = self.matrix(basis)\n except (KeyError, ValueError):\n continue\n mat_inv = mat.inverse()\n cinv = Components(fmodule._ring, basis, 2, start_index=si,\n output_formatter=fmodule._output_formatter)\n for i in range(si, nsi):\n for j in range(si, nsi):\n cinv[i, j] = mat_inv[i-si,j-si]\n self._inverse._components[basis] = cinv\n self._inverse._inverse = self\n return self._inverse", "def inverse(self):\n return self.solve(Matrix.I(self.nrows))", "def toeplitz_inverse_multiplication(u, x_0, phi, psi, D_phi, D_psi, Lambda_1, Lambda_2, Lambda_3, Lambda_4):\n\n y = fft(D_phi*u)\n a = Lambda_1*fft(D_psi*(1/D_phi)*ifft(Lambda_2*y))\n b = Lambda_3*fft(D_psi*(1/D_phi)*ifft(Lambda_4*y))\n y = (1/D_psi)*real(ifft(a-b))/(x_0*(phi-psi))\n \n return y", "def MPinv(list_of_ch,direction, angle, azimuth):\n\n\n \"\"\"~~~~~~~~~~~ Input conditions ~~~~~~~~~~~~~~\"\"\"\n ch_list = list_of_ch\n direction_deg = float(direction) #inclined direction of wellbore from North\n angle_deg = float(angle) # inclined angle of well \n azimuth_deg = float(azimuth) # core orientation from North or inclined direction \n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n azimuth_deg = azimuth_deg - 45\n\n \"\"\"~~~~~~~~~~~ Allocate numbers to each direction (for example, xx => 0, xy => 3 etc...) 
~~~~~~~~~~~~~~\"\"\"\n ch_col = ch_list.columns.values\n\n if \"xx\" in ch_col: ch_list.at[\"ch_no\",\"xx\"] =0\n if \"yy\" in ch_col: ch_list.at[\"ch_no\",\"yy\"] =1\n if \"zz\" in ch_col: ch_list.at[\"ch_no\",\"zz\"] =2\n if \"xy\" in ch_col: ch_list.at[\"ch_no\",\"xy\"] =3\n if \"yx\" in ch_col: ch_list.at[\"ch_no\",\"yx\"] =4\n if \"yz\" in ch_col: ch_list.at[\"ch_no\",\"yz\"] =5\n if \"zy\" in ch_col: ch_list.at[\"ch_no\",\"zy\"] =6\n if \"zx\" in ch_col: ch_list.at[\"ch_no\",\"zx\"] =7\n if \"xz\" in ch_col: ch_list.at[\"ch_no\",\"xz\"] =8\n\n ch = ch_list.loc[\"ch_no\",:].values\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n Number_of_vector = len(ch)\n No_v = Number_of_vector\n direction_rad = direction_deg*pi*180**(-1) \n angle_rad = angle_deg*pi*180**(-1) \n azimuth_rad = azimuth_deg*pi*180**(-1) \n\n\n \"\"\"~~~~~~~~ Create matrix of Direction Cosine vectors~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n c=np.cos(0.25*pi)\n s=np.sin(0.25*pi)\n n = np.zeros((3,9))\n\n n[:,0] = np.array([1,0,0])\n n[:,1] = np.array([0,1,0])\n n[:,2] = np.array([0,0,1])\n n[:,3] = np.array([c,s,0])\n n[:,4] = np.array([c,-s,0])\n n[:,5] = np.array([0,c,s])\n n[:,6] = np.array([0,c,-s])\n n[:,7] = np.array([c,0,s])\n n[:,8] = np.array([-c,0,s])\n\n\n \"\"\"~~~~~~~~~~~~~~ coordinate transformation from 'ASR local co-ordinate' to 'Geological co-ordinate' ~~~~~~~~~~~~~~~~~\"\"\"\n cdr = np.cos(direction_rad)\n sdr = np.sin(direction_rad)\n\n caz = np.cos(azimuth_rad)\n saz = np.sin(azimuth_rad)\n\n can = np.cos(angle_rad)\n san = np.sin(angle_rad)\n\n Rdr = np.array([[cdr, sdr, 0],[-sdr, cdr, 0],[0, 0, 1]]) #counter_clockwise\n Ran = np.array([[1, 0, 0],[0, can, san],[0, -san, can]])\n Raz = np.array([[caz, -saz, 0],[saz, caz, 0],[0, 0, 1]])\n\n R1 = Ran.dot(Rdr)\n R2 = Raz.dot(R1)\n\n for i in range(0,9):\n n[:,i] = R2.dot(n[:,i])\n n= np.round(n,6)\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n\n\n\n \"\"\"~~~~~~~~ Create matrix A (b = Ax: b;Observed normal strain data, x;strain tensor component which we have to determine) ~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n X = np.empty((No_v,6))\n\n for i in range(0,No_v):\n cc = ch[i]\n X[i,:] = np.array([n[0,cc]**2, n[1,cc]**2, n[2,cc]**2, 2*n[0,cc]*n[1,cc], 2*n[1,cc]*n[2,cc], 2*n[2,cc]*n[0,cc]])\n \"\"\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \"\"\"\n\n\n X_inv = np.linalg.pinv(X) # Calculate Moore-Penrose inverse matrix\n\n return X_inv", "def inverse(self):\n cdef StdVectorFst result = self.copy()\n result.invert()\n return result", "def inverse(self) -> np.ndarray:\n if self.is_versor():\n return self.conjugate\n return self.conjugate / np.linalg.norm(self.q)", "def _inverse_kinematics(self):\n q_2 = np.arccos(\n (\n (self._x_obj_0 - self._jnt_lengths[2]) ** 2\n + self._y_obj_0 ** 2\n - self._jnt_lengths[0] ** 2\n - self._jnt_lengths[1] ** 2\n )\n / (2 * self._jnt_lengths[0] * self._jnt_lengths[1])\n )\n psi = np.arcsin(\n self._jnt_lengths[1] * np.sin(q_2)\n / np.sqrt(\n (self._x_obj_0 - self._jnt_lengths[2]) ** 2\n + self._y_obj_0 ** 2\n )\n )\n q_1 = (np.arctan2(self._x_obj_0 - self._jnt_lengths[2], self._y_obj_0)\n - psi)\n q_3 = np.pi / 2 - q_1 - q_2\n return np.array([q_1, q_2, q_3])", "def dispersion_inverse(self, E):\n 
return E / (self.material.hbar_m * self.material.c_s)", "def inverseIntermediateJac(self,x):\n \n Ri = self._rotation.T\n si = (1./self._scaled).reshape((1,1,self._dim))\n \n Jac = self.intermediateJacPol2Rot(x)\n \n #Ri.J\n Jac = np.einsum(\"jk,ikl->ijl\",Ri,Jac)\n #(Ri.J).diag(si)\n Jac *= si\n \n return Jac", "def inverse(self: Float[LinearOperator, \"*batch N N\"]) -> Float[LinearOperator, \"*batch N N\"]:\n return self.__class__(self._diag.reciprocal())", "def inverse(self):\n ss = self._sum_of_squares()\n if ss > 0:\n return self.__class__(array=(self._vector_conjugate() / ss))\n else:\n raise ZeroDivisionError(\"a zero quaternion (0 + 0i + 0j + 0k) cannot be inverted\")", "def invert(x):\n return linalg.inv(x)" ]
[ "0.63808596", "0.6343662", "0.63060564", "0.6252362", "0.61781716", "0.61693305", "0.61612785", "0.61262125", "0.61208826", "0.6098615", "0.609339", "0.60388464", "0.59626293", "0.59568155", "0.5946826", "0.5945765", "0.59148675", "0.5901364", "0.5894338", "0.5879606", "0.5866621", "0.5846758", "0.5840087", "0.5840087", "0.5840087", "0.5840087", "0.5840087", "0.5833871", "0.58016366", "0.5789425", "0.5733911", "0.57310665", "0.5730685", "0.57143193", "0.57123756", "0.57119614", "0.57080483", "0.5699251", "0.5698159", "0.5690242", "0.56726503", "0.56577337", "0.56426674", "0.56364226", "0.5636226", "0.56362206", "0.56358665", "0.563414", "0.5604714", "0.5603587", "0.5599135", "0.55941737", "0.55838495", "0.5582931", "0.55777097", "0.5572919", "0.55698544", "0.55698204", "0.5555619", "0.5529667", "0.5528338", "0.5523666", "0.5521346", "0.55118716", "0.5504881", "0.5503237", "0.54974544", "0.5494002", "0.54914665", "0.5489234", "0.5477827", "0.5470967", "0.5470418", "0.5461463", "0.5461243", "0.545371", "0.54422855", "0.5442276", "0.54403985", "0.5439962", "0.54390264", "0.5436044", "0.5419815", "0.5416801", "0.5412715", "0.54122716", "0.54086965", "0.5405417", "0.5404869", "0.53984505", "0.5390445", "0.5389796", "0.5380143", "0.53707486", "0.5368528", "0.5364865", "0.53606755", "0.53594494", "0.5357433", "0.5343636" ]
0.7558201
0
Transforms camera points to image points
def CameraToImage(self, cameraPoints): # setting up the required matrices a0 = self.innerOrientationParameters[0] b0 = self.innerOrientationParameters[1] a1 = self.innerOrientationParameters[2] a2 = self.innerOrientationParameters[3] b1 = self.innerOrientationParameters[4] b2 = self.innerOrientationParameters[5] if np.isscalar(a0): R = np.array([[a1, a2], [b1, b2]]) T = np.array([[a0], [b0]]) else: R = np.array([[a1[0], a2[0]], [b1[0], b2[0]]]) T = np.array([[a0[0]], [b0[0]]]) cameraPoints = cameraPoints.T # computing the transformation to the image system return (T + np.dot(R, cameraPoints)).T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def imageFromCamera(self, points): # pragma: no cover\n # to be overloaded by the child class.\n return None", "def ImageToCamera(self, imagePoints):\n inverse_pars = self.ComputeInverseInnerOrientation()\n imagePoints = imagePoints.T\n\n if imagePoints.size == 2:\n imagePoints = np.reshape(np.array(imagePoints), (np.size(imagePoints), 1))\n\n T = np.array([[inverse_pars[0]], [inverse_pars[1]]])\n R = np.array([[inverse_pars[2], inverse_pars[3]], [inverse_pars[4], inverse_pars[5]]])\n\n return (np.dot(R, imagePoints - T)).T", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * 
points[..., 1] / np.linalg.norm(points[..., [0, 2]],\n axis=-1) + self.center_y_px]).T\n # ensure that points' x values are also nan when the y values are nan\n transformed_points[np.isnan(transformed_points[..., 1])] = np.nan\n # return the points\n return transformed_points", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. \n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project_points(points, cam_matrix, trans, rot):\n\n # STEP 1: Transform pointcloud into new reference frame.\n points = np.dot(rot, points) + trans[:, None]\n\n # STEP 2: Project new pointcloud onto image frame using K matrix.\n # gives a 3 x N array of image plane coordinates in homogenous coordinates.\n homo_pixel_coords = np.dot(cam_matrix, points)\n\n # STEP 3: Convert homogenous coordinates to regular 2D coordinates.\n # To do this, you need to divide the first two coordinates of homo_pixel_coords\n # by the third coordinate.\n pixel_coords = homo_pixel_coords[:2] / homo_pixel_coords[2]\n\n # STEP 4: Convert to integers. 
Take the floor of pixel_coords then cast it\n # to an integer type, like numpy.int32\n pixel_coords = np.int32(np.floor(pixel_coords))\n\n return pixel_coords", "def fourPointTransform(image, points):\r\n\r\n topLeft, topRight, bottomLeft, bottomRight = sortFourPoints(points)\r\n\r\n # Determine the maximum width\r\n topWidth = np.sqrt(((topRight[0] - topLeft[0]) ** 2) + ((topRight[1] - topLeft[1]) ** 2))\r\n bottomWidth = np.sqrt(((bottomRight[0] - bottomLeft[0]) ** 2) + ((bottomRight[1] - bottomLeft[1]) ** 2))\r\n width = max(int(topWidth), int(bottomWidth))\r\n\r\n # Determine the maximum height\r\n leftHeight = np.sqrt(((topLeft[0] - bottomLeft[0]) ** 2) + ((topLeft[1] - bottomLeft[1]) ** 2))\r\n rightHeight = np.sqrt(((topRight[0] - bottomRight[0]) ** 2) + ((topRight[1] - bottomRight[1]) ** 2))\r\n height = max(int(leftHeight), int(rightHeight))\r\n\r\n source = np.array([topLeft, topRight, bottomRight, bottomLeft], dtype=\"float32\")\r\n\r\n destination = np.array([[0, 0],\r\n [width - 1, 0],\r\n [width - 1, height - 1],\r\n [0, height - 1]], dtype=\"float32\")\r\n\r\n transformMatrix = cv2.getPerspectiveTransform(source, destination)\r\n\r\n return cv2.warpPerspective(image, transformMatrix, (width, height))", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = 
camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image", "def project_points(self, points_3d, camera):\n batch_size = points_3d.shape[0]\n device = points_3d.device\n cam_t = torch.stack([camera[:, 1], camera[:, 2], 2 * self.focal_length / (self.img_res * camera[:, 0] + 1e-09)], dim=-1)\n camera_center = camera.new_zeros([batch_size, 2])\n rot_t = torch.eye(3, device=device, dtype=points_3d.dtype).unsqueeze(0).expand(batch_size, -1, -1)\n joints_2d = perspective_projection(points_3d, rotation=rot_t, translation=cam_t, focal_length=self.focal_length, camera_center=camera_center)\n return joints_2d", "def convert_image_point_to_global_coordinates(points, camera_location):\n # TODO: The camera should take photos which record the camera_location, and scale factors etc.\n # This should be a method on such an image.\n\n # Convert to numpy object for a clean notation\n points = np.array(points)\n camera_location = np.array(camera_location)\n scale_factors = np.array([config.Y_PIXELS_TO_MILLIMETRE_SCALE, config.X_PIXELS_TO_MILLIMETRE_SCALE])\n camera_resolution = np.array(config.CAMERA_RESOLUTION)\n\n # Do the computation\n image_centre = camera_resolution / 2\n return camera_location + scale_factors * (points - image_centre)", "def four_point_transform(self, image, pts):\n rect = []\n for j in range(4):\n rect.append([pts[j * 2], pts[j * 2 + 1]])\n\n rect = np.array(rect, dtype=\"float32\")\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype=\"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed", "def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n\n #get projection matrix\n pmatrix = projection_matrix(R, T, K)\n\n #add 4th component to points\n ones = np.ones([1,len(X[0])])\n xones=np.row_stack((X,ones))\n\n #calculate pixel coordinates\n X_camera = pmatrix.dot(xones)\n\n return X_camera", "def converte_coord(valor):\n\n pts1 = ([0,0],[24,0],[24,44],[0,44])\n pts1 = np.asarray(pts1, dtype = np.float32)\n pts2 = np.float32([[0,0],[100,0], [100,100], [0,100]])\n\n M = cv.getPerspectiveTransform(pts1,pts2)\n img2 = cv.warpPerspective(valor,M,(100,100))\n return img2", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def get_projections(self, points_in_camera_frame: ARRAY_LIKE,\n image: int = 0, temperature: Real = 0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\n # ensure the input is an array\n points_in_camera_frame = np.asarray(points_in_camera_frame)\n\n # apply misalignment to the points\n if self.estimate_multiple_misalignments:\n if np.any(self.misalignment[image]): # optimization to avoid matrix multiplication\n points_in_camera_frame = rotvec_to_rotmat(self.misalignment[image]).squeeze() @ \\\n points_in_camera_frame\n\n else:\n if np.any(self.misalignment): # optimization to avoid matrix multiplication\n points_in_camera_frame = rotvec_to_rotmat(self.misalignment).squeeze() @ points_in_camera_frame\n\n # get the unitless image plane location\n pinhole_locations = points_in_camera_frame[:2] / points_in_camera_frame[2]\n\n # get the distorted image plane location\n image_locations = self.apply_distortion(pinhole_locations)\n\n # add the temperature based scaling\n image_locations *= self.get_temperature_scale(temperature)\n\n # get the pixel locations of the points, need to mess with transposes due to numpy broadcasting rules\n picture_locations = ((self.intrinsic_matrix[:, :2] @ image_locations).T + self.intrinsic_matrix[:, 2]).T\n\n return pinhole_locations, image_locations, picture_locations", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - 
lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def GroundToImage(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()", "def transform32(points, H, add=(0, 0)):\n points = np.float32(points)\n return np.int32(cv2.perspectiveTransform(points.reshape(1, -1, 2), H).reshape(-1, 2) + add)", "def camera_to_pixel(self, X):\n raise NotImplementedError", "def get_2d_points(image, rotation_vector, translation_vector, camera_matrix, val):\n point_3d = []\n dist_coeffs = np.zeros((4,1))\n rear_size = val[0]\n rear_depth = val[1]\n point_3d.append((-rear_size, -rear_size, rear_depth))\n point_3d.append((-rear_size, rear_size, rear_depth))\n point_3d.append((rear_size, rear_size, rear_depth))\n point_3d.append((rear_size, -rear_size, rear_depth))\n point_3d.append((-rear_size, -rear_size, rear_depth))\n \n front_size = val[2]\n front_depth = val[3]\n point_3d.append((-front_size, -front_size, front_depth))\n point_3d.append((-front_size, front_size, front_depth))\n point_3d.append((front_size, front_size, front_depth))\n point_3d.append((front_size, -front_size, front_depth))\n point_3d.append((-front_size, -front_size, front_depth))\n point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)\n \n # Map to 2D image points\n (point_2d, _) = cv2.projectPoints(point_3d,rotation_vector,translation_vector,camera_matrix,dist_coeffs)\n point_2d = np.int32(point_2d.reshape(-1, 2))\n return point_2d", "def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, 
src)\n\n return M, Minv", "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def compute_point_perspective_transformation(matrix, list_downoids):\n # Compute the new coordinates of our points\n list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)\n transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)\n # Loop over the points and add them to the list that will be returned\n transformed_points_list = list()\n for i in range(0, transformed_points.shape[0]):\n transformed_points_list.append([transformed_points[i][0][0], transformed_points[i][0][1]])\n return transformed_points_list", "def cam2pixel(self, cam_coords, pose):\n\n b, _, h, w = cam_coords.size()\n cam_coords_flat = cam_coords.view(b,3,-1) # [B,3,H*W]\n pcoords = pose[:,:,0:3].bmm(cam_coords_flat) + pose[:,:,3].view(b,3,1) #Bx[3x3 x 3xH*W] = [B x 3 x H*W]\n X, Y, Z = pcoords[:,0,:].clamp(-1e20,1e20), pcoords[:,1,:].clamp(-1e20,1e20), pcoords[:,2,:].clamp(1e-20,1e20) #each are [B x H*W] \n X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W]\n\n X_mask = ((X_norm > 1)+(X_norm < -1)).detach()\n X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray\n Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach()\n Y_norm[Y_mask] = 2\n\n pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]\n return pixel_coords.view(b,h,w,2)", "def to_camera_coords(K, px_pts):\n if len(px_pts.shape) == 1:\n px_pts = np.expand_dims(px_pts, axis=0)\n num_pts = len(px_pts)\n\n px_pts = np.concatenate([\n px_pts, np.ones((num_pts, 1))\n ], axis=-1)\n\n norm_pts = np.matmul(np.linalg.inv(K), px_pts.T)[:-1,:]\n return norm_pts.T", "def camera_to_object_transform(self):\n # form the full object to camera transform\n T_stp_camera = self.stp_to_camera_transform()\n T_obj_stp = self.object_to_stp_transform()\n T_obj_camera = T_stp_camera.dot(T_obj_stp)\n return T_obj_camera", "def compute_perspective_transform(corner_points, width, height, image):\n # Create an array out of the 4 corner points\n corner_points_array = np.float32(corner_points)\n # Create an array with the parameters (the dimensions) required to build the matrix\n img_params = np.float32([[0, 0], [width, 0], [0, height], [width, height]])\n # Compute and return the transformation matrix\n matrix = cv2.getPerspectiveTransform(corner_points_array, img_params)\n img_transformed = cv2.warpPerspective(image, matrix, (width, height))\n return matrix, img_transformed", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n X_camera = np.matmul(R, X) + T\n X_camera = X_camera / X_camera[2, :] # Normalize\n\n if distortion_flag:\n radiusSq = (X_camera[0, :] * X_camera[0, :]) + (X_camera[1, :] * X_camera[1, :])\n X_camera = X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # X_camera = (X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # + (2 * distortion_params[2] * X_camera[0,:] * X_camera[1,:]) + distortion_params[3] * (radiusSq + (2 * X_camera * 
X_camera)))\n\n X_camera[2, :] = 1.0\n X_camera = np.matmul(K, X_camera)\n X_camera = X_camera[:2, :]\n\n return X_camera", "def testCalculateTransform(self):\n # Create some points in the first frame.\n z = 4.0\n self.evaluator.camera_height = z\n first_points = numpy.array(\n [[0, 0, z], [2, 0, z], [2, 5, z], [0, 5, z]], dtype=numpy.float32)\n # Create a transformation that will move the camera\n R = numpy.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n t = numpy.array([[3.0], [-5.0], [0.0]])\n expected_result = numpy.eye(4)\n expected_result[0:3, 0:3] = R\n expected_result[0:3, 3:] = t\n # Determine where the second points would be given that.\n second_points = (numpy.matmul(\n R, first_points.transpose()) + t).transpose()\n # Create a simple intrinsic matrix to project onto a fictional camera\n intrinsic = numpy.array(\n [[1.0, 0.0, 20.0], [0.0, 1.0, 20.0], [0.0, 0.0, 1.0]])\n # Use no distortion or transformations\n rvec = numpy.zeros((3, 1))\n tvec = rvec\n distortion = numpy.zeros((5, 1))\n # Project the points into the camera\n (camera_first_points, _) = cv2.projectPoints(\n first_points, rvec, tvec, intrinsic, distortion)\n camera_first_points = camera_first_points.squeeze()\n (camera_second_points, _) = cv2.projectPoints(\n second_points, rvec, tvec, intrinsic, distortion)\n camera_second_points = camera_second_points.squeeze()\n # Using these projected points, can the object recover the correct initial transform\n result = self.evaluator._calculateTransform(\n camera_first_points, camera_second_points, intrinsic)\n # The matrix comparisions aren't reliable near zero, so check elements manually.\n for i in range(expected_result.shape[0]):\n for j in range(expected_result.shape[1]):\n result_element = result[i, j]\n expected_element = expected_result[i, j]\n self.assertAlmostEqual(result_element, expected_element, 6,\n 'Matrix element ({0:d}, {1:d}) is incorrect.'.format(i, j))", "def projectToImage(pts_3D, P):\n pts_3D = np.reshape(pts_3D, (-1, 3))\n pts_3D = np.transpose(pts_3D)\n pts_3D = np.vstack([pts_3D, 1])\n pts_2D = np.matmul(P, pts_3D)\n pts_2D = pts_2D[:2]/pts_2D[-1]\n pts_2D = np.transpose(pts_2D)\n return pts_2D", "def four_point_transform(image, pts):\n rect = order_points(pts)\n width_first = np.sqrt(\n ((rect[2][0] - rect[3][0]) ** 2) + ((rect[2][1] - rect[3][1]) ** 2)\n )\n width_second = np.sqrt(\n ((rect[1][0] - rect[0][0]) ** 2) + ((rect[1][1] - rect[0][1]) ** 2)\n )\n max_width = max(int(width_first), int(width_second))\n height_first = np.sqrt(\n ((rect[1][0] - rect[2][0]) ** 2) + ((rect[1][1] - rect[2][1]) ** 2)\n )\n height_second = np.sqrt(\n ((rect[0][0] - rect[3][0]) ** 2) + ((rect[0][1] - rect[3][1]) ** 2)\n )\n max_height = max(int(height_first), int(height_second))\n dst = np.array(\n [\n [0, 0],\n [max_width - 1, 0],\n [max_width - 1, max_height - 1],\n [0, max_height - 1],\n ],\n dtype=\"float32\",\n )\n view_transform = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, view_transform, (max_width, max_height))\n return warped", "def cam_to_world(cam_point, world_to_cam):\n # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])\n\n obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))\n world_point = np.dot(world_to_cam, obj_vector)\n\n world_point = [p[0] for p in world_point]\n return world_point[0:3]", "def perspective_projection(points, rotation, translation, focal_length, camera_center):\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n 
K[:, 0, 0] = focal_length\n K[:, 1, 1] = focal_length\n K[:, 2, 2] = 1.0\n K[:, :-1, -1] = camera_center\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n projected_points = points / points[:, :, -1].unsqueeze(-1)\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n projected_points = projected_points[:, :, :-1]\n return projected_points", "def transform_points(Points,R,t):\r\n return [transform_point(p,R,t) for p in Points]", "def projectToImage_kitti(pts_3D, P):\n # project in image\n mat = np.vstack((pts_3D, np.ones((pts_3D.shape[1]))))\n\n pts_2D = np.dot(P, mat)\n\n # scale projected points\n pts_2D[0, :] = pts_2D[0, :] / pts_2D[2, :]\n pts_2D[1, :] = pts_2D[1, :] / pts_2D[2, :]\n pts_2D = np.delete(pts_2D, 2, 0)\n\n return pts_2D", "def transform_points(transf_matrix, points):\n if(type(points)==list):\n temp_pts = [np.array([x[0],x[1],x[2],1]) for x in points]\n newpts = []\n for pt in temp_pts:\n newpts.append((transf_matrix@pt)[:3])\n else:\n temp_pts = np.array([points[0],points[1],points[2],1])\n newpts=(transf_matrix@temp_pts)[:3]\n return newpts", "def project_and_draw(img, X_3d, K, R, T, distortion_flag, distortion_parameters):\n # call your \"project_points\" function to project 3D points to camera coordinates\n # draw the projected points on the image and save your output image here\n # cv.imwrite(output_name, img_array)\n X_camera = project_points(X_3D,K,R,T,distortion_flag,distortion_parameters)\n\n newimg=copy.copy(img)\n color = (0, 230, 0)\n if not distortion_flag:\n color = (0,0,230)\n\n Xp = []\n Xp.append([])\n Xp.append([])\n\n for cur in range(0,np.shape(X_camera)[1]):\n x = X_camera[0,cur]\n y = X_camera[1,cur]\n z = X_camera[2,cur]\n xp = int(x/z)\n yp = int(y/z)\n Xp[0].append(xp)\n Xp[1].append(yp)\n Xp2 = np.row_stack((Xp,np.ones(len(Xp[0]))))\n if(distortion_flag):\n Xp2 = distort(Xp2,K,distortion_parameters)\n\n for cur in range(0, np.shape(X_camera)[1]):\n x = Xp2[0, cur]\n y = Xp2[1, cur]\n newimg = cv.circle(newimg, (int(x), int(y)), 2, color, 0)\n\n #cv.imshow(\"Test\",newimg)\n #cv.waitKey(0)\n\n return newimg", "def world_to_camera(self, X):\n raise NotImplementedError", "def project_to_image(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = points_proj[2,:]\n point_z = np.tile(point_depths, [3, 1])\n points_proj = np.divide(points_proj, point_z)\n if round_px:\n points_proj = np.round(points_proj)\n points_proj = points_proj[:2,:].astype(np.int16)\n\n valid_ind = np.where((points_proj[0,:] >= 0) & \\\n (points_proj[1,:] >= 0) & \\\n (points_proj[0,:] < self.width) & \\\n (points_proj[1,:] < self.height))[0]\n\n depth_data = np.zeros([self.height, self.width])\n depth_data[points_proj[1,valid_ind], points_proj[0,valid_ind]] = point_depths[valid_ind]\n return DepthImage(depth_data, frame=self.frame)", "def transform(self, previousimage):", "def transform_points(points, transf_matrix):\n if points.shape[0] not in [3, 4]:\n raise Exception(\n \"Points input should be (3,N) or (4,N) shape, received 
{}\".format(\n points.shape\n )\n )\n return transf_matrix.dot(np.vstack((points[:3, :], np.ones(points.shape[1]))))[\n :3, :\n ]", "def ImageToRay(self, imagePoints):\n pass # delete after implementations", "def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):\n b, _, h, w = cam_coords.size()\n cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]\n if proj_c2p_rot is not None:\n pcoords = proj_c2p_rot.bmm(cam_coords_flat)\n else:\n pcoords = cam_coords_flat\n\n if proj_c2p_tr is not None:\n pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]\n X = pcoords[:, 0]\n Y = pcoords[:, 1]\n Z = pcoords[:, 2].clamp(min=1e-8)\n\n X_norm = 2 * (X / Z) / (w - 1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n Y_norm = 2 * (Y / Z) / (h - 1) - 1 # Idem [B, H*W]\n if padding_mode == 'zeros':\n X_mask = ((X_norm > 1) + (X_norm < -1)).detach()\n X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray\n Y_mask = ((Y_norm > 1) + (Y_norm < -1)).detach()\n Y_norm[Y_mask] = 2\n\n pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]\n return pixel_coords.view(b, h, w, 2)", "def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points", "def transform(self, R, t, scale = 1):\n\n # Build 4-by-4 projection matrix from args ----------------------------\n # This is what we are doing internally:\n # Proj = np.r_[ scale * np.c_[R, t], [[0, 0, 0, 1]] ]\n # InvProj = np.r_[ scale * np.c_[R.T, -np.dot(R.T, t)], [[0,0,0,scale]] ]\n Proj = tf_format.tf_format('4x4', R, t)\n Proj[:-1,:] *= scale\n InvProj = tf_format.tf_format('i4x4', R, t) * scale\n \n \n # Apply transformation to pts3D ---------------------------------------\n if self.pts3D is not None and self.pts3D.shape[1] > 0:\n # Use homogeneous coords\n pts3D = np.r_[self.pts3D, np.ones((1, self.pts3D.shape[1]))]\n pts3D = np.dot(Proj, pts3D)\n self.pts3D = pts3D[:3, :]\n\n # Apply transformation to cameras -------------------------------------\n # Camera poses are stored using camera-to-world transformations, we \n # need to invert the projection matrix for this to work --> \n # we use InvProj\n\n cposes = self.cam_poses\n for i in range(cposes.shape[1]):\n\n # Extract camera projection matrix\n p_cam = tf_format.tf_format('4x4', cposes[:, i])\n\n # Transform camera projection matrix\n new_p_cam = np.dot(p_cam, InvProj)\n \n # Make sure it's a true rotation!\n [u, s, vT] = np.linalg.svd(new_p_cam[:3,:3])\n cposes[:3, i] = tf_format.rodrigues( np.dot(u,vT) ).ravel()\n cposes[3:, i] = new_p_cam[:3, 3]\n\n self.cam_poses = cposes", "def pixel2cam(self, depth, intrinsics_inv):\n b, _, h, w = depth.size()\n i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]\n j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]\n ones = torch.ones(1,h,w).type_as(depth)\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]\n ###pixel_coords is an array of camera pixel coordinates (x,y,1) where x,y origin is the upper left corner of the image.\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).view(b,3,-1) #.contiguous().view(b, 3, -1) # [B, 3, H*W]\n #cam_coords = 
intrinsic_inv.expand(b,3,3).bmm(current_pixel_coords).view(b,3,h,w)\n cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b,3,h,w)\n return cam_coords * depth", "def camera_to_world(self, X):\n raise NotImplementedError", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def getAffineTransform(self, coord1, coord2):\n num_coords = 2 * len(coord1)\n A = np.zeros((num_coords, 6))\n b = []\n for point2 in coord2:\n b.append(float(point2[0]))\n b.append(float(point2[1]))\n b = np.asarray(b)\n i = 0\n for point1 in coord1:\n A[i, 0:2] = point1[0:2]\n A[i, 2] = 1\n A[i+1, 3:5] = point1[0:2]\n A[i+1, 5] = 1\n i += 2\n A = np.asarray(A)\n b = np.asarray(b)\n x = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), b.T)\n self.depth2rgb_affine = np.reshape(x, (2, 3))\n csv.writer(open(\"depth2rgb_affine.cfg\", \"w+\", newline=''), delimiter=',').writerows(self.depth2rgb_affine)\n # else:\n # x = np.vstack([np.reshape(x,(2,3)),[0,0,1]])\n # self.cam_ext_mat = x\n # A = [point[i,j+0:j+3].astype(np.float32) for i,point in enumerate(coord1) if i%2 == 0]\n # pts1 = coord1[0:3].astype(np.float32)\n # pts2 = coord2[0:3].astype(np.float32)\n # print(cv2.getAffineTransform(pts1, pts2))\n # return cv2.getAffineTransform(pts1, pts2)", "def evaluate(self, points):\n points = np.array(points, np.float64)\n output_shape = points.shape[1:]\n points.shape = (points.shape[0], seq_prod(output_shape))\n cmapi = self.image.coordmap.inverse()\n voxels = cmapi(points.T).T\n V = map_coordinates(self.data,\n voxels,\n order=self.order,\n mode=self.mode,\n cval=self.cval,\n prefilter=False)\n # ndimage.map_coordinates returns a flat array,\n # it needs to be reshaped to the original shape\n V.shape = output_shape\n return V", "def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the image\n # pixels = pixels[mask_x & mask_y]\n return pixels", "def get_point_coords_wrt_image(boxes_coords, point_coords):\n with torch.no_grad():\n point_coords_wrt_image = point_coords.clone()\n point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (\n boxes_coords[:, None, 2] - boxes_coords[:, None, 0]\n )\n point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (\n boxes_coords[:, None, 3] - boxes_coords[:, None, 1]\n )\n point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]\n point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]\n return point_coords_wrt_image", "def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst", "def transform_points(self, points3d: np.ndarray) -> np.ndarray:\n assert self._r is not None\n assert self._t is not None\n assert isinstance(points3d, np.ndarray)\n\n if points3d.shape[1] == 6: # expunge RGB\n points3d = 
points3d[:, 0:3]\n points3d = points3d.transpose()\n rotation_matrix = np.empty((3, 3), dtype=float)\n rotation_as_np = np.array([self.r.w, self.r.x, self.r.y, self.r.z])\n _as_rotation_matrix_njit(rotation_as_np, rotation_matrix)\n points3d = np.add(np.matmul(rotation_matrix, points3d), self.t)\n return points3d.transpose()", "def myPerspectiveTransform(pts, H):\n\n # Clone and reshape the list of points\n new_pts = np.reshape(pts, (-1, 2))\n # Allocate a vector filled with one with size (-1, 1)\n one_vector = np.zeros((pts.shape[0], 1)) + 1\n # Concatenate the one vector to the list of points to form the homogenious coordiniate system\n new_pts = np.concatenate((new_pts, one_vector), axis=len(new_pts.shape)-1)\n\n # Perform transformation and transform results into the pixel coord. system\n # i.e., x' = x/w, and y' = y/w\n for i, pt in enumerate(new_pts):\n new_pts[i] = H.dot(pt.T)\n new_pts[i] /= new_pts[i, -1]\n\n # Return results with the same shape as the input has\n return new_pts[:, :-1].reshape(pts.shape)", "def get_warped_points(points, M):\n\n # verify that points type is ndarray, convert if not\n if type(points).__module__ != np.__name__:\n points = np.array(points)[np.newaxis, :]\n\n # reverse order of input points [y,x] -> [x,y]\n # points = points[...,::-1]\n\n # Find full affine marix\n rowM = np.array([[0, 0, 1]])\n M = np.concatenate((M, rowM), axis=0)\n\n size = len(points.shape)\n # p=points.copy() # for debug\n\n # option 1 - use cv2.perspectiveTransform()\n # cv2.perspectiveTransform() expects to receive 3D array, so we need to verify that points has 3 dimensions\n for m in range(3 - size):\n points = points[np.newaxis, ...]\n\n points_warped = cv2.perspectiveTransform(points.astype(np.float64), M)\n points_warped = np.squeeze(points_warped)\n\n # reverse order of input points [y,x] -> [x,y]\n # points_warped = points_warped[..., ::-1]\n\n '''\n # option 2 - use matrix multiplication\n # assumes points are ordered (y,x)!\n points = p # for debug\n if size == 1:\n rowP = np.ones(1)\n elif size == 2:\n rowP = np.ones(N)\n\n points = np.concatenate((points, rowP), axis=0)\n points_warped2 = np.dot(M, points)\n points_warped2 = points_warped2[:-1]\n\n diff = np.sum(np.abs(points_warped2 - points_warped)) # for debug\n '''\n return points_warped", "def pose_2d_pts(self,image):\n '''\n image- rgb image \n return:-\n pts - list of 2d pose landmarks as img coords\n image- rgb image on which the 2d pose landmarks are drawn\n ''' \n pts=[]\n imgRGB=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n results=pose.process(imgRGB)\n if results.pose_landmarks:\n mpDraw.draw_landmarks(image,results.pose_landmarks,mpPose.POSE_CONNECTIONS)\n for id,lm in enumerate(results.pose_landmarks.landmark):\n h,w,c=image.shape\n imgx,imgy=int(lm.x*w),int(lm.y*h)\n \n pts.append((imgx,imgy)) \n return pts,image", "def GroundToImage_RzRyRz(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix_RzRyRz\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 
1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def project(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = np.tile(points_proj[2,:], [3, 1])\n points_proj = np.divide(points_proj, point_depths)\n if round_px:\n points_proj = np.round(points_proj)\n\n if isinstance(point_cloud, Point):\n return Point(data=points_proj[:2,:].astype(np.int16), frame=self._frame)\n return ImageCoords(data=points_proj[:2,:].astype(np.int16), frame=self._frame)", "def scale_camera(cam, scale=1):\n new_cam = np.copy(cam)\n # focal: \n new_cam[1][0][0] = cam[1][0][0] * scale\n new_cam[1][1][1] = cam[1][1][1] * scale\n # principle point:\n new_cam[1][0][2] = cam[1][0][2] * scale\n new_cam[1][1][2] = cam[1][1][2] * scale\n return new_cam", "def MakeCarToImageTransform(pixels_per_meter, image_ref_x, image_ref_y,\n flip_axes):\n ppm1 = 0. if flip_axes else pixels_per_meter\n ppm2 = -pixels_per_meter if flip_axes else 0.\n # pyformat: disable\n car_to_image_transform = np.array([\n [ppm1, ppm2, 0., image_ref_x],\n [ppm2, ppm1, 0., image_ref_y],\n [0., 0., 1., 0.],\n [0., 0., 0., 1.]])\n # pyformat: enable\n return car_to_image_transform", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms", "def perspective_projection(points, rotation, translation,\n focal_length, camera_center):\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], 
device=points.device)\n K[:, 0, 0] = focal_length\n K[:, 1, 1] = focal_length\n K[:, 2, 2] = 1.\n K[:, :-1, -1] = camera_center\n\n # Transform points\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n\n # Apply perspective distortion\n projected_points = points / points[:, :, -1].unsqueeze(-1)\n\n # Apply camera intrinsics\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n\n return projected_points[:, :, :-1]", "def transform_images(img1,img2):", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def getCartesianPointsImage(self, points):\n return getCartesianPointsImage(points, self)", "def rectangular_perpective_transform(image, points):\n # We first order our points so they go clockwise from the top left. Top left point must have the\n # lowest coordinate sum, while the bottom right must have the largest\n ordered_pts = np.empty((4, 2), dtype = 'float32')\n pt_sum = np.sum(points, axis = 1)\n ordered_pts[0] = points[np.argmin(pt_sum)]\n ordered_pts[2] = points[np.argmax(pt_sum)]\n\n # the top right should have smallest coordinate difference, bottom left the largest\n pt_diff = np.diff(points, axis = 1)\n ordered_pts[1] = points[np.argmin(pt_diff)]\n ordered_pts[3] = points[np.argmax(pt_diff)]\n\n # for convenience, we store the points as variables for convenience in calculating width / height\n (top_left, top_right, bottom_right, bottom_left) = ordered_pts\n\n top_width = np.linalg.norm(top_right - top_left)\n bottom_width = np.linalg.norm(bottom_right - bottom_left)\n width = int(max(top_width, bottom_width))\n\n left_height = np.linalg.norm(bottom_left - top_left)\n right_height = np.linalg.norm(bottom_right - top_right)\n height = int(max(left_height, right_height))\n\n # create destination coordinate points to give us a top-down view of the subimage enclosed by the original points\n dest_points = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype = 'float32')\n transform_matrix = cv2.getPerspectiveTransform(ordered_pts, dest_points)\n return cv2.warpPerspective(image, transform_matrix, (width, height))", "def projectPoints(self, points):\n return [self.projectPoint(point) for point in points]", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def transform(self, image):\n # e) use cv2.warpPerspective() to warp your image to a top-down view\n # Warp the image using OpenCV warpPerspective()\n w, h = image.shape[1], image.shape[0]\n return cv2.warpPerspective(image, self.p_mat, (w, h))", "def warpImag(src_img: np.ndarray, dst_img: np.ndarray) -> None:\r\n\r\n dst_p = []\r\n fig1 = plt.figure()\r\n size = src_img.shape\r\n # no need to take the coordinates of the second image in order to do the homography just pick the corners\r\n # coordinates\r\n pts_src = np.array(\r\n [\r\n [0, 0],\r\n [size[1] - 1, 0],\r\n [size[1] - 1, size[0] - 1],\r\n [0, size[0] - 1]\r\n ], dtype=float\r\n )\r\n def onclick_1(event):\r\n x = event.xdata\r\n y = event.ydata\r\n 
print(\"Loc: {:.0f},{:.0f}\".format(x, y))\r\n\r\n plt.plot(x, y, '*r')\r\n dst_p.append([x, y])\r\n\r\n if len(dst_p) == 4:\r\n plt.close()\r\n plt.show()\r\n\r\n # display image 1\r\n cid = fig1.canvas.mpl_connect('button_press_event', onclick_1)\r\n plt.imshow(dst_img)\r\n plt.show()\r\n dst_p = np.array(dst_p)\r\n\r\n ##### Your Code Here ######\r\n h = computeHomography(pts_src, dst_p) # my function to find the homography matrix in order to do projection\r\n # to the coordinates by this equations from opencv dst(x,y) = src(m11x + m12y +m13/ m31x +m32y +m33\r\n # , m21x + m22y +m23/ m31x +m32y +m33)\r\n im_temp = warpPerspective(src_img , h, (dst_img.shape[1],dst_img.shape[0]))\r\n plt.imshow(im_temp)\r\n plt.show()\r\n im_dst2 = im_temp + dst_img\r\n plt.imshow(im_dst2.astype('uint8'))\r\n plt.show()\r\n\r\n pass", "def project_impl(K, Rt, points):\n height, width, _ = points.shape\n projections = np.zeros((height, width, 2))\n projection_matrix = K.dot(Rt)\n\n for h in range(height):\n for w in range(width):\n p = np.append(points[h, w], 1)\n p = projection_matrix.dot(p)\n projections[h, w, 0] = p[0] / p[2]\n projections[h, w, 1] = p[1] / p[2]\n\n return projections", "def perspective_projection(points, rotation, translation,\n focal_length, camera_center, distortion=None):\n batch_size = points.shape[0]\n \n # Extrinsic\n if rotation is not None:\n points = torch.einsum('bij,bkj->bki', rotation, points)\n\n if translation is not None:\n points = points + translation.unsqueeze(1)\n\n if distortion is not None:\n kc = distortion\n points = points[:,:,:2] / points[:,:,2:]\n \n r2 = points[:,:,0]**2 + points[:,:,1]**2\n dx = (2 * kc[:,[2]] * points[:,:,0] * points[:,:,1] \n + kc[:,[3]] * (r2 + 2*points[:,:,0]**2))\n\n dy = (2 * kc[:,[3]] * points[:,:,0] * points[:,:,1] \n + kc[:,[2]] * (r2 + 2*points[:,:,1]**2))\n \n x = (1 + kc[:,[0]]*r2 + kc[:,[1]]*r2.pow(2) + kc[:,[4]]*r2.pow(3)) * points[:,:,0] + dx\n y = (1 + kc[:,[0]]*r2 + kc[:,[1]]*r2.pow(2) + kc[:,[4]]*r2.pow(3)) * points[:,:,1] + dy\n \n points = torch.stack([x, y, torch.ones_like(x)], dim=-1)\n \n \n # Intrinsic\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:,0,0] = focal_length\n K[:,1,1] = focal_length\n K[:,2,2] = 1.\n K[:,:-1, -1] = camera_center\n\n # Apply camera intrinsicsrf\n points = points / points[:,:,-1].unsqueeze(-1)\n projected_points = torch.einsum('bij,bkj->bki', K, points)\n projected_points = projected_points[:, :, :-1]\n\n return projected_points", "def camera_pixels_to_camera_coords(left_pt, right_pt, nparrays=False):\n assert len(left_pt) == len(right_pt) == 2\n disparity = np.linalg.norm( np.array(left_pt) - np.array(right_pt) )\n (xx,yy,zz) = STEREO_MODEL.projectPixelTo3d( (left_pt[0],left_pt[1]), disparity )\n if nparrays:\n return np.array([xx,yy,zz])\n else:\n return [xx,yy,zz]", "def cam2pixel_torch(cam_coords, proj):\n batch, _, height, width = cam_coords.shape\n cam_coords = torch.reshape(cam_coords, [batch, 4, -1])\n unnormalized_pixel_coords = torch.matmul(proj, cam_coords)\n xy_u = unnormalized_pixel_coords[:, 0:2, :]\n z_u = unnormalized_pixel_coords[:, 2:3, :]\n\n pixel_coords = xy_u / (z_u + 1e-10)\n pixel_coords = torch.reshape(pixel_coords, [batch, 2, height, width])\n return pixel_coords.permute([0, 2, 3, 1])", "def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, 
self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y", "def affineTransform(img, pts, newPts):\n\ttmp = img.copy()\n\tif len(img.shape) is 3:\n\t\trows, cols, ch = img.shape\n\telse:\n\t\trows, cols = img.shape\n\tpts1 = np.float32(pts)\n\tpts2 = np.float32(newPts)\n\tM = cv2.getAffineTransform(pts1, pts2)\n\tdst = cv2.warpAffine(tmp, M, (cols, rows))\n\treturn dst", "def reproject(self, lon, lat):\n if self.xform is None:\n # if the CRS hasn't been determined yet, we set it from the first image's lat/lon (take the UTM crs)\n utm_i = str(int(math.floor((self.images[0].lon + 180) / 6 ) % 60) + 1).zfill(2)\n epsg_code = int('326' + utm_i) if (self.images[0].lat >= 0) else int('327' + utm_i)\n self.crs_dest = QgsCoordinateReferenceSystem(epsg_code)\n self.xform = QgsCoordinateTransform(self.crs_src, self.crs_dest, QgsProject.instance())\n return self.xform.transform(QgsPointXY(lon, lat))", "def TransformPoints(tFormMat,pts):\n pts = numpy.array(pts)\n nPts = len(pts)\n avgP = sum(pts)/nPts\n pts = pts - avgP\n res = [None]*nPts\n for i in range(nPts):\n res[i] = numpy.dot(tFormMat,pts[i])\n\n return res", "def transform(o):\n\t\t\tp = o.pos() - self.pos()\n\t\t\tx = cosa * p.x + sina * p.y\n\t\t\ty = -sina * p.x + cosa * p.y\n\t\t\treturn tuple(px_to_mm(x,y))", "def head_pose_points(image, rotation_vector, translation_vector, camera_matrix):\n rear_size = 1\n rear_depth = 0\n front_size = image.shape[1]\n front_depth = front_size*2\n val = [rear_size, rear_depth, front_size, front_depth]\n point_2d = get_2d_points(image, rotation_vector, translation_vector, camera_matrix, val)\n y = (point_2d[5] + point_2d[8])//2\n x = point_2d[2]\n \n return (x, y)", "def prepare_data(cameras, frame_points_3d, frame_points_2d, keyframe_idx):\n camera_params = np.empty((0, 9))\n for c in cameras:\n R, _ = cv2.Rodrigues(c.R_mat)\n camera = build_camera(R, c.t)\n camera_params = np.append(camera_params, [camera], axis=0)\n\n camera_indices = []\n point_indices = []\n points_2d = np.empty((0, 2))\n points_3d = np.empty((0, 3))\n\n camera_id = 0\n pt_id_counter = 0\n for k, pts_2d in enumerate(frame_points_2d):\n if k > 0:\n halfway_idx = keyframe_idx[k] - keyframe_idx[k - 1] - 1\n points_2d = np.vstack((points_2d, frame_points_2d[k-1][halfway_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-1][halfway_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-1][halfway_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-1][halfway_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-1][halfway_idx])\n\n if k > 1:\n end_idx = keyframe_idx[k + 1] - keyframe_idx[k - 1] - 3\n points_2d = np.vstack((points_2d, frame_points_2d[k-2][end_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-2][end_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-2][end_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + 
len(frame_points_2d[k-2][end_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-2][end_idx])\n\n points_2d = np.vstack((points_2d, frame_points_2d[k][0]))\n points_3d = np.vstack((points_3d, frame_points_3d[k][0]))\n camera_indices += [camera_id for _ in range(pts_2d.shape[1])]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + pts_2d.shape[1])]\n\n camera_id += 1\n pt_id_counter = pt_id_counter + pts_2d.shape[1]\n\n return camera_params, np.asarray(camera_indices), np.asarray(point_indices), points_3d, points_2d", "def project(self, point):\n return np.round(project(self.camera.P, point)).astype(int)", "def rec_transform(image, pts):\n ord_pts = order_points(pts)\n\n # find the dimension of the rectangular created by the given points", "def draw_rel_camera_pose(image: int, origin: list, camera_pose: list, plot_dir_path: str) -> None:\n\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(elev=10.)\n ax.set_title('camera pose image: %d' % image)\n scale = 7\n ax.set_xlim3d(-scale, scale)\n ax.set_ylim3d(-scale, scale)\n ax.set_zlim3d(-scale, scale)\n\n # replace the Y-Axis with Z-Axis\n ax.scatter(origin[0], origin[2], origin[1], c='black')\n for i in range(3):\n ax.plot([origin[0], camera_pose[i][0]], [origin[2], camera_pose[i][2]], [origin[1], camera_pose[i][1]])\n i += 1\n\n fig.savefig(f'{plot_dir_path}/%d.png' % image)\n plt.close(fig)\n plt.clf()", "def coordinates_to_imgpts(x, y):\n pts = np.array([np.flipud(np.transpose(np.vstack([x, y])))])\n return pts" ]
[ "0.75200397", "0.7221418", "0.72199947", "0.71426326", "0.71409965", "0.7138931", "0.7127518", "0.7100937", "0.70733917", "0.6988374", "0.69729286", "0.69044685", "0.6830623", "0.6829222", "0.6822007", "0.6778279", "0.67171186", "0.6710748", "0.66177326", "0.661222", "0.655764", "0.6540855", "0.6531074", "0.6498859", "0.6474865", "0.6451168", "0.6437138", "0.6397588", "0.6393258", "0.6328641", "0.63234365", "0.62792736", "0.62616396", "0.6254827", "0.62492806", "0.6248712", "0.6239259", "0.6215083", "0.62060887", "0.6201795", "0.6198591", "0.61922216", "0.6157212", "0.615541", "0.61516416", "0.6145841", "0.6144271", "0.6136897", "0.61312276", "0.61309564", "0.6120236", "0.6118542", "0.6115872", "0.6112858", "0.6111681", "0.61018366", "0.61004895", "0.6089682", "0.6089637", "0.60851085", "0.60762155", "0.60762155", "0.60506463", "0.60431975", "0.6042744", "0.6033046", "0.6031894", "0.60188174", "0.60187227", "0.60135955", "0.6012923", "0.5978421", "0.59734017", "0.59708846", "0.5970459", "0.59518033", "0.5946003", "0.5910833", "0.59079313", "0.5894531", "0.58802855", "0.5876366", "0.58723986", "0.58561325", "0.585025", "0.58485264", "0.5846957", "0.5838266", "0.5831698", "0.58296305", "0.5825727", "0.58224607", "0.5821111", "0.5820906", "0.5813698", "0.58088773", "0.5807661", "0.57956964", "0.5789369", "0.5789033" ]
0.73480624
1
Transforms image points to ideal camera points
def ImageToCamera(self, imagePoints):
    inverse_pars = self.ComputeInverseInnerOrientation()
    imagePoints = imagePoints.T

    if imagePoints.size == 2:
        imagePoints = np.reshape(np.array(imagePoints), (np.size(imagePoints), 1))

    T = np.array([[inverse_pars[0]], [inverse_pars[1]]])
    R = np.array([[inverse_pars[2], inverse_pars[3]], [inverse_pars[4], inverse_pars[5]]])

    return (np.dot(R, imagePoints - T)).T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img", "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def fourPointTransform(image, points):\r\n\r\n topLeft, topRight, bottomLeft, bottomRight = sortFourPoints(points)\r\n\r\n # Determine the maximum width\r\n topWidth = np.sqrt(((topRight[0] - topLeft[0]) ** 2) + ((topRight[1] - topLeft[1]) ** 2))\r\n bottomWidth = np.sqrt(((bottomRight[0] - bottomLeft[0]) ** 2) + ((bottomRight[1] - bottomLeft[1]) ** 2))\r\n width = max(int(topWidth), int(bottomWidth))\r\n\r\n # Determine the maximum height\r\n leftHeight = np.sqrt(((topLeft[0] - bottomLeft[0]) ** 2) + ((topLeft[1] - bottomLeft[1]) ** 2))\r\n rightHeight = np.sqrt(((topRight[0] - bottomRight[0]) ** 2) + ((topRight[1] - bottomRight[1]) ** 2))\r\n height = max(int(leftHeight), int(rightHeight))\r\n\r\n source = np.array([topLeft, topRight, bottomRight, bottomLeft], dtype=\"float32\")\r\n\r\n destination = np.array([[0, 0],\r\n [width - 1, 0],\r\n [width - 1, height - 1],\r\n [0, height - 1]], dtype=\"float32\")\r\n\r\n transformMatrix = cv2.getPerspectiveTransform(source, destination)\r\n\r\n return cv2.warpPerspective(image, transformMatrix, (width, height))", "def four_point_transform(self, image, pts):\n rect = []\n for j in range(4):\n rect.append([pts[j * 2], pts[j * 2 + 1]])\n\n rect = np.array(rect, dtype=\"float32\")\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype=\"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. 
\n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points", "def testCalculateTransform(self):\n # Create some points in the first frame.\n z = 4.0\n self.evaluator.camera_height = z\n first_points = numpy.array(\n [[0, 0, z], [2, 0, z], [2, 5, z], [0, 5, z]], dtype=numpy.float32)\n # Create a transformation that will move the camera\n R = numpy.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n t = numpy.array([[3.0], [-5.0], [0.0]])\n expected_result = numpy.eye(4)\n expected_result[0:3, 0:3] = R\n expected_result[0:3, 3:] = t\n # Determine where the second points would be given that.\n second_points = (numpy.matmul(\n R, first_points.transpose()) + t).transpose()\n # Create a simple intrinsic matrix to project onto a fictional camera\n intrinsic = numpy.array(\n [[1.0, 0.0, 20.0], [0.0, 1.0, 20.0], [0.0, 0.0, 1.0]])\n # Use no distortion or transformations\n rvec = numpy.zeros((3, 1))\n tvec = rvec\n distortion = numpy.zeros((5, 1))\n # Project the points into the camera\n (camera_first_points, _) = cv2.projectPoints(\n first_points, rvec, tvec, intrinsic, distortion)\n camera_first_points = camera_first_points.squeeze()\n (camera_second_points, _) = cv2.projectPoints(\n second_points, rvec, tvec, intrinsic, distortion)\n camera_second_points = camera_second_points.squeeze()\n # Using these projected points, can the object recover the correct initial transform\n result = self.evaluator._calculateTransform(\n camera_first_points, camera_second_points, intrinsic)\n # The matrix comparisions aren't reliable near zero, so check elements manually.\n for i in range(expected_result.shape[0]):\n for j in range(expected_result.shape[1]):\n result_element = result[i, j]\n expected_element = expected_result[i, j]\n self.assertAlmostEqual(result_element, expected_element, 6,\n 'Matrix element ({0:d}, {1:d}) is 
incorrect.'.format(i, j))", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * points[..., 1] / np.linalg.norm(points[..., [0, 2]],\n axis=-1) + self.center_y_px]).T\n # ensure that points' x values are also nan when the y values are nan\n transformed_points[np.isnan(transformed_points[..., 1])] = np.nan\n # return the points\n return transformed_points", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed", "def find_initial_position(img1, img2):\n # find points of interest in points\n img1_kp, img1_des = compute_orb(img1)\n img2_kp, img2_des = compute_orb(img2)\n\n # get closest 2 matches per point\n bf = cv2.BFMatcher(normType=cv2.NORM_HAMMING)\n matches = bf.knnMatch(img1_des, img2_des, k=2)\n\n good_matches = []\n pts1 = []\n pts2 = []\n # Lowe's ratio test\n for m, n in matches:\n if m.distance < 0.75*n.distance:\n good_matches.append(m)\n pts1.append(img1_kp[m.queryIdx].pt)\n pts2.append(img2_kp[m.trainIdx].pt)\n\n pts1 = np.float32(pts1)\n pts2 = np.float32(pts2)\n\n # essential matrix gives the motion of the points\n # to get motion of the camera, flip the inputs between pts1 and pts2\n essential_matrix, e_mask = cv2.findEssentialMat(pts2, pts1, intrinsic_camera_matrix)\n\n # select only inlier points as per the RANSAC method\n pts1 = pts1[e_mask.ravel() == 1]\n pts2 = pts2[e_mask.ravel() == 1]\n\n _, rotation, translation, mask, triangulated_points = cv2.recoverPose(essential_matrix, pts2, pts1, intrinsic_camera_matrix, distanceThresh=50)\n triangulated_points = np.asarray([np.divide(triangulated_points[0], triangulated_points[3]),\n np.divide(triangulated_points[1], triangulated_points[3]),\n np.divide(triangulated_points[2], triangulated_points[3])]).transpose()\n\n CAMERA_POSES.clear()\n CAMERA_POSES.append(np.hstack((np.identity(3), np.array([[0], [0], [0]]))))\n CAMERA_POSES.append(np.hstack((rotation, translation)))\n return rotation, translation, triangulated_points", "def convert_image_point_to_global_coordinates(points, camera_location):\n # TODO: The camera should take photos which record the camera_location, and scale factors etc.\n # This should be a method on such an 
image.\n\n # Convert to numpy object for a clean notation\n points = np.array(points)\n camera_location = np.array(camera_location)\n scale_factors = np.array([config.Y_PIXELS_TO_MILLIMETRE_SCALE, config.X_PIXELS_TO_MILLIMETRE_SCALE])\n camera_resolution = np.array(config.CAMERA_RESOLUTION)\n\n # Do the computation\n image_centre = camera_resolution / 2\n return camera_location + scale_factors * (points - image_centre)", "def CameraToImage(self, cameraPoints):\n # setting up the required matrices\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n if np.isscalar(a0):\n\n R = np.array([[a1, a2], [b1, b2]])\n T = np.array([[a0], [b0]])\n\n else:\n R = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n T = np.array([[a0[0]], [b0[0]]])\n\n cameraPoints = cameraPoints.T\n # computing the transformation to the image system\n return (T + np.dot(R, cameraPoints)).T", "def converte_coord(valor):\n\n pts1 = ([0,0],[24,0],[24,44],[0,44])\n pts1 = np.asarray(pts1, dtype = np.float32)\n pts2 = np.float32([[0,0],[100,0], [100,100], [0,100]])\n\n M = cv.getPerspectiveTransform(pts1,pts2)\n img2 = cv.warpPerspective(valor,M,(100,100))\n return img2", "def get_projections(self, points_in_camera_frame: ARRAY_LIKE,\n image: int = 0, temperature: Real = 0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n\n # ensure the input is an array\n points_in_camera_frame = np.asarray(points_in_camera_frame)\n\n # apply misalignment to the points\n if self.estimate_multiple_misalignments:\n if np.any(self.misalignment[image]): # optimization to avoid matrix multiplication\n points_in_camera_frame = rotvec_to_rotmat(self.misalignment[image]).squeeze() @ \\\n points_in_camera_frame\n\n else:\n if np.any(self.misalignment): # optimization to avoid matrix multiplication\n points_in_camera_frame = rotvec_to_rotmat(self.misalignment).squeeze() @ points_in_camera_frame\n\n # get the unitless image plane location\n pinhole_locations = points_in_camera_frame[:2] / points_in_camera_frame[2]\n\n # get the distorted image plane location\n image_locations = self.apply_distortion(pinhole_locations)\n\n # add the temperature based scaling\n image_locations *= self.get_temperature_scale(temperature)\n\n # get the pixel locations of the points, need to mess with transposes due to numpy broadcasting rules\n picture_locations = ((self.intrinsic_matrix[:, :2] @ image_locations).T + self.intrinsic_matrix[:, 2]).T\n\n return pinhole_locations, image_locations, picture_locations", "def four_point_transform(image, pts):\n rect = order_points(pts)\n width_first = np.sqrt(\n ((rect[2][0] - rect[3][0]) ** 2) + ((rect[2][1] - rect[3][1]) ** 2)\n )\n width_second = np.sqrt(\n ((rect[1][0] - rect[0][0]) ** 2) + ((rect[1][1] - rect[0][1]) ** 2)\n )\n max_width = max(int(width_first), int(width_second))\n height_first = np.sqrt(\n ((rect[1][0] - rect[2][0]) ** 2) + ((rect[1][1] - rect[2][1]) ** 2)\n )\n height_second = np.sqrt(\n ((rect[0][0] - rect[3][0]) ** 2) + ((rect[0][1] - rect[3][1]) ** 2)\n )\n max_height = max(int(height_first), int(height_second))\n dst = np.array(\n [\n [0, 0],\n [max_width - 1, 0],\n [max_width - 1, max_height - 1],\n [0, max_height - 1],\n ],\n dtype=\"float32\",\n )\n view_transform = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, view_transform, (max_width, max_height))\n return warped", "def 
imageFromCamera(self, points): # pragma: no cover\n # to be overloaded by the child class.\n return None", "def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def compute_perspective_transform(corner_points, width, height, image):\n # Create an array out of the 4 corner points\n corner_points_array = np.float32(corner_points)\n # Create an array with the parameters (the dimensions) required to build the matrix\n img_params = np.float32([[0, 0], [width, 0], [0, height], [width, height]])\n # Compute and return the transformation matrix\n matrix = cv2.getPerspectiveTransform(corner_points_array, img_params)\n img_transformed = cv2.warpPerspective(image, matrix, (width, height))\n return matrix, img_transformed", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def project_points(points, cam_matrix, trans, rot):\n\n # STEP 1: Transform pointcloud into new reference frame.\n points = np.dot(rot, points) + trans[:, None]\n\n # STEP 2: Project new pointcloud onto image frame using K matrix.\n # gives a 3 x N array of image plane coordinates in homogenous coordinates.\n homo_pixel_coords = np.dot(cam_matrix, points)\n\n # STEP 3: Convert homogenous coordinates to regular 2D coordinates.\n # To do this, you need to divide the first two coordinates of homo_pixel_coords\n # by the third coordinate.\n pixel_coords = homo_pixel_coords[:2] / homo_pixel_coords[2]\n\n # STEP 4: Convert to integers. Take the floor of pixel_coords then cast it\n # to an integer type, like numpy.int32\n pixel_coords = np.int32(np.floor(pixel_coords))\n\n return pixel_coords", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n 
point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image", "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def perspective_transform():\n src = np.float32([(220,720), (1110, 720), (570, 470), (722, 470)]) # Manually get these numbers from plot\n dst = np.float32([[320, 720], [920, 720], [320, 1], [920, 1]])\n M = cv2.getPerspectiveTransform(src, dst)\n Minv = cv2.getPerspectiveTransform(dst, src)\n\n return M, Minv", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def fun(params,n_cameras,n_points,camera_indices,point_indices,points_3d , points_2d):\n camera_params = params[:n_cameras * 6].reshape((n_cameras, 6))\n # points_3d = points_3d.T\n # points_3d = params[n_cameras * 7:].reshape((n_points, 3))\n # print(point_indices)\n points_proj = project(points_3d[point_indices], camera_params[camera_indices])\n return (points_proj - points_2d).ravel()", "def rectangular_perpective_transform(image, points):\n # We first order our points so they go clockwise from the top left. 
Top left point must have the\n # lowest coordinate sum, while the bottom right must have the largest\n ordered_pts = np.empty((4, 2), dtype = 'float32')\n pt_sum = np.sum(points, axis = 1)\n ordered_pts[0] = points[np.argmin(pt_sum)]\n ordered_pts[2] = points[np.argmax(pt_sum)]\n\n # the top right should have smallest coordinate difference, bottom left the largest\n pt_diff = np.diff(points, axis = 1)\n ordered_pts[1] = points[np.argmin(pt_diff)]\n ordered_pts[3] = points[np.argmax(pt_diff)]\n\n # for convenience, we store the points as variables for convenience in calculating width / height\n (top_left, top_right, bottom_right, bottom_left) = ordered_pts\n\n top_width = np.linalg.norm(top_right - top_left)\n bottom_width = np.linalg.norm(bottom_right - bottom_left)\n width = int(max(top_width, bottom_width))\n\n left_height = np.linalg.norm(bottom_left - top_left)\n right_height = np.linalg.norm(bottom_right - top_right)\n height = int(max(left_height, right_height))\n\n # create destination coordinate points to give us a top-down view of the subimage enclosed by the original points\n dest_points = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype = 'float32')\n transform_matrix = cv2.getPerspectiveTransform(ordered_pts, dest_points)\n return cv2.warpPerspective(image, transform_matrix, (width, height))", "def solve_pose(self, image_points):\n\n if self.r_vec is None:\n (_, rotation_vector, translation_vector) = cv2.solvePnP(\n self.model_points, image_points, self.camera_matrix, self.dist_coeefs)\n self.r_vec = rotation_vector\n self.t_vec = translation_vector\n\n (_, rotation_vector, translation_vector) = cv2.solvePnP(\n self.model_points,\n image_points,\n self.camera_matrix,\n self.dist_coeefs,\n rvec=self.r_vec,\n tvec=self.t_vec,\n useExtrinsicGuess=True)\n\n return (rotation_vector, translation_vector)", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def transform(self, image):\n # e) use cv2.warpPerspective() to warp your image to a top-down view\n # Warp the image using OpenCV warpPerspective()\n w, h = image.shape[1], image.shape[0]\n return cv2.warpPerspective(image, self.p_mat, (w, h))", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def get_2d_points(image, rotation_vector, translation_vector, camera_matrix, val):\n point_3d = []\n dist_coeffs = np.zeros((4,1))\n rear_size = val[0]\n rear_depth = val[1]\n point_3d.append((-rear_size, -rear_size, rear_depth))\n point_3d.append((-rear_size, rear_size, rear_depth))\n point_3d.append((rear_size, rear_size, rear_depth))\n point_3d.append((rear_size, -rear_size, rear_depth))\n point_3d.append((-rear_size, -rear_size, rear_depth))\n \n front_size = val[2]\n front_depth = val[3]\n point_3d.append((-front_size, -front_size, front_depth))\n 
point_3d.append((-front_size, front_size, front_depth))\n point_3d.append((front_size, front_size, front_depth))\n point_3d.append((front_size, -front_size, front_depth))\n point_3d.append((-front_size, -front_size, front_depth))\n point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)\n \n # Map to 2D image points\n (point_2d, _) = cv2.projectPoints(point_3d,rotation_vector,translation_vector,camera_matrix,dist_coeffs)\n point_2d = np.int32(point_2d.reshape(-1, 2))\n return point_2d", "def project_points(self, points_3d, camera):\n batch_size = points_3d.shape[0]\n device = points_3d.device\n cam_t = torch.stack([camera[:, 1], camera[:, 2], 2 * self.focal_length / (self.img_res * camera[:, 0] + 1e-09)], dim=-1)\n camera_center = camera.new_zeros([batch_size, 2])\n rot_t = torch.eye(3, device=device, dtype=points_3d.dtype).unsqueeze(0).expand(batch_size, -1, -1)\n joints_2d = perspective_projection(points_3d, rotation=rot_t, translation=cam_t, focal_length=self.focal_length, camera_center=camera_center)\n return joints_2d", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def transform(self, previousimage):", "def get_warped_points(points, M):\n\n # verify that points type is ndarray, convert if not\n if type(points).__module__ != np.__name__:\n points = np.array(points)[np.newaxis, :]\n\n # reverse order of input points [y,x] -> [x,y]\n # points = points[...,::-1]\n\n # Find full affine marix\n rowM = np.array([[0, 0, 1]])\n M = np.concatenate((M, rowM), axis=0)\n\n size = len(points.shape)\n # p=points.copy() # for debug\n\n # option 1 - use cv2.perspectiveTransform()\n # cv2.perspectiveTransform() expects to receive 3D array, so we need to verify that points has 3 dimensions\n for m in range(3 - size):\n points = points[np.newaxis, ...]\n\n points_warped = cv2.perspectiveTransform(points.astype(np.float64), M)\n points_warped = np.squeeze(points_warped)\n\n # reverse order of input points [y,x] -> [x,y]\n # points_warped = points_warped[..., ::-1]\n\n '''\n # option 2 - use matrix multiplication\n # assumes points are ordered (y,x)!\n points = p # for debug\n if size == 1:\n rowP = np.ones(1)\n elif size == 2:\n rowP = np.ones(N)\n\n points = np.concatenate((points, rowP), axis=0)\n points_warped2 = np.dot(M, points)\n points_warped2 = points_warped2[:-1]\n\n diff = np.sum(np.abs(points_warped2 - points_warped)) # for debug\n '''\n return points_warped", "def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst", "def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img", "def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], 
\"float32\")\n return points", "def fun(params, n_cameras, n_points, camera_indices, point_indices, points_2d, theta):\n \n camera_params = params[:n_cameras * 9].reshape((n_cameras, 9))\n points_3d = params[n_cameras * 9:].reshape((n_points, 3))\n points_proj = project(points_3d[point_indices], camera_params[camera_indices], theta)\n print(\"Residual is: \", (points_proj - points_2d).ravel())\n return (points_proj - points_2d).ravel()", "def affineTransform(img, pts, newPts):\n\ttmp = img.copy()\n\tif len(img.shape) is 3:\n\t\trows, cols, ch = img.shape\n\telse:\n\t\trows, cols = img.shape\n\tpts1 = np.float32(pts)\n\tpts2 = np.float32(newPts)\n\tM = cv2.getAffineTransform(pts1, pts2)\n\tdst = cv2.warpAffine(tmp, M, (cols, rows))\n\treturn dst", "def camera_to_object_transform(self):\n # form the full object to camera transform\n T_stp_camera = self.stp_to_camera_transform()\n T_obj_stp = self.object_to_stp_transform()\n T_obj_camera = T_stp_camera.dot(T_obj_stp)\n return T_obj_camera", "def to_camera_coords(K, px_pts):\n if len(px_pts.shape) == 1:\n px_pts = np.expand_dims(px_pts, axis=0)\n num_pts = len(px_pts)\n\n px_pts = np.concatenate([\n px_pts, np.ones((num_pts, 1))\n ], axis=-1)\n\n norm_pts = np.matmul(np.linalg.inv(K), px_pts.T)[:-1,:]\n return norm_pts.T", "def ImageToRay(self, imagePoints):\n pass # delete after implementations", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n\n #get projection matrix\n pmatrix = projection_matrix(R, T, K)\n\n #add 4th component to points\n ones = np.ones([1,len(X[0])])\n xones=np.row_stack((X,ones))\n\n #calculate pixel coordinates\n X_camera = pmatrix.dot(xones)\n\n return X_camera", "def perspective_transform(img, chars):\n # img_expanded, (x1,y1,x2,y2) = simple_expand(img)\n x1,y1,x2,y2 = get_xy_minmax(chars)\n trans = get_random_transform(img, (x1,y1,x2,y2))\n warped_img = cv2.warpPerspective(img, trans, (img.shape[1], img.shape[0]))\n for char in chars:\n for corner in char['corners']:\n # print(corner)\n a = [corner[0], corner[1], 1]\n transformed_corner = np.matmul(trans, np.asarray(a).reshape(3,1))\n corner[0] = transformed_corner[0] / transformed_corner[2]\n corner[1] = transformed_corner[1] / transformed_corner[2]\n # print(corner)\n return warped_img, chars", "def compute_perspective_transform(self, binary_image):\r\n transform_src = np.float32([[300, 309], [500, 315], [120, 381], [685, 392]])\r\n transform_dst = np.float32([ [0,0], [800, 0], [0,600], [800,600]])\r\n perspective_transform = cv2.getPerspectiveTransform(transform_src, transform_dst)\r\n inverse_perspective_transform = cv2.getPerspectiveTransform(transform_dst, transform_src)\r\n warped_image = cv2.warpPerspective(binary_image, perspective_transform, \r\n (binary_image.shape[1], binary_image.shape[0]), \r\n flags=cv2.INTER_NEAREST)\r\n\r\n return warped_image, inverse_perspective_transform", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n X_camera = np.matmul(R, X) + T\n X_camera = X_camera / X_camera[2, :] # Normalize\n\n if distortion_flag:\n radiusSq = (X_camera[0, :] * X_camera[0, :]) + (X_camera[1, :] * X_camera[1, :])\n X_camera = X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # X_camera = (X_camera * (1 + (distortion_params[0] * radiusSq) + 
(distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # + (2 * distortion_params[2] * X_camera[0,:] * X_camera[1,:]) + distortion_params[3] * (radiusSq + (2 * X_camera * X_camera)))\n\n X_camera[2, :] = 1.0\n X_camera = np.matmul(K, X_camera)\n X_camera = X_camera[:2, :]\n\n return X_camera", "def rec_transform(image, pts):\n ord_pts = order_points(pts)\n\n # find the dimension of the rectangular created by the given points", "def scale_camera(cam, scale=1):\n new_cam = np.copy(cam)\n # focal: \n new_cam[1][0][0] = cam[1][0][0] * scale\n new_cam[1][1][1] = cam[1][1][1] * scale\n # principle point:\n new_cam[1][0][2] = cam[1][0][2] * scale\n new_cam[1][1][2] = cam[1][1][2] * scale\n return new_cam", "def transform32(points, H, add=(0, 0)):\n points = np.float32(points)\n return np.int32(cv2.perspectiveTransform(points.reshape(1, -1, 2), H).reshape(-1, 2) + add)", "def compute_point_perspective_transformation(matrix, list_downoids):\n # Compute the new coordinates of our points\n list_points_to_detect = np.float32(list_downoids).reshape(-1, 1, 2)\n transformed_points = cv2.perspectiveTransform(list_points_to_detect, matrix)\n # Loop over the points and add them to the list that will be returned\n transformed_points_list = list()\n for i in range(0, transformed_points.shape[0]):\n transformed_points_list.append([transformed_points[i][0][0], transformed_points[i][0][1]])\n return transformed_points_list", "def evaluate(self, points):\n points = np.array(points, np.float64)\n output_shape = points.shape[1:]\n points.shape = (points.shape[0], seq_prod(output_shape))\n cmapi = self.image.coordmap.inverse()\n voxels = cmapi(points.T).T\n V = map_coordinates(self.data,\n voxels,\n order=self.order,\n mode=self.mode,\n cval=self.cval,\n prefilter=False)\n # ndimage.map_coordinates returns a flat array,\n # it needs to be reshaped to the original shape\n V.shape = output_shape\n return V", "def warpImag(src_img: np.ndarray, dst_img: np.ndarray) -> None:\r\n\r\n dst_p = []\r\n fig1 = plt.figure()\r\n size = src_img.shape\r\n # no need to take the coordinates of the second image in order to do the homography just pick the corners\r\n # coordinates\r\n pts_src = np.array(\r\n [\r\n [0, 0],\r\n [size[1] - 1, 0],\r\n [size[1] - 1, size[0] - 1],\r\n [0, size[0] - 1]\r\n ], dtype=float\r\n )\r\n def onclick_1(event):\r\n x = event.xdata\r\n y = event.ydata\r\n print(\"Loc: {:.0f},{:.0f}\".format(x, y))\r\n\r\n plt.plot(x, y, '*r')\r\n dst_p.append([x, y])\r\n\r\n if len(dst_p) == 4:\r\n plt.close()\r\n plt.show()\r\n\r\n # display image 1\r\n cid = fig1.canvas.mpl_connect('button_press_event', onclick_1)\r\n plt.imshow(dst_img)\r\n plt.show()\r\n dst_p = np.array(dst_p)\r\n\r\n ##### Your Code Here ######\r\n h = computeHomography(pts_src, dst_p) # my function to find the homography matrix in order to do projection\r\n # to the coordinates by this equations from opencv dst(x,y) = src(m11x + m12y +m13/ m31x +m32y +m33\r\n # , m21x + m22y +m23/ m31x +m32y +m33)\r\n im_temp = warpPerspective(src_img , h, (dst_img.shape[1],dst_img.shape[0]))\r\n plt.imshow(im_temp)\r\n plt.show()\r\n im_dst2 = im_temp + dst_img\r\n plt.imshow(im_dst2.astype('uint8'))\r\n plt.show()\r\n\r\n pass", "def cam2pixel(self, cam_coords, pose):\n\n b, _, h, w = cam_coords.size()\n cam_coords_flat = cam_coords.view(b,3,-1) # [B,3,H*W]\n pcoords = pose[:,:,0:3].bmm(cam_coords_flat) + pose[:,:,3].view(b,3,1) #Bx[3x3 x 3xH*W] = [B x 3 x H*W]\n X, Y, Z = 
pcoords[:,0,:].clamp(-1e20,1e20), pcoords[:,1,:].clamp(-1e20,1e20), pcoords[:,2,:].clamp(1e-20,1e20) #each are [B x H*W] \n X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W]\n\n X_mask = ((X_norm > 1)+(X_norm < -1)).detach()\n X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray\n Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach()\n Y_norm[Y_mask] = 2\n\n pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]\n return pixel_coords.view(b,h,w,2)", "def myWarpPerspective(img, H, output_shapes):\n c, r = output_shapes\n \n # Create an output canvas according to the parameter \"output_shapes\"\n if len(img.shape) == 3:\n output = np.zeros((r, c, 3))\n else:\n output = np.zeros((r, c, 1))\n\n # List of pixel coordinates in canvas\n inverse_map = [[i, j] for i in range(c) for j in range(r)]\n\n # Covert the coordinates in the system of img2 back to the system of img1 \n # to find out the reference points\n inverse_map = np.asarray(inverse_map)\n inverse_map = myPerspectiveTransform(inverse_map, np.linalg.inv(H))\n \n \n for i in range(c):\n for j in range(r):\n index = i*r + j\n ix, iy = inverse_map[index]\n \n # Because the converted coords. are float, \n # we need to find out four ref. points to do bilinear interpolation\n tix, bix = np.ceil(ix), np.floor(ix)\n tiy, biy = np.ceil(iy), np.floor(iy)\n\n x_ratio = ix - bix\n y_ratio = iy - biy\n\n # Indexing does not allow float indices\n tix, bix, tiy, biy = np.int32(tix), np.int32(bix), np.int32(tiy), np.int32(biy)\n \n # Boundary checking: each ref point should locate within the input image\n if bix < 0 or biy < 0 or tix >= img.shape[1] or tiy >= img.shape[0]:\n continue\n else:\n # Bilinear interpolation\n output[j, i] = x_ratio*y_ratio*img[tiy, tix] \\\n + x_ratio*(1-y_ratio)*img[biy, tix] \\\n + (1-x_ratio)*y_ratio*img[tiy, bix] \\\n + (1-x_ratio)*(1-y_ratio)*img[biy, bix]\n output[j, i] = np.round(output[j, i])\n\n # Cast back to uint8 because of displaying and return results\n return np.uint8(output)", "def pixel2cam(self, depth, intrinsics_inv):\n b, _, h, w = depth.size()\n i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]\n j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]\n ones = torch.ones(1,h,w).type_as(depth)\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]\n ###pixel_coords is an array of camera pixel coordinates (x,y,1) where x,y origin is the upper left corner of the image.\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).view(b,3,-1) #.contiguous().view(b, 3, -1) # [B, 3, H*W]\n #cam_coords = intrinsic_inv.expand(b,3,3).bmm(current_pixel_coords).view(b,3,h,w)\n cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b,3,h,w)\n return cam_coords * depth", "def project_and_draw(img, X_3d, K, R, T, distortion_flag, distortion_parameters):\n # call your \"project_points\" function to project 3D points to camera coordinates\n # draw the projected points on the image and save your output image here\n # cv.imwrite(output_name, img_array)\n X_camera = project_points(X_3D,K,R,T,distortion_flag,distortion_parameters)\n\n newimg=copy.copy(img)\n color = (0, 230, 0)\n if not distortion_flag:\n color = (0,0,230)\n\n Xp = []\n Xp.append([])\n Xp.append([])\n\n for cur in range(0,np.shape(X_camera)[1]):\n x = X_camera[0,cur]\n y = X_camera[1,cur]\n z = X_camera[2,cur]\n xp = 
int(x/z)\n yp = int(y/z)\n Xp[0].append(xp)\n Xp[1].append(yp)\n Xp2 = np.row_stack((Xp,np.ones(len(Xp[0]))))\n if(distortion_flag):\n Xp2 = distort(Xp2,K,distortion_parameters)\n\n for cur in range(0, np.shape(X_camera)[1]):\n x = Xp2[0, cur]\n y = Xp2[1, cur]\n newimg = cv.circle(newimg, (int(x), int(y)), 2, color, 0)\n\n #cv.imshow(\"Test\",newimg)\n #cv.waitKey(0)\n\n return newimg", "def compute_camera_calib_distortion_params():\r\n nx = 9#number of inside corners in x\r\n ny = 6#number of inside corners in y\r\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\r\n objp = np.zeros((ny*nx,3), np.float32)\r\n objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)\r\n\r\n # Arrays to store object points and image points from all the images.\r\n objpoints = [] # 3d points in real world space\r\n imgpoints = [] # 2d points in image plane.\r\n\r\n # Read a particular image just to get image size (all images in the directory are same size)\r\n img = cv2.imread('./camera_cal/calibration3.jpg')\r\n img_size = (img.shape[1], img.shape[0])\r\n # Make a list of calibration images\r\n images = glob.glob('./camera_cal/calibration*.jpg')\r\n # Step through the list and search for chessboard corners\r\n for idx, fname in enumerate(images):\r\n img = cv2.imread(fname)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\n # Find the chessboard corners\r\n ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)\r\n\r\n # If found, add object points, image points\r\n if ret == True:\r\n objpoints.append(objp)\r\n imgpoints.append(corners)\r\n # # Draw and display the corners\r\n # cv2.drawChessboardCorners(img, (nx,ny), corners, ret)\r\n # #write_name = 'corners_found'+str(idx)+'.jpg'\r\n # #cv2.imwrite(write_name, img)\r\n # cv2.imshow('img', img)\r\n # cv2.waitKey(500)\r\n\r\n # Do camera calibration given object points and image points\r\n ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)\r\n # Save the camera calibration result for later use (we won't worry about rvecs / tvecs)\r\n dist_pickle = {}\r\n dist_pickle[\"mtx\"] = mtx\r\n dist_pickle[\"dist\"] = dist\r\n pickle.dump( dist_pickle, open( \"data/cam_calib_pickle.p\", \"wb\" ) )\r\n print(\"Pickling done\")", "def head_pose_points(image, rotation_vector, translation_vector, camera_matrix):\n rear_size = 1\n rear_depth = 0\n front_size = image.shape[1]\n front_depth = front_size*2\n val = [rear_size, rear_depth, front_size, front_depth]\n point_2d = get_2d_points(image, rotation_vector, translation_vector, camera_matrix, val)\n y = (point_2d[5] + point_2d[8])//2\n x = point_2d[2]\n \n return (x, y)", "def getAffineTransform(self, coord1, coord2):\n num_coords = 2 * len(coord1)\n A = np.zeros((num_coords, 6))\n b = []\n for point2 in coord2:\n b.append(float(point2[0]))\n b.append(float(point2[1]))\n b = np.asarray(b)\n i = 0\n for point1 in coord1:\n A[i, 0:2] = point1[0:2]\n A[i, 2] = 1\n A[i+1, 3:5] = point1[0:2]\n A[i+1, 5] = 1\n i += 2\n A = np.asarray(A)\n b = np.asarray(b)\n x = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), b.T)\n self.depth2rgb_affine = np.reshape(x, (2, 3))\n csv.writer(open(\"depth2rgb_affine.cfg\", \"w+\", newline=''), delimiter=',').writerows(self.depth2rgb_affine)\n # else:\n # x = np.vstack([np.reshape(x,(2,3)),[0,0,1]])\n # self.cam_ext_mat = x\n # A = [point[i,j+0:j+3].astype(np.float32) for i,point in enumerate(coord1) if i%2 == 0]\n # pts1 = coord1[0:3].astype(np.float32)\n # pts2 = coord2[0:3].astype(np.float32)\n # 
print(cv2.getAffineTransform(pts1, pts2))\n # return cv2.getAffineTransform(pts1, pts2)", "def project(face_image, u):\n \n # finding the magnitude of each component\n a = np.matmul(face_image, u)\n \n # use a's to get the projection back\n res = np.matmul(u, a.T)\n\n return res", "def pose_2d_pts(self,image):\n '''\n image- rgb image \n return:-\n pts - list of 2d pose landmarks as img coords\n image- rgb image on which the 2d pose landmarks are drawn\n ''' \n pts=[]\n imgRGB=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n results=pose.process(imgRGB)\n if results.pose_landmarks:\n mpDraw.draw_landmarks(image,results.pose_landmarks,mpPose.POSE_CONNECTIONS)\n for id,lm in enumerate(results.pose_landmarks.landmark):\n h,w,c=image.shape\n imgx,imgy=int(lm.x*w),int(lm.y*h)\n \n pts.append((imgx,imgy)) \n return pts,image", "def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the image\n # pixels = pixels[mask_x & mask_y]\n return pixels", "def __set_perspective(self):\n\n src = np.float32([[(.42 * self.img_shape[1],.65 * self.img_shape[0] ),\n (.58 * self.img_shape[1], .65 * self.img_shape[0]),\n (0 * self.img_shape[1],self.img_shape[0]),\n (1 * self.img_shape[1], self.img_shape[0])]])\n\n dst = np.float32([[0,0],\n [self.img_shape[1],0],\n [0,self.img_shape[0]],\n [self.img_shape[1],self.img_shape[0]]])\n\n self.M = cv2.getPerspectiveTransform(src, dst)\n self.M_inv = cv2.getPerspectiveTransform(dst, src)", "def camera_transformation_from_pose(azimutal, elevation):\n azimutal, elevation = azimutal * 2. * np.pi / 360., elevation * 2. * np.pi / 360.\n azimutal *= -1.\n elevation *= -1.\n r_y = np.array([[np.cos(elevation), 0, np.sin(elevation)],\n [0, 1, 0],\n [-np.sin(elevation), 0, np.cos(elevation)]])\n r_z = np.array([[np.cos(azimutal), -np.sin(azimutal), 0],\n [np.sin(azimutal), np.cos(azimutal), 0],\n [0, 0, 1]])\n r = r_z.dot(r_y)\n # world_to_camera matrix, camera_to_world matrix\n return r, np.linalg.inv(r)", "def perspective_projection(points, rotation, translation, focal_length, camera_center):\n batch_size = points.shape[0]\n K = torch.zeros([batch_size, 3, 3], device=points.device)\n K[:, 0, 0] = focal_length\n K[:, 1, 1] = focal_length\n K[:, 2, 2] = 1.0\n K[:, :-1, -1] = camera_center\n points = torch.einsum('bij,bkj->bki', rotation, points)\n points = points + translation.unsqueeze(1)\n projected_points = points / points[:, :, -1].unsqueeze(-1)\n projected_points = torch.einsum('bij,bkj->bki', K, projected_points)\n projected_points = projected_points[:, :, :-1]\n return projected_points", "def warp(img1, img2, M):\n\n # Get width and height of input images \n w1,h1 = img1.shape[:2]\n w2,h2 = img2.shape[:2]\n\n # Get the canvas dimesions\n img2_dims = np.float32([ [0,0], [0,w2], [h2, w2], [h2,0] ]).reshape(-1,1,2)\n img1_dims_temp = np.float32([ [0,0], [0,w1], [h1, w1], [h1,0] ]).reshape(-1,1,2)\n\n # Find out the boundary of img1 after projected onto the coord. 
system of img2\n img1_dims = myPerspectiveTransform(img1_dims_temp, M)\n\n # Resulting dimensions\n result_dims = np.concatenate( (img1_dims, img2_dims), axis = 0)\n \n # Getting images together\n # Calculate dimensions of match points\n x_min, y_min = np.int32(result_dims.min(axis=0).ravel() - 0.5)\n x_max, y_max = np.int32(result_dims.max(axis=0).ravel() + 0.5)\n\n # Create output array after affine transformation \n transform_dist = [-x_min,-y_min]\n transform_array = np.array([[1, 0, transform_dist[0]], \n [0, 1, transform_dist[1]], \n [0,0,1]]) \n \n # Warp images to get the resulting image\n result_img = myWarpPerspective(img1, transform_array.dot(M),\n (x_max-x_min, y_max-y_min))\n alpha = 0.1\n #result_img[transform_dist[1]:w1+transform_dist[1], \n # transform_dist[0]:h1+transform_dist[0]] = img2 \n print(transform_dist)\n #result_img[transform_dist[1]:w1+transform_dist[1], transform_dist[0]:transform_dist[0]+h1] = img1[transform_dist[1]:w1+transform_dist[1], transform_dist[0]:transform_dist[0]+h1] \n #result_img[transform_dist[1]:w1+transform_dist[1], \n # transform_dist[0]:transform_dist[0]+50] = img2[0:w1 , 0 : 50] \n alpha = 0.5\n img1_rest = x_max-x_min - h1\n print(img1_rest)\n #print(h1)\n for j in range(0 , h1):\n for i in range(0 , w1):\n alpha = 0.02 * j\n if alpha > 1:\n alpha = 1\n \n result_img[i + transform_dist[1], j + transform_dist[0]] = img2[i , j] * alpha + result_img[i + transform_dist[1] , j + transform_dist[0]] *(1 - alpha)\n #result_img[i + transform_dist[1], j + transform_dist[0]] = img2[i , j] * alpha \n return result_img", "def displace_to_pose(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros', return_coordinates=False):\n check_sizes(img, 'img', 'B3HW')\n\n src_pixel_coords = get_displacement_pixel_transformation(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode=rotation_mode, padding_mode=padding_mode)\n projected_img = torch.nn.functional.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)\n if return_coordinates:\n return projected_img, src_pixel_coords\n else:\n return projected_img", "def adjustPerspectiveX(img, idx=-1, fac=0.15, scale=(1.0, 1.0)):\n h1, w1, _ = img.shape\n\n w, h = int(w1 * scale[0]), int(h1 * scale[1])\n aw = (w1 - w) // 2\n ah = (h1 - h) // 2\n\n dh = int(fac * w)\n dw = int(fac * h)\n pts1 = np.float32([[0, 0], [w1, 0], [0, h1], [w1, h1]])\n\n views = []\n #1. from left to right\n #pts2 = np.float32([[0, 0], [w-dw, dh], [0, h], [w-dw, h-dh]])\n pts2 = np.float32([[aw, ah], [w - dw, dh], [aw, h - ah], [w - dw, h - dh]])\n views.append(pts2)\n\n #2. from right to left\n pts2 = np.float32([[dw, dh], [w, 0], [dw, h - dh], [w, h]])\n views.append(pts2)\n\n #3. from bottom to head\n pts2 = np.float32([[dw, dh], [w - dw, dh], [0, h], [w, h]])\n views.append(pts2)\n\n #4. from header to bottom\n pts2 = np.float32([[0, 0], [w, 0], [dw, h - dh], [w - dw, h - dh]])\n views.append(pts2)\n\n ##5. from top-left to bottom-right\n pts2 = np.float32([[0, 0], [w - dw/2, dh/2], [dw/2, h-dh/2], [w-dw, h-dh]])\n views.append(pts2)\n\n #6. from bottom-right to top-left\n pts2 = np.float32([[dw, dh], [w-dw/2, dh/2], [dw/2, h-dh/2], [w, h]])\n views.append(pts2)\n pts2 = np.float32([[0, 0], [w-dw/2, dh/2], [dw/2, h-dh/2], [w, h]])\n views.append(pts2)\n\n #7. from top-right to bottom-left\n pts2 = np.float32([[dw/2, dh/2], [w, 0], [dw, h-dh], [w-dw/2, h-dh/2]])\n views.append(pts2)\n\n #8. 
from bottom-left to top-right\n pts2 = np.float32([[dw/2, dh/2], [w-dw, dh], [0, h], [w-dw/2, h-dh/2]])\n views.append(pts2)\n pts2 = np.float32([[dw/2, dh/2], [w, 0], [0, h], [w-dw/2, h-dh/2]])\n views.append(pts2)\n\n if idx < 0:\n idx = random.randint(0, len(views) - 1)\n else:\n idx = idx % len(views)\n\n pts2 = views[idx]\n fcolor = _genRandomColor()\n M = cv2.getPerspectiveTransform(pts1, pts2)\n img2 = cv2.warpPerspective(img, M, (w, h),\n borderMode=cv2.BORDER_CONSTANT, borderValue=fcolor)\n\n ## get it back\n #M = cv2.getPerspectiveTransform(pts2, pts1)\n #img3 = cv2.warpPerspective(img2, M, (w, h))\n\n if w != w1 or h != h1:\n bg_img = _genRandomImg(img.shape)\n img2 = randomPaste(bg_img, img2)\n \n return img2", "def perspective_transform(self, undistorted, direction='forward'):\n\t\t# Source image points\n\t\tsrc = np.float32([[255, 695], [585, 455], [700, 455], [1060, 690]])\n\t\t# Destination image points\n\t\tdst = np.float32([[305, 695], [305, 0], [1010, 0], [1010, 690]])\n\t\t# Perform forward or inverse perspective transform\n\t\tif direction == 'forward':\n\t\t\t# Compute the perspective transform, M\n\t\t\tM = cv2.getPerspectiveTransform(src, dst)\n\t\t\t# Create warped image - uses linear interpolation\n\t\t\treturn cv2.warpPerspective(undistorted, M, (undistorted.shape[1], undistorted.shape[0]), flags=cv2.INTER_LINEAR)\n\t\telif direction == 'inverse':\n\t\t\t# Compute the inverse also by swapping the input parameters\n\t\t\tMinv = cv2.getPerspectiveTransform(dst, src)\n\t\t\treturn cv2.warpPerspective(undistorted, Minv, (undistorted.shape[1], undistorted.shape[0]), flags=cv2.INTER_LINEAR)", "def update_transform(self):\n\n self.a = self.scale * self.pixel_size * math.cos(self.angle)\n self.d = self.scale * self.pixel_size * math.sin(self.angle)\n self.b = self.d\n self.e = -self.a\n self.c = self.point.x() - self.a*self.width/2.0 - self.b*self.height/2.0\n self.f = self.point.y() - self.d*self.width/2.0 - self.e*self.height/2.0\n\n self.bounding_box = [[self.c,self.f],[self.c+self.a*self.width,self.f+self.d*self.width],[self.c+self.a*self.width+self.b*self.height,self.f+self.d*self.width+self.e*self.height],[self.c+self.b*self.height,self.f+self.e*self.height],]", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y", "def transformImage( iImage, oMat2D ):\n # ustvari diskretno mrezo tock\n gx, gy = np.meshgrid( range(iImage.shape[1]), \\\n range(iImage.shape[0]), 
\\\n indexing = 'xy' ) \n # ustvari Nx3 matriko vzorcnih tock \n pts = np.vstack( (gx.flatten(), gy.flatten(), np.ones( (gx.size,))) ).transpose()\n # preslikaj vzorcne tocke\n pts = np.dot( pts, oMat2D.transpose() )\n # ustvari novo sliko z interpolacijo sivinskih vrednosti\n oImage = interpolate1Image2D( iImage, \\\n pts[:,0].reshape( gx.shape ), \\\n pts[:,1].reshape( gx.shape ) )\n oImage[np.isnan( oImage )] = 0\n return oImage", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n #to transform a point from Lidar framce to camera frame\n #reshape the flat line with 12 elements to 3X4 matrix\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n#print('velo2cam', velo_to_cam)\n inv_rect = np.linalg.inv(rect)\n #select all rows and only first three columns\n#print('velo_to_cam[:, :3]', velo_to_cam[:, :3])\n #select all rows and only first three columns\n inv_velo_to_cam = np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def projective_inverse_warp_torch2(\n img, depth, pose, src_intrinsics, tgt_intrinsics, tgt_height, tgt_width, ret_flows=False):\n batch, height, width, channels = img.shape\n # Construct pixel grid coordinates (x, y, 1) for each pixel.\n # Duplicated for N (e.g. 4) of INPUT images (batch)\n pixel_coords = meshgrid_abs_torch(batch, tgt_height, tgt_width, img.device, False)\n\n # Note: \"target\" here means actually \"ref image\", forget about the ground truth targets!\n # You project pixels from \"target\" to the multiple inputs, not the other way round\n # Convert pixel coordinates to the target camera frame, 3D camera coords (X, Y, Z), seems OK so far...\n # Note: these are points in 3D camera coords (C) of the target camera, not world coords (W) !!!\n cam_coords = pixel2cam_torch(depth, pixel_coords, tgt_intrinsics)\n\n # Construct a 4x4 intrinsic matrix, why? 
wouldn't 3x4 suffice?\n filler = torch.tensor([[[0., 0., 0., 1.]]], device=img.device)\n filler = filler.repeat(batch, 1, 1)\n src_intrinsics4 = torch.cat([src_intrinsics, torch.zeros([batch, 3, 1], device=img.device)], axis=2)\n src_intrinsics4 = torch.cat([src_intrinsics4, filler], axis=1)\n\n # Get a 4x4 transformation matrix from 'target' camera frame to 'source'\n # pixel frame, looks OK\n proj_tgt_cam_to_src_pixel = torch.matmul(src_intrinsics4, pose)\n src_pixel_coords = cam2pixel_torch(cam_coords, proj_tgt_cam_to_src_pixel)\n\n # print(f'src_pixel_coords shape {src_pixel_coords.shape}')\n # print(f'src_pixel_coords {L(src_pixel_coords[:, :, :3,:])}')\n\n # Now we get trouble !\n if False:\n print(('src_pixel_coords', src_pixel_coords.shape, src_pixel_coords.dtype))\n for i in range(2):\n t = src_pixel_coords[0, :, :, i]\n print((i, t.min().item(), t.max().item()))\n sys.exit(0)\n\n # src_pixel_coords = (src_pixel_coords + torch.tensor([0.5, 0.5], device=img.device)) / torch.tensor([width, height],\n # device=img.device)\n\n src_pixel_coords = src_pixel_coords / torch.tensor([width-1, height-1], device=img.device)\n\n output_img = resampler_wrapper_torch(img, src_pixel_coords)\n if ret_flows:\n return output_img, src_pixel_coords - cam_coords\n else:\n return output_img", "def myPerspectiveTransform(pts, H):\n\n # Clone and reshape the list of points\n new_pts = np.reshape(pts, (-1, 2))\n # Allocate a vector filled with one with size (-1, 1)\n one_vector = np.zeros((pts.shape[0], 1)) + 1\n # Concatenate the one vector to the list of points to form the homogenious coordiniate system\n new_pts = np.concatenate((new_pts, one_vector), axis=len(new_pts.shape)-1)\n\n # Perform transformation and transform results into the pixel coord. system\n # i.e., x' = x/w, and y' = y/w\n for i, pt in enumerate(new_pts):\n new_pts[i] = H.dot(pt.T)\n new_pts[i] /= new_pts[i, -1]\n\n # Return results with the same shape as the input has\n return new_pts[:, :-1].reshape(pts.shape)", "def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):\n b, _, h, w = cam_coords.size()\n cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]\n if proj_c2p_rot is not None:\n pcoords = proj_c2p_rot.bmm(cam_coords_flat)\n else:\n pcoords = cam_coords_flat\n\n if proj_c2p_tr is not None:\n pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]\n X = pcoords[:, 0]\n Y = pcoords[:, 1]\n Z = pcoords[:, 2].clamp(min=1e-8)\n\n X_norm = 2 * (X / Z) / (w - 1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n Y_norm = 2 * (Y / Z) / (h - 1) - 1 # Idem [B, H*W]\n if padding_mode == 'zeros':\n X_mask = ((X_norm > 1) + (X_norm < -1)).detach()\n X_norm[X_mask] = 2 # make sure that no point in warped image is a combinaison of im and gray\n Y_mask = ((Y_norm > 1) + (Y_norm < -1)).detach()\n Y_norm[Y_mask] = 2\n\n pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]\n return pixel_coords.view(b, h, w, 2)", "def apply_transform_to_image(self,img, transform, center=None):\n \n if center is None:\n center = (np.array(img.shape)[::-1]-1)/2.0\n \n displacement = np.dot(transform, center)\n shift = center - displacement\n \n img_tf = ndimage.interpolation.affine_transform(img, transform, offset=shift, mode=\"constant\", order=3, cval=0.0)\n return img_tf", "def stp_to_camera_transform(self):\n # setup variables\n camera_xyz_w = self.cam_pos\n camera_rot_w = self.cam_rot\n camera_int_pt_w = self.cam_interest_pt\n camera_xyz_obj_p = camera_xyz_w - camera_int_pt_w\n \n # get the distance from 
the camera to the world\n camera_dist_xy = np.linalg.norm(camera_xyz_w[:2])\n z = [0,0,np.linalg.norm(camera_xyz_w[:3])]\n\n # form the rotations about the x and z axis for the object on the tabletop\n theta = camera_rot_w[0] * np.pi / 180.0\n phi = -camera_rot_w[2] * np.pi / 180.0 + np.pi / 2.0\n camera_rot_obj_p_z = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n\n camera_rot_obj_p_x = np.array([[1, 0, 0],\n [0, np.cos(theta), -np.sin(theta)],\n [0, np.sin(theta), np.cos(theta)]])\n \n # form the full rotation matrix, swapping axes to match maya\n camera_md = np.array([[0, 1, 0],\n [1, 0, 0],\n [0, 0, -1]])\n camera_rot_obj_p = camera_md.dot(camera_rot_obj_p_z.dot(camera_rot_obj_p_x))\n camera_rot_obj_p = camera_rot_obj_p.T\n \n # form the full object to camera transform\n R_stp_camera = camera_rot_obj_p\n t_stp_camera = np.array(z)\n return RigidTransform(rotation=R_stp_camera,\n translation=t_stp_camera,\n from_frame='stp', to_frame='camera')", "def world_to_camera(self, X):\n raise NotImplementedError", "def transform_points(points, transf_matrix):\n if points.shape[0] not in [3, 4]:\n raise Exception(\n \"Points input should be (3,N) or (4,N) shape, received {}\".format(\n points.shape\n )\n )\n return transf_matrix.dot(np.vstack((points[:3, :], np.ones(points.shape[1]))))[\n :3, :\n ]", "def test_coords_transformation():\n\n # H+R+S+T, not reverse, depth\n img_meta = {\n 'pcd_scale_factor':\n 1.2311e+00,\n 'pcd_rotation': [[8.660254e-01, 0.5, 0], [-0.5, 8.660254e-01, 0],\n [0, 0, 1.0e+00]],\n 'pcd_trans': [1.111e-02, -8.88e-03, 0.0],\n 'pcd_horizontal_flip':\n True,\n 'transformation_3d_flow': ['HF', 'R', 'S', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, -2.9757e-01, 4.0021e+01],\n [-9.1435e-01, 2.6675e+01, -5.5950e+00],\n [2.0089e-01, 5.8098e+00, -3.5409e+01],\n [-1.9461e-01, 3.1309e+01, -1.0901e+00]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=False)\n\n expected_tensor = torch.tensor(\n [[5.78332345e+00, 2.900697e+00, 4.92698531e+01],\n [-1.5433839e+01, 2.8993850e+01, -6.8880045e+00],\n [-3.77929405e+00, 6.061661e+00, -4.35920199e+01],\n [-1.9053658e+01, 3.3491436e+01, -1.34202211e+00]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # H+R+S+T, reverse, depth\n img_meta = {\n 'pcd_scale_factor':\n 7.07106781e-01,\n 'pcd_rotation': [[7.07106781e-01, 7.07106781e-01, 0.0],\n [-7.07106781e-01, 7.07106781e-01, 0.0],\n [0.0, 0.0, 1.0e+00]],\n 'pcd_trans': [0.0, 0.0, 0.0],\n 'pcd_horizontal_flip':\n False,\n 'transformation_3d_flow': ['HF', 'R', 'S', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, -2.9757e-01, 4.0021e+01],\n [-9.1435e+01, 2.6675e+01, -5.5950e+00],\n [6.061661e+00, -0.0, -1.0e+02]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=True)\n\n expected_tensor = torch.tensor(\n [[-5.53977e+00, 4.94463e+00, 5.65982409e+01],\n [-6.476e+01, 1.1811e+02, -7.91252488e+00],\n [6.061661e+00, -6.061661e+00, -1.41421356e+02]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # H+R+S+T, not reverse, camera\n img_meta = {\n 'pcd_scale_factor':\n 1.0 / 7.07106781e-01,\n 'pcd_rotation': [[7.07106781e-01, 0.0, 7.07106781e-01],\n [0.0, 1.0e+00, 0.0],\n [-7.07106781e-01, 0.0, 7.07106781e-01]],\n 'pcd_trans': [1.0e+00, -1.0e+00, 0.0],\n 'pcd_horizontal_flip':\n True,\n 'transformation_3d_flow': ['HF', 'S', 'R', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, 4.0021e+01, -2.9757e-01],\n [-9.1435e+01, -5.5950e+00, 2.6675e+01],\n 
[6.061661e+00, -1.0e+02, -0.0]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'CAMERA', img_meta, reverse=False)\n\n expected_tensor = torch.tensor(\n [[6.53977e+00, 5.55982409e+01, 4.94463e+00],\n [6.576e+01, -8.91252488e+00, 1.1811e+02],\n [-5.061661e+00, -1.42421356e+02, -6.061661e+00]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V, reverse, camera\n img_meta = {'pcd_vertical_flip': True, 'transformation_3d_flow': ['VF']}\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'CAMERA', img_meta, reverse=True)\n\n expected_tensor = torch.tensor([[-5.2422e+00, 4.0021e+01, 2.9757e-01],\n [-9.1435e+01, -5.5950e+00, -2.6675e+01],\n [6.061661e+00, -1.0e+02, 0.0]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V+H, not reverse, depth\n img_meta = {\n 'pcd_vertical_flip': True,\n 'pcd_horizontal_flip': True,\n 'transformation_3d_flow': ['VF', 'HF']\n }\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=False)\n\n expected_tensor = torch.tensor([[5.2422e+00, -4.0021e+01, -2.9757e-01],\n [9.1435e+01, 5.5950e+00, 2.6675e+01],\n [-6.061661e+00, 1.0e+02, 0.0]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V+H, reverse, lidar\n img_meta = {\n 'pcd_vertical_flip': True,\n 'pcd_horizontal_flip': True,\n 'transformation_3d_flow': ['VF', 'HF']\n }\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'LIDAR', img_meta, reverse=True)\n\n expected_tensor = torch.tensor([[5.2422e+00, -4.0021e+01, -2.9757e-01],\n [9.1435e+01, 5.5950e+00, 2.6675e+01],\n [-6.061661e+00, 1.0e+02, 0.0]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)", "def transform_images(img1,img2):", "def transform(self, R, t, scale = 1):\n\n # Build 4-by-4 projection matrix from args ----------------------------\n # This is what we are doing internally:\n # Proj = np.r_[ scale * np.c_[R, t], [[0, 0, 0, 1]] ]\n # InvProj = np.r_[ scale * np.c_[R.T, -np.dot(R.T, t)], [[0,0,0,scale]] ]\n Proj = tf_format.tf_format('4x4', R, t)\n Proj[:-1,:] *= scale\n InvProj = tf_format.tf_format('i4x4', R, t) * scale\n \n \n # Apply transformation to pts3D ---------------------------------------\n if self.pts3D is not None and self.pts3D.shape[1] > 0:\n # Use homogeneous coords\n pts3D = np.r_[self.pts3D, np.ones((1, self.pts3D.shape[1]))]\n pts3D = np.dot(Proj, pts3D)\n self.pts3D = pts3D[:3, :]\n\n # Apply transformation to cameras -------------------------------------\n # Camera poses are stored using camera-to-world transformations, we \n # need to invert the projection matrix for this to work --> \n # we use InvProj\n\n cposes = self.cam_poses\n for i in range(cposes.shape[1]):\n\n # Extract camera projection matrix\n p_cam = tf_format.tf_format('4x4', cposes[:, i])\n\n # Transform camera projection matrix\n new_p_cam = np.dot(p_cam, InvProj)\n \n # Make sure it's a true rotation!\n [u, s, vT] = np.linalg.svd(new_p_cam[:3,:3])\n cposes[:3, i] = tf_format.rodrigues( np.dot(u,vT) ).ravel()\n cposes[3:, i] = new_p_cam[:3, 3]\n\n self.cam_poses = cposes", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def projective_inverse_warp_torch3(\n img, depth, pose, src_intrinsics, tgt_intrinsics, tgt_height, tgt_width, ret_flows=False):\n batch, height, width, channels = img.shape\n # Construct pixel grid coordinates (x, y, 1) for each pixel.\n # Duplicated for N (e.g. 
4) of INPUT images (batch)\n #delta_xy = src_center_xy - torch.tensor([float(tgt_width - 1) / 2, float(tgt_height - 1) / 2], device=src_center_xy.device)\n #delta_xyz = torch.cat([delta_xy, torch.zeros([batch, 1], device=delta_xy.device)], dim=1).unsqueeze(-1).unsqueeze(-1)\n # delta xyz [batch, 3, 1, 1]\n pixel_coords = meshgrid_abs_torch(batch, tgt_height, tgt_width, img.device, False)\n #pixel_coords = pixel_coords + delta_xyz\n\n # Note: \"target\" here means actually \"ref image\", forget about the ground truth targets!\n # You project pixels from \"target\" to the multiple inputs, not the other way round\n # Convert pixel coordinates to the target camera frame, 3D camera coords (X, Y, Z), seems OK so far...\n # Note: these are points in 3D camera coords (C) of the target camera, not world coords (W) !!!\n cam_coords = pixel2cam_torch(depth, pixel_coords, tgt_intrinsics)\n\n # Construct a 4x4 intrinsic matrix, why? wouldn't 3x4 suffice?\n filler = torch.tensor([[[0., 0., 0., 1.]]], device=img.device)\n filler = filler.repeat(batch, 1, 1)\n src_intrinsics4 = torch.cat([src_intrinsics, torch.zeros([batch, 3, 1], device=img.device)], axis=2)\n src_intrinsics4 = torch.cat([src_intrinsics4, filler], axis=1)\n\n # Get a 4x4 transformation matrix from 'target' camera frame to 'source'\n # pixel frame, looks OK\n proj_tgt_cam_to_src_pixel = torch.matmul(src_intrinsics4, pose)\n src_pixel_coords = cam2pixel_torch(cam_coords, proj_tgt_cam_to_src_pixel)\n\n # print(f'src_pixel_coords shape {src_pixel_coords.shape}')\n # print(f'src_pixel_coords {L(src_pixel_coords[:, :, :3,:])}')\n\n # Now we get trouble !\n if False:\n print(('src_pixel_coords', src_pixel_coords.shape, src_pixel_coords.dtype))\n for i in range(2):\n t = src_pixel_coords[0, :, :, i]\n print((i, t.min().item(), t.max().item()))\n sys.exit(0)\n\n # src_pixel_coords = (src_pixel_coords + torch.tensor([0.5, 0.5], device=img.device)) / torch.tensor([width, height],\n # device=img.device)\n\n src_pixel_coords = src_pixel_coords / torch.tensor([width-1, height-1], device=img.device)\n\n output_img = resampler_wrapper_torch(img, src_pixel_coords)\n if ret_flows:\n return output_img, src_pixel_coords - cam_coords\n else:\n return output_img", "def cam_to_world(cam_point, world_to_cam):\n # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])\n\n obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))\n world_point = np.dot(world_to_cam, obj_vector)\n\n world_point = [p[0] for p in world_point]\n return world_point[0:3]", "def warp(img, reverse=False):\n\n img_size = (img.shape[1], img.shape[0])\n\n points = np.array(\n [(581, 460),\n (278, img_size[1] - 50),\n (1020, img_size[1] - 50),\n (700, 460)])\n src = np.float32([points])\n\n dst = np.float32(\n [[278, 0],\n [278, img_size[1]],\n [1020, img_size[1]],\n [1020, 0]])\n\n if reverse:\n src, dst = dst, src\n\n M = cv2.getPerspectiveTransform(src, dst)\n\n return cv2.warpPerspective(img, M, img_size)", "def projectToImage_kitti(pts_3D, P):\n # project in image\n mat = np.vstack((pts_3D, np.ones((pts_3D.shape[1]))))\n\n pts_2D = np.dot(P, mat)\n\n # scale projected points\n pts_2D[0, :] = pts_2D[0, :] / pts_2D[2, :]\n pts_2D[1, :] = pts_2D[1, :] / pts_2D[2, :]\n pts_2D = np.delete(pts_2D, 2, 0)\n\n return pts_2D", "def GroundToImage(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = 
float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def projectToImage(pts_3D, P):\n pts_3D = np.reshape(pts_3D, (-1, 3))\n pts_3D = np.transpose(pts_3D)\n pts_3D = np.vstack([pts_3D, 1])\n pts_2D = np.matmul(P, pts_3D)\n pts_2D = pts_2D[:2]/pts_2D[-1]\n pts_2D = np.transpose(pts_2D)\n return pts_2D" ]
[ "0.74925107", "0.72041154", "0.7066651", "0.7043059", "0.7004196", "0.69641924", "0.69523805", "0.6926867", "0.6857847", "0.6825331", "0.67971975", "0.6750294", "0.6734582", "0.663383", "0.65459144", "0.65373534", "0.65190524", "0.65055305", "0.6471489", "0.6462659", "0.6461577", "0.64546305", "0.64426225", "0.6406362", "0.6388241", "0.6383348", "0.6379514", "0.6374536", "0.6367136", "0.6362778", "0.6356908", "0.6351567", "0.63468826", "0.6344896", "0.6343453", "0.6338962", "0.63352454", "0.6278795", "0.62726545", "0.62527", "0.62527", "0.6212654", "0.6175481", "0.615238", "0.6145205", "0.61328477", "0.61214113", "0.6116871", "0.6093443", "0.6081966", "0.6074786", "0.6061695", "0.6044024", "0.60398686", "0.60304356", "0.601823", "0.6012711", "0.59723365", "0.59707355", "0.5963979", "0.5958572", "0.59562826", "0.59505486", "0.59479165", "0.59420365", "0.59419477", "0.5927786", "0.5922535", "0.5921894", "0.59186465", "0.5918096", "0.5912747", "0.5906032", "0.5902164", "0.58984214", "0.589575", "0.58816445", "0.5872969", "0.5864041", "0.58618844", "0.5860027", "0.58525544", "0.5847409", "0.5842033", "0.58242893", "0.58206177", "0.58067274", "0.57896763", "0.57892364", "0.5786114", "0.57797766", "0.5772522", "0.5772294", "0.5771751", "0.5769681", "0.5766304", "0.5764713", "0.5764468", "0.57622397", "0.57558763" ]
0.70939386
2
Compute exterior orientation parameters. This function can be used in conjunction with ``self.__ComputeDesignMatrix(groundPoints)`` and ``self.__ComputeObservationVector(imagePoints)``
def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon): # cameraPoints = self.ImageToCamera(imagePoints) cameraPoints = imagePoints self.__ComputeApproximateVals(cameraPoints, groundPoints) l0 = self.__ComputeObservationVector(groundPoints.T) l0 = np.reshape(l0, (-1, 1)) l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0 A = self.__ComputeDesignMatrix(groundPoints.T) N = np.dot(A.T, A) u = np.dot(A.T, l) deltaX = np.dot(la.inv(N), u) # update orientation pars self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6)) while la.norm(deltaX) > epsilon: l0 = self.__ComputeObservationVector(groundPoints.T) l0 = np.reshape(l0, (-1, 1)) l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0 A = self.__ComputeDesignMatrix(groundPoints.T) N = np.dot(A.T, A) u = np.dot(A.T, l) deltaX = np.dot(la.inv(N), u) # update orientation pars self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6)) # compute residuals l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1)) v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1) if (np.size(A, 0) - np.size(deltaX)) != 0: sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX)) sigmaX = sig[0] * la.inv(N) else: sigmaX = None return [self.exteriorOrientationParameters, sigmaX, v]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def ComputeExteriorOrientation_RzRyRz(self, imagePoints, groundPoints, epsilon):\n # cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = imagePoints\n self.exteriorOrientationParameters[0:3] = np.dot(self.rotationMatrix_RzRyRz, self.exteriorOrientationParameters[0:3])\n self.exteriorOrientationParameters = np.add(self.exteriorOrientationParameters, np.random.normal(0, 0.01, self.exteriorOrientationParameters.shape))\n l0 = self.__ComputeObservationVector_RzRyRz(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix_RzRyRz(groundPoints.T)\n\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n while la.norm(deltaX) > epsilon:\n l0 = self.__ComputeObservationVector_RzRyRz(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix_RzRyRz(groundPoints.T)\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n # compute residuals\n l_a = np.reshape(self.__ComputeObservationVector_RzRyRz(groundPoints.T), (-1, 1))\n v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)\n if (np.size(A, 0) - np.size(deltaX)) != 0:\n sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))\n sigmaX = sig[0] * la.inv(N)\n else:\n sigmaX = None\n\n return [self.exteriorOrientationParameters, sigmaX, v]", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, 
\"shearAngle\": np.rad2deg(gamma)}", "def __ComputeDesignMatrix(self, groundPoints):\n # initialization for readability\n omega = self.exteriorOrientationParameters[3]\n phi = self.exteriorOrientationParameters[4]\n kappa = self.exteriorOrientationParameters[5]\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n\n rotationMatrixT = self.rotationMatrix.T\n rotatedG = rotationMatrixT.dot(dXYZ)\n rT1g = rotatedG[0, :]\n rT2g = rotatedG[1, :]\n rT3g = rotatedG[2, :]\n\n focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2\n\n dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]\n dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :]\n\n dgdX0 = np.array([-1, 0, 0], 'f')\n dgdY0 = np.array([0, -1, 0], 'f')\n dgdZ0 = np.array([0, 0, -1], 'f')\n\n # Derivatives with respect to X0\n dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)\n dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)\n\n # Derivatives with respect to Y0\n dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)\n dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)\n\n # Derivatives with respect to Z0\n dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)\n dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)\n\n dRTdOmega = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'omega').T\n dRTdPhi = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'phi').T\n dRTdKappa = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'kappa').T\n\n gRT3g = dXYZ * rT3g\n\n # Derivatives with respect to Omega\n dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Phi\n dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Kappa\n dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n # all derivatives of x and y\n dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,\n np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T])\n\n a = np.zeros((2 * dd[0].shape[0], 6))\n a[0::2] = dd[0]\n a[1::2] = dd[1]\n\n return a", "def __ComputeDesignMatrix_RzRyRz(self, groundPoints):\n # initialization for readability\n azimuth = self.exteriorOrientationParameters[3]\n phi = self.exteriorOrientationParameters[4]\n kappa = self.exteriorOrientationParameters[5]\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n\n rotationMatrixT = self.rotationMatrix_RzRyRz.T\n rotatedG = rotationMatrixT.dot(dXYZ)\n rT1g = rotatedG[0, :]\n rT2g = rotatedG[1, :]\n rT3g = rotatedG[2, :]\n\n focalBySqauredRT3g = 
self.camera.focalLength / rT3g ** 2\n\n dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]\n dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :]\n\n dgdX0 = np.array([-1, 0, 0], 'f')\n dgdY0 = np.array([0, -1, 0], 'f')\n dgdZ0 = np.array([0, 0, -1], 'f')\n\n # Derivatives with respect to X0\n dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)\n dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)\n\n # Derivatives with respect to Y0\n dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)\n dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)\n\n # Derivatives with respect to Z0\n dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)\n dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)\n\n dRTdOmega = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'azimuth').T\n dRTdPhi = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'phi').T\n dRTdKappa = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'kappa').T\n\n gRT3g = dXYZ * rT3g\n\n # Derivatives with respect to Omega\n dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Phi\n dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Kappa\n dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n # all derivatives of x and y\n dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,\n np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T])\n\n a = np.zeros((2 * dd[0].shape[0], 6))\n a[0::2] = dd[0]\n a[1::2] = dd[1]\n\n return a", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def _exteriorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n exteriorIDs = numerix.concatenate((numerix.ravel(XYids[..., 0].swapaxes(0, 1)),\n numerix.ravel(XYids[..., -1].swapaxes(0, 1)),\n numerix.ravel(XZids[:, 0,:]),\n numerix.ravel(XZids[:, -1,:]),\n numerix.ravel(YZids[ 0, ...]),\n numerix.ravel(YZids[-1, ...])))\n\n from fipy.variables.faceVariable import FaceVariable\n exteriorFaces = FaceVariable(mesh=self, value=False)\n exteriorFaces[exteriorIDs] = True\n return exteriorFaces", "def __ComputeObservationVector(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / 
rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def __ComputeApproximateVals(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def exterior_der(self):\n from sage.calculus.functional import diff\n from utilities import format_unop_txt, format_unop_latex\n from sage.tensor.modules.comp import CompFullyAntiSym\n from vectorframe import CoordFrame\n if self._exterior_derivative is None:\n # A new computation is necessary:\n fmodule = self._fmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n self._exterior_derivative = DiffFormParal(fmodule, \n self._tensor_rank+1, \n name=rname, \n latex_name=rlname)\n # 1/ List of all coordinate frames in which the components of self\n # are known\n coord_frames = []\n for frame in self._components:\n if isinstance(frame, CoordFrame):\n coord_frames.append(frame)\n if coord_frames == []:\n # A coordinate frame is searched, at the price of a change of\n # frame, priveleging the frame of the domain's default chart\n dom = self._domain\n def_coordf = dom._def_chart._frame\n for frame in self._components:\n if (frame, def_coordf) in dom._frame_changes:\n self.comp(def_coordf, from_basis=frame)\n coord_frames = [def_coordf]\n break\n if coord_frames == []:\n for chart in dom._atlas:\n if chart != dom._def_chart: # the case def_chart is treated above\n coordf = chart._frame\n for frame in self._components:\n if (frame, coordf) in dom._frame_changes:\n self.comp(coordf, from_basis=frame)\n coord_frames[coordf]\n break\n if coord_frames != []:\n break \n # 2/ The computation:\n for frame in coord_frames:\n chart = frame._chart\n sc = self._components[frame]\n dc = CompFullyAntiSym(fmodule._ring, frame, \n self._tensor_rank+1, \n start_index=fmodule._sindex,\n output_formatter=fmodule._output_formatter)\n for ind, val in sc._comp.iteritems():\n for i in fmodule.irange():\n ind_d = (i,) + ind\n if len(ind_d) == len(set(ind_d)): \n # all indices are different\n dc[[ind_d]] += \\\n val.function_chart(chart).diff(i).scalar_field()\n self._exterior_derivative._components[frame] = dc\n return self._exterior_derivative", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def __ComputeApproximateVals_RzRyRz(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n 
self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0.2, 0.2, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def __ComputeObservationVector_RzRyRz(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix_RzRyRz.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def Orientation(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Orientation(self, *args)", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def yy(self):\n return self.exterior[:, 1]", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def get_orientation_vector(self, xyz):\n if self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def orientation(self) -> Orientation:\n # if orientation was passed in, use it\n if self._orientation is not None:\n return convert_to_enum(self._orientation, Orientation)\n\n # replace any dead pixels with median value\n temp_image = self.image.array.copy()\n temp_image[temp_image < np.median(temp_image)] = np.median(temp_image)\n\n # find \"range\" of 80 to 90th percentiles\n row_sum = np.sum(temp_image, 0)\n col_sum = np.sum(temp_image, 1)\n row80, row90 = np.percentile(row_sum, [85, 99])\n col80, col90 = np.percentile(col_sum, [85, 99])\n row_range = row90 - row80\n col_range = col90 - col80\n\n # The true picket side 
will have a greater difference in\n # percentiles than will the non-picket size.\n if row_range < col_range:\n orientation = Orientation.LEFT_RIGHT\n else:\n orientation = Orientation.UP_DOWN\n return orientation", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def raw_orient(\n cal: Calibration,\n cpar: ControlPar,\n nfix: int,\n fix: List[np.ndarray],\n pix: List[Target],\n) -> bool:\n X = np.zeros((10, 6))\n y = np.zeros((10,))\n XPX = np.zeros((6, 6))\n XPy = np.zeros((6,))\n beta = np.zeros((6,))\n itnum = 0\n stopflag = False\n dm = 0.0001\n drad = 0.0001\n cal.added_par.k1 = 0\n cal.added_par.k2 = 0\n cal.added_par.k3 = 0\n cal.added_par.p1 = 0\n cal.added_par.p2 = 0\n cal.added_par.scx = 1\n cal.added_par.she = 0\n\n while not stopflag and itnum < 20:\n itnum += 1\n\n n = 0\n for i in range(nfix):\n xc, yc = pixel_to_metric(pix[i].x, pix[i].y, cpar)\n\n pos = vec_set(fix[i][0], fix[i][1], fix[i][2])\n cal.ext_par.update_rotation_matrix()\n xp, yp = img_coord(pos, cal, cpar.mm)\n\n X[n], X[n + 1] = num_deriv_exterior(cal, cpar, dm, drad, pos)\n y[n], y[n + 1] = xc - xp, yc - yp\n\n n += 2\n\n # void ata (double *a, double *ata, int m, int n, int n_large )\n ata(X, XPX, n, 6, 6)\n if np.any(XPX):\n XPXi = np.linalg.inv(XPX)\n else:\n XPXi = XPX\n\n # atl (double *u, double *a, double *l, int m, int n, int n_large)\n XPy = atl(XPy, X, y, 6)\n beta = XPXi @ XPy\n\n # ata ((double *) X, (double *) XPX, n, 6, 6);\n # matinv ((double *) XPX, 6, 6);\n # atl ((double *) XPy, (double *) X, y, n, 6, 6);\n # matmul ((double *) beta, (double *) XPX, (double *) XPy, 6,6,1,6,6);\n\n stopflag = all(abs(beta) <= 0.1)\n\n cal.ext_par.x0 += beta[0]\n cal.ext_par.y0 += beta[1]\n cal.ext_par.z0 += beta[2]\n cal.ext_par.omega += beta[3]\n cal.ext_par.phi += beta[4]\n cal.ext_par.kappa += beta[5]\n\n if stopflag:\n cal.ext_par.rotation_matrix()\n\n return stopflag", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def num_deriv_exterior(\n cal: Calibration, cpar: ControlPar, dpos: float, dang: float, pos: vec3d\n):\n var = [\n cal.ext_par.x0,\n cal.ext_par.y0,\n cal.ext_par.z0,\n cal.ext_par.omega,\n cal.ext_par.phi,\n cal.ext_par.kappa,\n ]\n x_ders = np.zeros(6)\n y_ders = np.zeros(6)\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n xs, ys = img_coord(pos, cal, cpar.mm)\n\n for pd in range(6):\n step = dang if pd > 2 else dpos\n var[pd] += step\n\n if pd > 2:\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n xpd, ypd = img_coord(pos, cal, cpar.mm)\n x_ders[pd] = (xpd - xs) / step\n y_ders[pd] = (ypd - ys) / step\n\n var[pd] -= step\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n return (x_ders, y_ders)", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n 
for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def euler_to_rodrigues(X_params):\n data_samples = X_params.shape[0]\n pose_euler = np.array([X_params[:, i:i+3] for i in range(0, 72, 3)])\n #print(pose_euler[0][0])\n #pose_euler = pose_euler.reshape((24, data_samples, 1, 3))\n #print(pose_euler[0][0])\n print(\"pose_euler shape: \" + str(pose_euler.shape))\n #R = np.array([[eulerAnglesToRotationMatrix(vector) for vector in vectors] for vectors in pose_euler])\n #print(\"R shape: \" + str(R.shape))\n #print(R[0][0])\n #R = R.reshape((data_samples, 24, 3, 3))\n\n #pose_params = np.array([[Rot.from_dcm(rot_mat).as_rotvec() for rot_mat in param_rot_mats] for param_rot_mats in R])\n pose_params = np.array([Rot.from_euler('xyz', vectors, degrees=False).as_rotvec() for vectors in pose_euler])\n print(\"pose_params shape: \" + str(pose_params.shape))\n pose_params = pose_params.reshape((data_samples, 72))\n print(\"pose_params shape: \" + str(pose_params.shape))\n print(\"other params shape: \" + str(X_params[:, 72:85].shape))\n X_params = np.concatenate([pose_params, X_params[:, 72:85]], axis=1)\n print(\"X_params shape: \" + str(X_params.shape))\n\n return X_params", "def get_orientation(self):\r\n return self.__orientation", "def get_camera_orientation(self):\n\n # Create the vector from the camera to the robot\n vector_x = self.robot_x - self.camera_x\n vector_y = self.robot_y - self.camera_y\n vector_z = self.robot_z - self.camera_z\n\n # Calculate yaw and pitch from this vector\n yaw = math.atan2(vector_y, vector_x)\n pitch = -math.asin(vector_z)\n\n # Create the quaternion from the euler angles\n self.quaternion = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(0, pitch, yaw))", "def recoverParams(self):\n self.shape, self.rate = self.posterior[1] + 1, -self.posterior[0]", "def getSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n ys = np.array(self.XYProjections)[:,1]\n zs = np.array(self.XZProjections)[:,1]\n\n L = xs[-1] - xs[0]\n self.L = L\n xis = (xs - xs[0]) / L\n\n errorValue = lambda x,y,A: y - np.dot(A, x)\n a_init = np.array([1] * 4)\n\n # Calculate the derivation equation on x-y plane\n # Get the optimal parameters using least squre error method\n a1 = sp.optimize.leastsq(errorValue, a_init, args=(ys, self._H(xis, L)))[0]\n self.alpha_xyPlane = a1\n \n # Derivation\n xi = sy.symbols('xi')\n self.u_xyPlane = (self._H(xi, L, ifsymbol=True) * a1).sum()\n \n # Then calculate the derivation equation on x-z plane\n a2 = sp.optimize.leastsq(errorValue, a_init, args=(zs, self._H(xis, L)))[0]\n self.alpha_xzPlane = a2\n self.u_xzPlane = (self._H(xi, L, 
ifsymbol=True) * a2).sum()", "def pressure_equality_deriv(self):\n deriv = np.zeros((\n self.num_i,\n self.num_i + self.num_o + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 1] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 1] = -1\n return deriv", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def xx(self):\n return self.exterior[:, 0]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def calculateOrientation(self):\n orientation = [\n [Orientation.LANDSCAPE, Orientation.PORTRAIT],\n [Orientation.CW_LANDSCAPE, Orientation.CW_PORTRAIT],\n [Orientation.CCW_LANDSCAPE, Orientation.CCW_PORTRAIT],\n [Orientation.FLIPPED_LANDSCAPE, Orientation.FLIPPED_PORTRAIT],\n ]\n exif = self.getExif()\n if exif and exif['Orientation'] == 3: #180\n rotation = 3\n elif exif and exif['Orientation'] == 6: #90 CCW\n rotation = 2\n elif exif and exif['Orientation'] == 8: #90 CW\n rotation = 1\n else:\n rotation = 0\n\n if self.isLandscape():\n return orientation[rotation][0]\n else:\n return orientation[rotation][1]", "def get_affine_reg_params(self):\n affine_params = [\n self.affine_reg_pyramid_steps,\n self.affine_reg_used_pyramid_steps,\n ]\n return affine_params", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def orientation(self):\n agents = self.board[self.agent_locs_idx]\n out = (agents & CellTypes.orientation_mask) >> CellTypes.orientation_bit\n return out.astype(np.int64)", "def test_active_matrix_from_extrinsic_euler_zyz():\n assert_array_almost_equal(\n 
pr.active_matrix_from_extrinsic_roll_pitch_yaw([0.5 * np.pi, 0, 0]),\n np.array([\n [1, 0, 0],\n [0, 0, -1],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 1, 0],\n [0, 0, -1],\n [-1, 0, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [0, 1, 0],\n [-1, 0, 0]\n ])\n )", "def extrinsic(self):\n return self._extrinsic", "def res(self) -> tuple[float, float]:\n xs, ys = self.xcoords.data, self.ycoords.data\n dx, dy = 0, 0\n if xs.ndim == 1:\n dx = xs[1] - xs[0]\n dy = ys[1] - ys[0]\n elif xs.ndim == 2:\n ddx0 = xs[1, 0] - xs[0, 0]\n ddy0 = ys[1, 0] - ys[0, 0]\n ddx1 = xs[0, 1] - xs[0, 0]\n ddy1 = ys[0, 1] - ys[0, 0]\n dx = math.hypot(ddx1, ddy1) # always positive!\n dy = math.hypot(ddx0, ddy0)\n rot = self.rotation\n acos = math.cos(math.radians(rot))\n # find grid top-down orientation\n if (\n (acos < 0 and ddy0 > 0)\n or (acos > 0 and ddy0 < 0)\n or (\n ddy0 == 0\n and (np.isclose(rot, 270) and ddx0 < 0)\n or (np.isclose(rot, 90) and ddx0 > 0)\n )\n ):\n dy = -1 * dy\n return dx, dy", "def orientation(point_p, point_q, point_r):\n # Set https://www.geeksforgeeks.org/orientation-3-ordered-points/\n # for details of below formula.\n r = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -\n (point_q.x - point_p.x) * (point_r.y - point_q.y))\n if r == 0:\n return 0\n return 1 if r > 0 else 2", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def orientation(self, point):\n p_x = self.begin.x\n p_y = self.begin.y\n\n q_x = self.end.x\n q_y = self.end.y\n\n r_x = point.x\n r_y = point.y\n\n D = q_x * r_y + p_x * q_y + p_y * r_x - q_x * p_y - r_x * q_y - 
r_y * p_x\n\n if D > 0:\n return 1\n elif D == 0:\n return 0\n else:\n return -1", "def enthalpy_equality_deriv(self):\n deriv = np.zeros((\n self.num_i,\n self.num_i + self.num_o + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 2] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 2] = -1\n return deriv", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def get_orienationDict(self,orienation='zyx'):\n try:\n _str_func = 'rootShape_update'\n log.debug(cgmGEN.logString_start(_str_func))\n \n _d = {}\n _mOrientation = VALID.simpleOrientation('zyx')#cgmValid.simpleOrientation(str(modules.returnSettingsData('jointOrientation')) or 'zyx')\n _d['str'] = _mOrientation.p_string\n _d['mOrientation'] = _mOrientation\n _d['vectorAim'] = _mOrientation.p_aim.p_vector\n _d['vectorUp'] = _mOrientation.p_up.p_vector\n _d['vectorOut'] = _mOrientation.p_out.p_vector\n \n _d['vectorAimNeg'] = _mOrientation.p_aimNegative.p_vector\n _d['vectorUpNeg'] = _mOrientation.p_upNegative.p_vector\n _d['vectorOutNeg'] = _mOrientation.p_outNegative.p_vector\n \n \n _d['stringAim'] = _mOrientation.p_aim.p_string\n _d['stringUp'] = _mOrientation.p_up.p_string\n _d['stringOut'] = _mOrientation.p_out.p_string\n \n _d['stringAimNeg'] = _mOrientation.p_aimNegative.p_string\n _d['stringUpNeg'] = _mOrientation.p_upNegative.p_string\n _d['stringOutNeg'] = _mOrientation.p_outNegative.p_string \n return _d\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def P(self):\n self.eigenmatrix()", "def get_orientation(self):\n return self._orientation", "def aic(self):\n return 2*self.number_of_parameters() - 2*self.ll[-1]", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def calculate_ic(self):\n # dt:\n dt = self.E\n\n # dr:\n dr = np.sqrt(self.E ** 2 - (self.Q + self.L ** 2) / self.r ** 2)\n #print(dr)\n if np.isnan(dr):\n dr = 0\n #dr = self._check_dr_sign(self.alpha)\n\n # dtheta:\n omega = self.Q - self.L ** 2 * (np.cos(self.theta) / np.sin(self.theta)) ** 2\n if omega < 0:\n omega = np.abs(omega)\n dtheta = np.sqrt(omega) / self.r**2\n if self.eta < np.pi / 2:\n dtheta *= -1\n\n # dphi:\n dphi = self.L / (self.r * np.sin(self.theta)) ** 2\n\n return dt, dr, dtheta, dphi", "def _sector_orientation(self, vertices):\n if not vertices[0] == vertices[-1]:\n vertices.append(vertices[0])\n xy = np.transpose(np.array(vertices))\n x, y = xy[0], xy[1]\n return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0, vertices", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def vorticity(self):\n \n ux,_ = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n _,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n # self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y'])\n self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y','t'])\n \n if len(self._obj.attrs['units']) == 4:\n 
vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append('1/dt')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = ('1/dt')\n\n\n return self._obj", "def test_active_matrix_from_extrinsic_roll_pitch_yaw():\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz([0.5 * np.pi, 0, 0]),\n np.array([\n [0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [-1, 0, 0],\n [0, -1, 0],\n [0, 0, 1]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [-1, 0, 0],\n [0, 0, 1],\n [0, 1, 0]\n ])\n )", "def galaxy1_orbital_orientation(self):\n return self._galaxy1_orbital_orientation", "def get_iperparams(self):\n\t\treturn (self.D, self.K)", "def posterior_parameter(self):\n if self.__posterior_parameter is not None:\n return self.__posterior_parameter\n else:\n self.clean()\n self.log(\"Schur's complement\")\n r = (self.xtqx + self.parcov.inv).inv\n assert r.row_names == r.col_names\n self.__posterior_parameter = Cov(r.x,row_names=r.row_names,col_names=r.col_names)\n self.log(\"Schur's complement\")\n return self.__posterior_parameter", "def _get_init_pose(self):\n return self.init_pose_R, self.init_pose_t", "def extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def orientation(self):\n return self._orientation", "def orientation(self):\n return self._orientation", "def getDerivativeSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n \n # Derivation\n xi = sy.symbols('xi')\n self.dudx_xyPlane = sy.diff(self.u_xyPlane, xi) / L\n \n # Then calculate the derivation equation on x-z plane\n self.dudx_xzPlane = sy.diff(self.u_xzPlane, xi) / L", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def calc_axes(self):\n y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n y_axis *= self.pixelsize[0]\n x_axis *= self.pixelsize[1]\n return x_axis, y_axis", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def GetEigenvectors(self):\n\t\treturn self.Solver.GetEigenvectors()", "def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy", "def _derY(self, x, y):\n x_pos, y_pos = self.find_sector(x, y)\n alpha, beta = self.find_coords(x, y, x_pos, y_pos)\n\n # Get four corners data for each point\n xA = self.x_values[x_pos, y_pos]\n xB = self.x_values[x_pos + 1, y_pos]\n xC = self.x_values[x_pos, y_pos + 1]\n xD = self.x_values[x_pos + 1, y_pos + 1]\n yA = self.y_values[x_pos, y_pos]\n yB = self.y_values[x_pos + 1, y_pos]\n yC = self.y_values[x_pos, y_pos + 1]\n yD = self.y_values[x_pos + 1, y_pos + 1]\n fA = self.f_values[x_pos, y_pos]\n fB = self.f_values[x_pos + 1, y_pos]\n fC = self.f_values[x_pos, y_pos + 1]\n fD = self.f_values[x_pos + 1, y_pos + 1]\n\n # Calculate components of the alpha,beta --> x,y delta translation matrix\n alpha_x = (1 - beta) * (xB - xA) + beta * (xD - xC)\n alpha_y = (1 - beta) * (yB - yA) + beta * (yD - yC)\n beta_x = (1 - alpha) * (xC - xA) + alpha * (xD - xB)\n beta_y = (1 - alpha) * (yC - yA) + alpha * (yD - yB)\n\n # Invert the delta translation matrix into x,y --> alpha,beta\n det = alpha_x * beta_y - beta_x * alpha_y\n y_alpha = -beta_x / det\n y_beta = alpha_x / det\n\n # Calculate the derivative of f w.r.t. 
alpha and beta\n dfda = (1 - beta) * (fB - fA) + beta * (fD - fC)\n dfdb = (1 - alpha) * (fC - fA) + alpha * (fD - fB)\n\n # Calculate the derivative with respect to x (and return it)\n dfdy = y_alpha * dfda + y_beta * dfdb\n return dfdy", "def yprojection(self):\n return self.image.sum(axis=1)", "def test_array_orientation_consistency_tilt():\n samples = 128\n p = FringeZernike(Z2=1000, samples=samples)\n ps = PSF.from_pupil(p, 1)\n idx_y, idx_x = np.unravel_index(ps.data.argmax(), ps.data.shape) # row-major y, x\n assert idx_x == ps.center_x\n assert idx_y > ps.center_y", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def point_orientation(a, b, c):\n return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y) >= 0", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def reflect_base_points(base_pts, domain_size):\n domain_size = np.array(domain_size)\n if len(domain_size) == 1:\n r, theta, phi = base_pts\n new_r = 2*domain_size[0] - r\n r = np.hstack([r, new_r])\n theta = np.hstack([theta, theta])\n phi = np.hstack([phi, phi])\n base_pts = np.vstack((r, theta, phi))\n if len(domain_size) == 2:\n r, theta, z = base_pts\n new_r = 2*domain_size[0] - r\n r = np.hstack([r, new_r])\n theta = np.hstack([theta, theta])\n z = np.hstack([z, z])\n if domain_size[1] != 0: # If not a disk\n r = np.hstack([r, r, r])\n theta = np.hstack([theta, theta, theta])\n z = np.hstack([z, -z, 2*domain_size[1]-z])\n base_pts = np.vstack((r, theta, z))\n elif len(domain_size) == 3:\n Nx, Ny, Nz = domain_size\n # Reflect base points about all 6 faces\n orig_pts = base_pts\n base_pts = np.vstack((base_pts,\n [-1, 1, 1] * orig_pts + [2.0 * Nx, 0, 0]))\n base_pts = np.vstack((base_pts, [-1, 1, 1] * orig_pts))\n base_pts = np.vstack((base_pts,\n [1, -1, 1] * orig_pts + [0, 2.0 * Ny, 0]))\n base_pts = np.vstack((base_pts, [1, -1, 1] * orig_pts))\n if domain_size[2] != 0:\n base_pts = np.vstack((base_pts,\n [1, 1, -1] * orig_pts + [0, 0, 2.0 * Nz]))\n base_pts = np.vstack((base_pts, [1, 1, -1] * orig_pts))\n return base_pts", "def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w", "def photons(self, depth=1):\n self.dx[:,:self.W-1] = self.z[:,1:] - self.z[:,:self.W-1]\n self.dy[:self.H-1,:] = self.z[1:,:] - self.z[:self.H-1,:]\n px = self.xv - self.dx*depth\n py = self.yv - self.dy*depth\n return px,py", "def test_estimate_head_pose_hight_level_with_use_orientation_mode(self):\n\n faceEngine = VLFaceEngine()\n faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1\n detector = VLFaceDetector(DetectorType.FACE_DET_V3, faceEngine)\n\n angles0 = 
detector.detectOne(VLImage.load(filename=ROTATED0)).headPose\n angles90 = detector.detectOne(VLImage.load(filename=ROTATED90)).headPose\n\n assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch\n assert pytest.approx(angles90.roll, abs=2) == angles0.roll\n assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw", "def exterior_interior_points_eval(grid, points, solid_angle_tolerance, verbose=False):\n\n elements = grid.leaf_view.elements\n vertices = grid.leaf_view.vertices\n number_of_elements = grid.leaf_view.entity_count(0)\n elem = list(grid.leaf_view.entity_iterator(0))\n\n element_property = _np.zeros(number_of_elements, dtype=_np.int)\n element_groups = _np.zeros(shape=(4, number_of_elements), dtype=_np.int)\n element_groups[1:4, :] = elements\n for i in range(number_of_elements):\n property_number = elem[i].domain\n element_property[i] = property_number\n element_groups[0, i] = property_number\n\n element_properties = _np.array(list(set(element_property)), dtype=_np.int)\n if verbose:\n print(\"Element groups are:\")\n print(element_properties)\n\n points_interior = []\n points_exterior = []\n points_boundary = []\n index_interior = []\n index_exterior = _np.full(points.shape[1], True, dtype=bool)\n index_boundary = []\n\n for i in range(element_properties.size):\n\n elements_trunc = elements[:, element_groups[0, :] == element_properties[i]]\n num_elem = elements_trunc.shape[1]\n\n elements_x_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_y_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_z_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n # Populate grid vertices matrices\n for k in range(3):\n elements_x_coordinate[k, :] = vertices[0, elements_trunc[k, :]]\n elements_y_coordinate[k, :] = vertices[1, elements_trunc[k, :]]\n elements_z_coordinate[k, :] = vertices[2, elements_trunc[k, :]]\n # Obtain coordinates of triangular elements centroielements_surface_area\n # through barycentric method.\n elements_barycent_x_coordinate = _np.mean(elements_x_coordinate, axis=0)\n elements_barycent_y_coordinate = _np.mean(elements_y_coordinate, axis=0)\n elements_barycent_z_coordinate = _np.mean(elements_z_coordinate, axis=0)\n\n # Preallocate matrix of vectors for triangular elementses\n elements_u_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_v_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n # Compute matrix of vectors defining each triangular elements\n elements_u_coordinate = _np.array(\n [\n elements_x_coordinate[1, :] - elements_x_coordinate[0, :],\n elements_y_coordinate[1, :] - elements_y_coordinate[0, :],\n elements_z_coordinate[1, :] - elements_z_coordinate[0, :],\n ]\n )\n elements_v_coordinate = _np.array(\n [\n elements_x_coordinate[2, :] - elements_x_coordinate[0, :],\n elements_y_coordinate[2, :] - elements_y_coordinate[0, :],\n elements_z_coordinate[2, :] - elements_z_coordinate[0, :],\n ]\n )\n elements_u_cross_v = _np.cross(\n elements_u_coordinate, elements_v_coordinate, axisa=0, axisb=0, axisc=0\n )\n elements_u_cross_v_norm = _np.linalg.norm(elements_u_cross_v, axis=0)\n # Obtain outward pointing unit normal vectors for each elements\n normals = _np.divide(elements_u_cross_v, elements_u_cross_v_norm)\n # Obtain surface area of each elements\n elements_surface_area = 0.5 * elements_u_cross_v_norm\n\n start_time = _time.time()\n N_workers = _mp.cpu_count()\n parallelised_compute_solid_angle = _partial(\n compute_solid_angle,\n elements_barycent_x_coordinate,\n 
elements_barycent_y_coordinate,\n elements_barycent_z_coordinate,\n points,\n normals,\n elements_surface_area,\n )\n pool = _mp.Pool(N_workers)\n result = pool.starmap(\n parallelised_compute_solid_angle, zip(_np.arange(0, points.shape[1]))\n )\n pool.close()\n end_time = _time.time() - start_time\n if verbose:\n print(\"Time to complete solid angle field parallelisation: \", end_time)\n solid_angle = _np.hstack(result)\n if solid_angle_tolerance:\n index_interior_tmp = solid_angle > 0.5 + solid_angle_tolerance\n index_boundary_tmp = (solid_angle > 0.5 - solid_angle_tolerance) & (\n solid_angle < 0.5 + solid_angle_tolerance\n )\n points_boundary.append(points[:, index_boundary_tmp])\n index_boundary.append(index_boundary_tmp)\n index_exterior = index_exterior & (\n (index_interior_tmp == False) & (index_boundary_tmp == False)\n )\n else:\n index_interior_tmp = solid_angle > 0.5\n index_exterior = index_exterior & (index_interior_tmp == False)\n\n points_interior.append(points[:, index_interior_tmp])\n index_interior.append(index_interior_tmp)\n\n points_exterior = points[:, index_exterior]\n\n return (\n points_interior,\n points_exterior,\n points_boundary,\n index_interior,\n index_exterior,\n index_boundary,\n )", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)", "def _get_geometric_augmentation_parameter(self, entry: SupervisedKeypointDBEntry) -> (float, float):\n # Not training\n if not self._is_train:\n return 1.0, 0.0\n\n # For scale\n scale = np.clip(np.random.randn(), -1.0, 1.0) * self._config.aug_scale_factor + 1.0\n\n # For rotate:\n if random.random() < self._config.aug_rot_rate and (not entry.on_boundary):\n rotate_rad = np.clip(np.random.randn(), -2.0, 2.0) * self._config.aug_rot_rad_factor\n else:\n rotate_rad = 0.0\n\n # OK\n return scale, rotate_rad", "def setup_orientation_annotation(self) :\n \n # Anatomical directions in LPS convention, numpy order\n directions_anatomical = {\n \"L\" : (0,0,+1),\n \"R\" : (0,0,-1),\n \"P\" : (0,+1,0),\n \"A\" : (0,-1,0),\n \"I\" : (-1,0,0),\n \"S\" : (+1,0,0),\n }\n \n # Index directions, numpy order\n directions_index = {\n \"+x\" : (0,0,+1),\n \"-x\" : (0,0,-1),\n \"+y\" : (0,+1,0),\n \"-y\" : (0,-1,0),\n \"+z\" : (-1,0,0),\n \"-z\" : (+1,0,0),\n }\n \n directions = (directions_anatomical \n if self.display_coordinates in [\"physical\", \"nearest_axis_aligned\"]\n else directions_index)\n \n # Window locations\n locations = {\n \"up\" : (1,0),\n \"down\" : (-1,0),\n \"left\" : (0,-1),\n \"right\" : (0,1)\n }\n \n for location, p in locations.items() :\n matrix = self._3d_world_to_slice\n direction = numpy.dot(self._3d_slice_to_world, numpy.hstack((0, p)))\n \n # Find closest in-slice direction based on dot product\n closest = None\n max_distance = -1\n for name, d in directions.items() :\n distance = numpy.dot(d, direction)\n if distance > max_distance :\n max_distance = distance\n closest = name\n \n # Set text\n index = self._orientation_annotation_index[location]\n self._orientation_annotation.SetText(index, closest)", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def test_estimate_head_pose_with_use_orientation_mode(self):\n\n faceEngine = VLFaceEngine()\n faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n images = [VLImage.load(filename=ROTATED0), VLImage.load(filename=ROTATED90)]\n detections = 
detector.detect(images, detect68Landmarks=True)\n angles0 = TestHeadPose.headPoseEstimator.estimate(detections[0][0].landmarks68)\n angles90 = TestHeadPose.headPoseEstimator.estimate(detections[1][0].landmarks68)\n\n assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch\n assert pytest.approx(angles90.roll, abs=2) == angles0.roll\n assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity \n\n return eps, mu" ]
[ "0.71254003", "0.68282026", "0.6420376", "0.61255026", "0.61024475", "0.5787343", "0.57387304", "0.5717728", "0.5698031", "0.567763", "0.5626359", "0.55450577", "0.5525408", "0.5507461", "0.54807997", "0.54278624", "0.5404268", "0.53900427", "0.5351375", "0.5299402", "0.5291824", "0.5286612", "0.5283092", "0.52719927", "0.5255926", "0.5250085", "0.5173937", "0.514478", "0.512267", "0.51066446", "0.51020575", "0.50985557", "0.5087729", "0.5079079", "0.5057393", "0.504156", "0.50404125", "0.5033221", "0.50285715", "0.50285715", "0.50285715", "0.5024489", "0.50170964", "0.50170964", "0.5001144", "0.4998715", "0.49979356", "0.499453", "0.4993955", "0.49870342", "0.49777684", "0.49722567", "0.49708712", "0.49616125", "0.49565193", "0.4948623", "0.4945738", "0.49343586", "0.49258715", "0.49244723", "0.4920674", "0.49206284", "0.4919384", "0.48953095", "0.48896152", "0.48885545", "0.4881412", "0.48752126", "0.48741966", "0.48716462", "0.48697534", "0.4851721", "0.48491317", "0.4847848", "0.48443484", "0.483214", "0.483214", "0.48273113", "0.48214126", "0.482082", "0.48175353", "0.4799199", "0.47973904", "0.47951606", "0.4794221", "0.4784934", "0.47832796", "0.47816518", "0.47783676", "0.47711885", "0.47707", "0.4769085", "0.47658572", "0.47535357", "0.4752991", "0.47518855", "0.47466585", "0.47452712", "0.47369626", "0.47312072" ]
0.69500554
1
Compute exterior orientation parameters. This function can be used in conjunction with ``self.__ComputeDesignMatrix(groundPoints)`` and ``self.__ComputeObservationVector(imagePoints)``
def ComputeExteriorOrientation_RzRyRz(self, imagePoints, groundPoints, epsilon): # cameraPoints = self.ImageToCamera(imagePoints) cameraPoints = imagePoints self.exteriorOrientationParameters[0:3] = np.dot(self.rotationMatrix_RzRyRz, self.exteriorOrientationParameters[0:3]) self.exteriorOrientationParameters = np.add(self.exteriorOrientationParameters, np.random.normal(0, 0.01, self.exteriorOrientationParameters.shape)) l0 = self.__ComputeObservationVector_RzRyRz(groundPoints.T) l0 = np.reshape(l0, (-1, 1)) l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0 A = self.__ComputeDesignMatrix_RzRyRz(groundPoints.T) N = np.dot(A.T, A) u = np.dot(A.T, l) deltaX = np.dot(la.inv(N), u) # update orientation pars self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6)) while la.norm(deltaX) > epsilon: l0 = self.__ComputeObservationVector_RzRyRz(groundPoints.T) l0 = np.reshape(l0, (-1, 1)) l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0 A = self.__ComputeDesignMatrix_RzRyRz(groundPoints.T) N = np.dot(A.T, A) u = np.dot(A.T, l) deltaX = np.dot(la.inv(N), u) # update orientation pars self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6)) # compute residuals l_a = np.reshape(self.__ComputeObservationVector_RzRyRz(groundPoints.T), (-1, 1)) v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1) if (np.size(A, 0) - np.size(deltaX)) != 0: sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX)) sigmaX = sig[0] * la.inv(N) else: sigmaX = None return [self.exteriorOrientationParameters, sigmaX, v]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):\n # cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = imagePoints\n self.__ComputeApproximateVals(cameraPoints, groundPoints)\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n while la.norm(deltaX) > epsilon:\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n # compute residuals\n l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1))\n v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)\n if (np.size(A, 0) - np.size(deltaX)) != 0:\n sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))\n sigmaX = sig[0] * la.inv(N)\n else:\n sigmaX = None\n\n return [self.exteriorOrientationParameters, sigmaX, v]", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def __ComputeDesignMatrix(self, groundPoints):\n # initialization for readability\n omega = self.exteriorOrientationParameters[3]\n phi = self.exteriorOrientationParameters[4]\n kappa = 
self.exteriorOrientationParameters[5]\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n\n rotationMatrixT = self.rotationMatrix.T\n rotatedG = rotationMatrixT.dot(dXYZ)\n rT1g = rotatedG[0, :]\n rT2g = rotatedG[1, :]\n rT3g = rotatedG[2, :]\n\n focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2\n\n dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]\n dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :]\n\n dgdX0 = np.array([-1, 0, 0], 'f')\n dgdY0 = np.array([0, -1, 0], 'f')\n dgdZ0 = np.array([0, 0, -1], 'f')\n\n # Derivatives with respect to X0\n dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)\n dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)\n\n # Derivatives with respect to Y0\n dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)\n dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)\n\n # Derivatives with respect to Z0\n dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)\n dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)\n\n dRTdOmega = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'omega').T\n dRTdPhi = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'phi').T\n dRTdKappa = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'kappa').T\n\n gRT3g = dXYZ * rT3g\n\n # Derivatives with respect to Omega\n dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Phi\n dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Kappa\n dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n # all derivatives of x and y\n dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,\n np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T])\n\n a = np.zeros((2 * dd[0].shape[0], 6))\n a[0::2] = dd[0]\n a[1::2] = dd[1]\n\n return a", "def __ComputeDesignMatrix_RzRyRz(self, groundPoints):\n # initialization for readability\n azimuth = self.exteriorOrientationParameters[3]\n phi = self.exteriorOrientationParameters[4]\n kappa = self.exteriorOrientationParameters[5]\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n\n rotationMatrixT = self.rotationMatrix_RzRyRz.T\n rotatedG = rotationMatrixT.dot(dXYZ)\n rT1g = rotatedG[0, :]\n rT2g = rotatedG[1, :]\n rT3g = rotatedG[2, :]\n\n focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2\n\n dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]\n dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * 
rotationMatrixT[2, :][None, :]\n\n dgdX0 = np.array([-1, 0, 0], 'f')\n dgdY0 = np.array([0, -1, 0], 'f')\n dgdZ0 = np.array([0, 0, -1], 'f')\n\n # Derivatives with respect to X0\n dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)\n dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)\n\n # Derivatives with respect to Y0\n dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)\n dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)\n\n # Derivatives with respect to Z0\n dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)\n dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)\n\n dRTdOmega = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'azimuth').T\n dRTdPhi = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'phi').T\n dRTdKappa = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'kappa').T\n\n gRT3g = dXYZ * rT3g\n\n # Derivatives with respect to Omega\n dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Phi\n dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]\n\n # Derivatives with respect to Kappa\n dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -\n rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -\n rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]\n\n # all derivatives of x and y\n dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,\n np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T])\n\n a = np.zeros((2 * dd[0].shape[0], 6))\n a[0::2] = dd[0]\n a[1::2] = dd[1]\n\n return a", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def _exteriorFaces(self):\n XYids = self._XYFaceIDs\n XZids = self._XZFaceIDs\n YZids = self._YZFaceIDs\n\n exteriorIDs = numerix.concatenate((numerix.ravel(XYids[..., 0].swapaxes(0, 1)),\n numerix.ravel(XYids[..., -1].swapaxes(0, 1)),\n numerix.ravel(XZids[:, 0,:]),\n numerix.ravel(XZids[:, -1,:]),\n numerix.ravel(YZids[ 0, ...]),\n numerix.ravel(YZids[-1, ...])))\n\n from fipy.variables.faceVariable import FaceVariable\n exteriorFaces = FaceVariable(mesh=self, value=False)\n exteriorFaces[exteriorIDs] = True\n return exteriorFaces", "def __ComputeObservationVector(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def __ComputeApproximateVals(self, cameraPoints, groundPoints):\n\n # Find approximate values\n 
cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 
26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def exterior_der(self):\n from sage.calculus.functional import diff\n from utilities import format_unop_txt, format_unop_latex\n from sage.tensor.modules.comp import CompFullyAntiSym\n from vectorframe import CoordFrame\n if self._exterior_derivative is None:\n # A new computation is necessary:\n fmodule = self._fmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n self._exterior_derivative = DiffFormParal(fmodule, \n self._tensor_rank+1, \n name=rname, \n latex_name=rlname)\n # 1/ List of all coordinate frames in which the components of self\n # are known\n coord_frames = []\n for frame in self._components:\n if isinstance(frame, CoordFrame):\n coord_frames.append(frame)\n if coord_frames == []:\n # A coordinate frame is searched, at the price of a change of\n # frame, priveleging the frame of the domain's default chart\n dom = self._domain\n def_coordf = dom._def_chart._frame\n for frame in self._components:\n if (frame, def_coordf) in dom._frame_changes:\n self.comp(def_coordf, from_basis=frame)\n coord_frames = [def_coordf]\n break\n if coord_frames == []:\n for chart in dom._atlas:\n if chart != dom._def_chart: # the case def_chart is treated above\n coordf = chart._frame\n for frame in self._components:\n if (frame, coordf) in dom._frame_changes:\n self.comp(coordf, from_basis=frame)\n coord_frames[coordf]\n break\n if coord_frames != []:\n break \n # 2/ The computation:\n for frame in coord_frames:\n chart = frame._chart\n sc = self._components[frame]\n dc = CompFullyAntiSym(fmodule._ring, frame, \n self._tensor_rank+1, \n start_index=fmodule._sindex,\n output_formatter=fmodule._output_formatter)\n for ind, val in sc._comp.iteritems():\n for i in fmodule.irange():\n ind_d = (i,) + ind\n if len(ind_d) == len(set(ind_d)): \n # all indices are different\n dc[[ind_d]] += \\\n val.function_chart(chart).diff(i).scalar_field()\n self._exterior_derivative._components[frame] = dc\n return self._exterior_derivative", "def define_orientation_matrix(self):\n from lmfit import Parameters\n p = Parameters()\n for i in range(3):\n for j in range(3):\n p.add('U%d%d' % (i, j), self.Umat[i, j])\n self.init_p = self.Umat\n return p", "def __ComputeApproximateVals_RzRyRz(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n 
self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0.2, 0.2, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def __ComputeObservationVector_RzRyRz(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix_RzRyRz.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def Orientation(self, *args):\n return _Adaptor3d.Adaptor3d_TopolTool_Orientation(self, *args)", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def yy(self):\n return self.exterior[:, 1]", "def exterior_der(self):\n from utilities import format_unop_txt, format_unop_latex\n if self._exterior_derivative is None:\n vmodule = self._vmodule # shortcut\n rname = format_unop_txt('d', self._name)\n rlname = format_unop_latex(r'\\mathrm{d}', self._latex_name)\n resu = vmodule.alternating_form(self._tensor_rank+1, name=rname, \n latex_name=rlname)\n for dom, rst in self._restrictions.iteritems():\n resu._restrictions[dom] = rst.exterior_der()\n self._exterior_derivative = resu\n return self._exterior_derivative", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def get_orientation_vector(self, xyz):\n if self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def orientation(self) -> Orientation:\n # if orientation was passed in, use it\n if self._orientation is not None:\n return convert_to_enum(self._orientation, Orientation)\n\n # replace any dead pixels with median value\n temp_image = self.image.array.copy()\n temp_image[temp_image < np.median(temp_image)] = np.median(temp_image)\n\n # find \"range\" of 80 to 90th percentiles\n row_sum = np.sum(temp_image, 0)\n col_sum = np.sum(temp_image, 1)\n row80, row90 = np.percentile(row_sum, [85, 99])\n col80, col90 = np.percentile(col_sum, [85, 99])\n row_range = row90 - row80\n col_range = col90 - col80\n\n # The true picket side 
will have a greater difference in\n # percentiles than will the non-picket size.\n if row_range < col_range:\n orientation = Orientation.LEFT_RIGHT\n else:\n orientation = Orientation.UP_DOWN\n return orientation", "def orientation(self, p, q, r):\n\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def raw_orient(\n cal: Calibration,\n cpar: ControlPar,\n nfix: int,\n fix: List[np.ndarray],\n pix: List[Target],\n) -> bool:\n X = np.zeros((10, 6))\n y = np.zeros((10,))\n XPX = np.zeros((6, 6))\n XPy = np.zeros((6,))\n beta = np.zeros((6,))\n itnum = 0\n stopflag = False\n dm = 0.0001\n drad = 0.0001\n cal.added_par.k1 = 0\n cal.added_par.k2 = 0\n cal.added_par.k3 = 0\n cal.added_par.p1 = 0\n cal.added_par.p2 = 0\n cal.added_par.scx = 1\n cal.added_par.she = 0\n\n while not stopflag and itnum < 20:\n itnum += 1\n\n n = 0\n for i in range(nfix):\n xc, yc = pixel_to_metric(pix[i].x, pix[i].y, cpar)\n\n pos = vec_set(fix[i][0], fix[i][1], fix[i][2])\n cal.ext_par.update_rotation_matrix()\n xp, yp = img_coord(pos, cal, cpar.mm)\n\n X[n], X[n + 1] = num_deriv_exterior(cal, cpar, dm, drad, pos)\n y[n], y[n + 1] = xc - xp, yc - yp\n\n n += 2\n\n # void ata (double *a, double *ata, int m, int n, int n_large )\n ata(X, XPX, n, 6, 6)\n if np.any(XPX):\n XPXi = np.linalg.inv(XPX)\n else:\n XPXi = XPX\n\n # atl (double *u, double *a, double *l, int m, int n, int n_large)\n XPy = atl(XPy, X, y, 6)\n beta = XPXi @ XPy\n\n # ata ((double *) X, (double *) XPX, n, 6, 6);\n # matinv ((double *) XPX, 6, 6);\n # atl ((double *) XPy, (double *) X, y, n, 6, 6);\n # matmul ((double *) beta, (double *) XPX, (double *) XPy, 6,6,1,6,6);\n\n stopflag = all(abs(beta) <= 0.1)\n\n cal.ext_par.x0 += beta[0]\n cal.ext_par.y0 += beta[1]\n cal.ext_par.z0 += beta[2]\n cal.ext_par.omega += beta[3]\n cal.ext_par.phi += beta[4]\n cal.ext_par.kappa += beta[5]\n\n if stopflag:\n cal.ext_par.rotation_matrix()\n\n return stopflag", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def num_deriv_exterior(\n cal: Calibration, cpar: ControlPar, dpos: float, dang: float, pos: vec3d\n):\n var = [\n cal.ext_par.x0,\n cal.ext_par.y0,\n cal.ext_par.z0,\n cal.ext_par.omega,\n cal.ext_par.phi,\n cal.ext_par.kappa,\n ]\n x_ders = np.zeros(6)\n y_ders = np.zeros(6)\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n xs, ys = img_coord(pos, cal, cpar.mm)\n\n for pd in range(6):\n step = dang if pd > 2 else dpos\n var[pd] += step\n\n if pd > 2:\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n xpd, ypd = img_coord(pos, cal, cpar.mm)\n x_ders[pd] = (xpd - xs) / step\n y_ders[pd] = (ypd - ys) / step\n\n var[pd] -= step\n\n cal.ext_par = rotation_matrix(cal.ext_par)\n\n return (x_ders, y_ders)", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n 
for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def euler_to_rodrigues(X_params):\n data_samples = X_params.shape[0]\n pose_euler = np.array([X_params[:, i:i+3] for i in range(0, 72, 3)])\n #print(pose_euler[0][0])\n #pose_euler = pose_euler.reshape((24, data_samples, 1, 3))\n #print(pose_euler[0][0])\n print(\"pose_euler shape: \" + str(pose_euler.shape))\n #R = np.array([[eulerAnglesToRotationMatrix(vector) for vector in vectors] for vectors in pose_euler])\n #print(\"R shape: \" + str(R.shape))\n #print(R[0][0])\n #R = R.reshape((data_samples, 24, 3, 3))\n\n #pose_params = np.array([[Rot.from_dcm(rot_mat).as_rotvec() for rot_mat in param_rot_mats] for param_rot_mats in R])\n pose_params = np.array([Rot.from_euler('xyz', vectors, degrees=False).as_rotvec() for vectors in pose_euler])\n print(\"pose_params shape: \" + str(pose_params.shape))\n pose_params = pose_params.reshape((data_samples, 72))\n print(\"pose_params shape: \" + str(pose_params.shape))\n print(\"other params shape: \" + str(X_params[:, 72:85].shape))\n X_params = np.concatenate([pose_params, X_params[:, 72:85]], axis=1)\n print(\"X_params shape: \" + str(X_params.shape))\n\n return X_params", "def get_orientation(self):\r\n return self.__orientation", "def get_camera_orientation(self):\n\n # Create the vector from the camera to the robot\n vector_x = self.robot_x - self.camera_x\n vector_y = self.robot_y - self.camera_y\n vector_z = self.robot_z - self.camera_z\n\n # Calculate yaw and pitch from this vector\n yaw = math.atan2(vector_y, vector_x)\n pitch = -math.asin(vector_z)\n\n # Create the quaternion from the euler angles\n self.quaternion = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(0, pitch, yaw))", "def recoverParams(self):\n self.shape, self.rate = self.posterior[1] + 1, -self.posterior[0]", "def getSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n ys = np.array(self.XYProjections)[:,1]\n zs = np.array(self.XZProjections)[:,1]\n\n L = xs[-1] - xs[0]\n self.L = L\n xis = (xs - xs[0]) / L\n\n errorValue = lambda x,y,A: y - np.dot(A, x)\n a_init = np.array([1] * 4)\n\n # Calculate the derivation equation on x-y plane\n # Get the optimal parameters using least squre error method\n a1 = sp.optimize.leastsq(errorValue, a_init, args=(ys, self._H(xis, L)))[0]\n self.alpha_xyPlane = a1\n \n # Derivation\n xi = sy.symbols('xi')\n self.u_xyPlane = (self._H(xi, L, ifsymbol=True) * a1).sum()\n \n # Then calculate the derivation equation on x-z plane\n a2 = sp.optimize.leastsq(errorValue, a_init, args=(zs, self._H(xis, L)))[0]\n self.alpha_xzPlane = a2\n self.u_xzPlane = (self._H(xi, L, 
ifsymbol=True) * a2).sum()", "def pressure_equality_deriv(self):\n deriv = np.zeros((\n self.num_i,\n self.num_i + self.num_o + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 1] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 1] = -1\n return deriv", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def xx(self):\n return self.exterior[:, 0]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def get_pos_and_orientation(self):\n pos, rot = p.getBasePositionAndOrientation(self.pybullet_id)\n euler = p.getEulerFromQuaternion(rot)\n return np.array(pos), euler[2]", "def base_orientation_quaternion(self):\n raise NotImplementedError('Not yet implemented!')", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def _get_params(self):\r\n return np.hstack((self.varianceU,self.varianceY, self.lengthscaleU,self.lengthscaleY))", "def calculateOrientation(self):\n orientation = [\n [Orientation.LANDSCAPE, Orientation.PORTRAIT],\n [Orientation.CW_LANDSCAPE, Orientation.CW_PORTRAIT],\n [Orientation.CCW_LANDSCAPE, Orientation.CCW_PORTRAIT],\n [Orientation.FLIPPED_LANDSCAPE, Orientation.FLIPPED_PORTRAIT],\n ]\n exif = self.getExif()\n if exif and exif['Orientation'] == 3: #180\n rotation = 3\n elif exif and exif['Orientation'] == 6: #90 CCW\n rotation = 2\n elif exif and exif['Orientation'] == 8: #90 CW\n rotation = 1\n else:\n rotation = 0\n\n if self.isLandscape():\n return orientation[rotation][0]\n else:\n return orientation[rotation][1]", "def get_affine_reg_params(self):\n affine_params = [\n self.affine_reg_pyramid_steps,\n self.affine_reg_used_pyramid_steps,\n ]\n return affine_params", "def calc_axes(self):\n self.y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n self.x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n self.y_axis *= self.pixelsize[0]\n self.x_axis *= self.pixelsize[1]\n\n # %%RETRIEVING FUNCTIONS", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def orientation(self):\n agents = self.board[self.agent_locs_idx]\n out = (agents & CellTypes.orientation_mask) >> CellTypes.orientation_bit\n return out.astype(np.int64)", "def test_active_matrix_from_extrinsic_euler_zyz():\n assert_array_almost_equal(\n 
pr.active_matrix_from_extrinsic_roll_pitch_yaw([0.5 * np.pi, 0, 0]),\n np.array([\n [1, 0, 0],\n [0, 0, -1],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 1, 0],\n [0, 0, -1],\n [-1, 0, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [0, 1, 0],\n [-1, 0, 0]\n ])\n )", "def extrinsic(self):\n return self._extrinsic", "def res(self) -> tuple[float, float]:\n xs, ys = self.xcoords.data, self.ycoords.data\n dx, dy = 0, 0\n if xs.ndim == 1:\n dx = xs[1] - xs[0]\n dy = ys[1] - ys[0]\n elif xs.ndim == 2:\n ddx0 = xs[1, 0] - xs[0, 0]\n ddy0 = ys[1, 0] - ys[0, 0]\n ddx1 = xs[0, 1] - xs[0, 0]\n ddy1 = ys[0, 1] - ys[0, 0]\n dx = math.hypot(ddx1, ddy1) # always positive!\n dy = math.hypot(ddx0, ddy0)\n rot = self.rotation\n acos = math.cos(math.radians(rot))\n # find grid top-down orientation\n if (\n (acos < 0 and ddy0 > 0)\n or (acos > 0 and ddy0 < 0)\n or (\n ddy0 == 0\n and (np.isclose(rot, 270) and ddx0 < 0)\n or (np.isclose(rot, 90) and ddx0 > 0)\n )\n ):\n dy = -1 * dy\n return dx, dy", "def orientation(point_p, point_q, point_r):\n # Set https://www.geeksforgeeks.org/orientation-3-ordered-points/\n # for details of below formula.\n r = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -\n (point_q.x - point_p.x) * (point_r.y - point_q.y))\n if r == 0:\n return 0\n return 1 if r > 0 else 2", "def _calculate_parameters(self, thickness: int = 10):\n\n self.thickness = thickness\n\n # set orientation dependent parameters: (different for x, y, z-PML)\n # NOTE: these methods are implemented by the subclasses of PML.\n self._set_locations()\n self._set_shape()\n self._set_sigmaE()\n self._set_sigmaH()\n\n # set the other parameters\n Nx, Ny, Nz = self.shape # is defined by _set_shape()\n self.phi_E = bd.zeros((Nx, Ny, Nz, 3))\n self.phi_H = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ex = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ey = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Ez = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hx = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hy = bd.zeros((Nx, Ny, Nz, 3))\n self.psi_Hz = bd.zeros((Nx, Ny, Nz, 3))\n\n self.bE = bd.exp(-(self.sigmaE / self.k + self.a) * self.grid.courant_number)\n self.cE = (\n (self.bE - 1.0)\n * self.sigmaE # is defined by _set_sigmaE()\n / (self.sigmaE * self.k + self.a * self.k ** 2)\n )\n\n self.bH = bd.exp(-(self.sigmaH / self.k + self.a) * self.grid.courant_number)\n self.cH = (\n (self.bH - 1.0)\n * self.sigmaH # is defined by _set_sigmaH()\n / (self.sigmaH * self.k + self.a * self.k ** 2)\n )", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def orientation(self, point):\n p_x = self.begin.x\n p_y = self.begin.y\n\n q_x = self.end.x\n q_y = self.end.y\n\n r_x = point.x\n r_y = point.y\n\n D = q_x * r_y + p_x * q_y + p_y * r_x - q_x * p_y - r_x * q_y - 
r_y * p_x\n\n if D > 0:\n return 1\n elif D == 0:\n return 0\n else:\n return -1", "def enthalpy_equality_deriv(self):\n deriv = np.zeros((\n self.num_i,\n self.num_i + self.num_o + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 2] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 2] = -1\n return deriv", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def get_orienationDict(self,orienation='zyx'):\n try:\n _str_func = 'rootShape_update'\n log.debug(cgmGEN.logString_start(_str_func))\n \n _d = {}\n _mOrientation = VALID.simpleOrientation('zyx')#cgmValid.simpleOrientation(str(modules.returnSettingsData('jointOrientation')) or 'zyx')\n _d['str'] = _mOrientation.p_string\n _d['mOrientation'] = _mOrientation\n _d['vectorAim'] = _mOrientation.p_aim.p_vector\n _d['vectorUp'] = _mOrientation.p_up.p_vector\n _d['vectorOut'] = _mOrientation.p_out.p_vector\n \n _d['vectorAimNeg'] = _mOrientation.p_aimNegative.p_vector\n _d['vectorUpNeg'] = _mOrientation.p_upNegative.p_vector\n _d['vectorOutNeg'] = _mOrientation.p_outNegative.p_vector\n \n \n _d['stringAim'] = _mOrientation.p_aim.p_string\n _d['stringUp'] = _mOrientation.p_up.p_string\n _d['stringOut'] = _mOrientation.p_out.p_string\n \n _d['stringAimNeg'] = _mOrientation.p_aimNegative.p_string\n _d['stringUpNeg'] = _mOrientation.p_upNegative.p_string\n _d['stringOutNeg'] = _mOrientation.p_outNegative.p_string \n return _d\n except Exception,err:\n cgmGEN.cgmExceptCB(Exception,err)", "def P(self):\n self.eigenmatrix()", "def get_orientation(self):\n return self._orientation", "def aic(self):\n return 2*self.number_of_parameters() - 2*self.ll[-1]", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def calculate_ic(self):\n # dt:\n dt = self.E\n\n # dr:\n dr = np.sqrt(self.E ** 2 - (self.Q + self.L ** 2) / self.r ** 2)\n #print(dr)\n if np.isnan(dr):\n dr = 0\n #dr = self._check_dr_sign(self.alpha)\n\n # dtheta:\n omega = self.Q - self.L ** 2 * (np.cos(self.theta) / np.sin(self.theta)) ** 2\n if omega < 0:\n omega = np.abs(omega)\n dtheta = np.sqrt(omega) / self.r**2\n if self.eta < np.pi / 2:\n dtheta *= -1\n\n # dphi:\n dphi = self.L / (self.r * np.sin(self.theta)) ** 2\n\n return dt, dr, dtheta, dphi", "def _sector_orientation(self, vertices):\n if not vertices[0] == vertices[-1]:\n vertices.append(vertices[0])\n xy = np.transpose(np.array(vertices))\n x, y = xy[0], xy[1]\n return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0, vertices", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def vorticity(self):\n \n ux,_ = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n _,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n # self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y'])\n self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y','t'])\n \n if len(self._obj.attrs['units']) == 4:\n 
vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append('1/dt')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = ('1/dt')\n\n\n return self._obj", "def test_active_matrix_from_extrinsic_roll_pitch_yaw():\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz([0.5 * np.pi, 0, 0]),\n np.array([\n [0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [-1, 0, 0],\n [0, -1, 0],\n [0, 0, 1]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_euler_zyz(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [-1, 0, 0],\n [0, 0, 1],\n [0, 1, 0]\n ])\n )", "def galaxy1_orbital_orientation(self):\n return self._galaxy1_orbital_orientation", "def get_iperparams(self):\n\t\treturn (self.D, self.K)", "def posterior_parameter(self):\n if self.__posterior_parameter is not None:\n return self.__posterior_parameter\n else:\n self.clean()\n self.log(\"Schur's complement\")\n r = (self.xtqx + self.parcov.inv).inv\n assert r.row_names == r.col_names\n self.__posterior_parameter = Cov(r.x,row_names=r.row_names,col_names=r.col_names)\n self.log(\"Schur's complement\")\n return self.__posterior_parameter", "def _get_init_pose(self):\n return self.init_pose_R, self.init_pose_t", "def extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def orientation(self):\n return self._orientation", "def orientation(self):\n return self._orientation", "def getDerivativeSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n \n # Derivation\n xi = sy.symbols('xi')\n self.dudx_xyPlane = sy.diff(self.u_xyPlane, xi) / L\n \n # Then calculate the derivation equation on x-z plane\n self.dudx_xzPlane = sy.diff(self.u_xzPlane, xi) / L", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def calc_axes(self):\n y_axis = np.linspace(0, self.image_shape[0] - 1, self.image_shape[0])\n x_axis = np.linspace(0, self.image_shape[1] - 1, self.image_shape[1])\n if hasattr(self, 'pixelsize'):\n y_axis *= self.pixelsize[0]\n x_axis *= self.pixelsize[1]\n return x_axis, y_axis", "def _get_prior_params(self):\n # relation transformation matrix\n M_mu = np.eye(self.n_polarities, dtype=\"float32\")\n M_mu[1, :] = [0., 0.3, 0.]\n M_mu = np.tile(M_mu, (self.n_rels, 1)).reshape(\n self.n_rels, self.n_polarities, self.n_polarities\n )\n # for rel, rel_idx in iteritems(self.rel2idx):\n # # swap axes for contrastive relations\n # if check_rel(rel, CONTRASTIVE_RELS):\n # mu_i = M_mu[rel_idx]\n # mu_i[[0, 2]] = mu_i[[2, 0]]\n M_mu = torch.tensor(M_mu)\n M_sigma = torch.tensor(\n np.ones((self.n_rels, self.n_polarities, self.n_polarities),\n dtype=\"float32\")\n )\n # beta\n beta_p = 5. * torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n beta_q = 5. 
* torch.tensor(np.ones((self.n_rels, self.n_polarities),\n dtype=\"float32\"))\n # z_epsilon\n z_epsilon_p = torch.tensor(1.)\n z_epsilon_q = torch.tensor(15.)\n # scale factor\n scale_factor = torch.tensor(34.)\n return {\"M_mu\": M_mu, \"M_sigma\": M_sigma, \"beta_p\": beta_p,\n \"beta_q\": beta_q, \"z_epsilon_p\": z_epsilon_p,\n \"z_epsilon_q\": z_epsilon_q, \"scale_factor\": scale_factor}", "def GetEigenvectors(self):\n\t\treturn self.Solver.GetEigenvectors()", "def calc_enginprops(self):\n # Let's assemble the ABD matrix even if it is not required\n ABD = np.bmat([[self.A, self.B], [self.B, self.D]])\n ABD_inv = np.linalg.inv(ABD)\n # We would use the whole matrix. This gives results similar to elamX and considers poisson effects\n A_inv = ABD_inv[0:3, 0:3]\n self.Ex = 1 / (self.total_t * A_inv[0, 0]) # It is 2 * t because we need total thickness\n self.Ey = 1 / (self.total_t * A_inv[1, 1])\n self.Gxy = 1 / (self.total_t * A_inv[2, 2])\n self.poissonxy = - A_inv[0,1] / A_inv[0, 0]\n # Flexural stiffness properties\n self.zx = 0.0\n self.zy = 0.0\n zx_dem = 0.0\n zy_dem = 0.0\n self.EIx = 0.0\n self.EIy = 0.0\n z = 0.0\n # Calculate neutral axis in direction x and y\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n z += t / 2.0\n self.zx += Ex * t * z\n zx_dem += Ex * t\n self.zy += Ey * t * z\n zy_dem += Ey * t\n z += t / 2.0\n self.zx = self.zx / zx_dem\n self.zy = self.zy / zy_dem\n # Calculate EI in direction x and y\n z = 0.0\n for S_bar, t in zip(self.S_bars, self.ts):\n Ex = 1 / S_bar[0, 0]\n Ey = 1 / S_bar[1, 1]\n Gxy = 1 / S_bar[2, 2]\n z += t / 2.0\n self.EIx += Ex * (t**3 / 12 + t * (z - self.zx)**2)\n self.EIy += Ey * (t**3 / 12 + t * (z - self.zy)**2)\n self.GA += Gxy * t\n z += t / 2.0\n return self.Ex, self.Ey, self.Gxy, self.poissonxy", "def _derY(self, x, y):\n x_pos, y_pos = self.find_sector(x, y)\n alpha, beta = self.find_coords(x, y, x_pos, y_pos)\n\n # Get four corners data for each point\n xA = self.x_values[x_pos, y_pos]\n xB = self.x_values[x_pos + 1, y_pos]\n xC = self.x_values[x_pos, y_pos + 1]\n xD = self.x_values[x_pos + 1, y_pos + 1]\n yA = self.y_values[x_pos, y_pos]\n yB = self.y_values[x_pos + 1, y_pos]\n yC = self.y_values[x_pos, y_pos + 1]\n yD = self.y_values[x_pos + 1, y_pos + 1]\n fA = self.f_values[x_pos, y_pos]\n fB = self.f_values[x_pos + 1, y_pos]\n fC = self.f_values[x_pos, y_pos + 1]\n fD = self.f_values[x_pos + 1, y_pos + 1]\n\n # Calculate components of the alpha,beta --> x,y delta translation matrix\n alpha_x = (1 - beta) * (xB - xA) + beta * (xD - xC)\n alpha_y = (1 - beta) * (yB - yA) + beta * (yD - yC)\n beta_x = (1 - alpha) * (xC - xA) + alpha * (xD - xB)\n beta_y = (1 - alpha) * (yC - yA) + alpha * (yD - yB)\n\n # Invert the delta translation matrix into x,y --> alpha,beta\n det = alpha_x * beta_y - beta_x * alpha_y\n y_alpha = -beta_x / det\n y_beta = alpha_x / det\n\n # Calculate the derivative of f w.r.t. 
alpha and beta\n dfda = (1 - beta) * (fB - fA) + beta * (fD - fC)\n dfdb = (1 - alpha) * (fC - fA) + alpha * (fD - fB)\n\n # Calculate the derivative with respect to x (and return it)\n dfdy = y_alpha * dfda + y_beta * dfdb\n return dfdy", "def yprojection(self):\n return self.image.sum(axis=1)", "def test_array_orientation_consistency_tilt():\n samples = 128\n p = FringeZernike(Z2=1000, samples=samples)\n ps = PSF.from_pupil(p, 1)\n idx_y, idx_x = np.unravel_index(ps.data.argmax(), ps.data.shape) # row-major y, x\n assert idx_x == ps.center_x\n assert idx_y > ps.center_y", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def point_orientation(a, b, c):\n return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y) >= 0", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def reflect_base_points(base_pts, domain_size):\n domain_size = np.array(domain_size)\n if len(domain_size) == 1:\n r, theta, phi = base_pts\n new_r = 2*domain_size[0] - r\n r = np.hstack([r, new_r])\n theta = np.hstack([theta, theta])\n phi = np.hstack([phi, phi])\n base_pts = np.vstack((r, theta, phi))\n if len(domain_size) == 2:\n r, theta, z = base_pts\n new_r = 2*domain_size[0] - r\n r = np.hstack([r, new_r])\n theta = np.hstack([theta, theta])\n z = np.hstack([z, z])\n if domain_size[1] != 0: # If not a disk\n r = np.hstack([r, r, r])\n theta = np.hstack([theta, theta, theta])\n z = np.hstack([z, -z, 2*domain_size[1]-z])\n base_pts = np.vstack((r, theta, z))\n elif len(domain_size) == 3:\n Nx, Ny, Nz = domain_size\n # Reflect base points about all 6 faces\n orig_pts = base_pts\n base_pts = np.vstack((base_pts,\n [-1, 1, 1] * orig_pts + [2.0 * Nx, 0, 0]))\n base_pts = np.vstack((base_pts, [-1, 1, 1] * orig_pts))\n base_pts = np.vstack((base_pts,\n [1, -1, 1] * orig_pts + [0, 2.0 * Ny, 0]))\n base_pts = np.vstack((base_pts, [1, -1, 1] * orig_pts))\n if domain_size[2] != 0:\n base_pts = np.vstack((base_pts,\n [1, 1, -1] * orig_pts + [0, 0, 2.0 * Nz]))\n base_pts = np.vstack((base_pts, [1, 1, -1] * orig_pts))\n return base_pts", "def parameters(self):\n\n m = self.__m\n s = linalg.cholesky(self.__prod).transpose()\n w = self.__weight\n\n # Compute the parameters of the posterior distribution.\n return linalg.solve(s[:m, :m], s[:m, m:]), \\\n np.dot(s[:m, :m].transpose(), s[:m, :m]), \\\n np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \\\n w", "def photons(self, depth=1):\n self.dx[:,:self.W-1] = self.z[:,1:] - self.z[:,:self.W-1]\n self.dy[:self.H-1,:] = self.z[1:,:] - self.z[:self.H-1,:]\n px = self.xv - self.dx*depth\n py = self.yv - self.dy*depth\n return px,py", "def test_estimate_head_pose_hight_level_with_use_orientation_mode(self):\n\n faceEngine = VLFaceEngine()\n faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1\n detector = VLFaceDetector(DetectorType.FACE_DET_V3, faceEngine)\n\n angles0 = 
detector.detectOne(VLImage.load(filename=ROTATED0)).headPose\n angles90 = detector.detectOne(VLImage.load(filename=ROTATED90)).headPose\n\n assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch\n assert pytest.approx(angles90.roll, abs=2) == angles0.roll\n assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw", "def exterior_interior_points_eval(grid, points, solid_angle_tolerance, verbose=False):\n\n elements = grid.leaf_view.elements\n vertices = grid.leaf_view.vertices\n number_of_elements = grid.leaf_view.entity_count(0)\n elem = list(grid.leaf_view.entity_iterator(0))\n\n element_property = _np.zeros(number_of_elements, dtype=_np.int)\n element_groups = _np.zeros(shape=(4, number_of_elements), dtype=_np.int)\n element_groups[1:4, :] = elements\n for i in range(number_of_elements):\n property_number = elem[i].domain\n element_property[i] = property_number\n element_groups[0, i] = property_number\n\n element_properties = _np.array(list(set(element_property)), dtype=_np.int)\n if verbose:\n print(\"Element groups are:\")\n print(element_properties)\n\n points_interior = []\n points_exterior = []\n points_boundary = []\n index_interior = []\n index_exterior = _np.full(points.shape[1], True, dtype=bool)\n index_boundary = []\n\n for i in range(element_properties.size):\n\n elements_trunc = elements[:, element_groups[0, :] == element_properties[i]]\n num_elem = elements_trunc.shape[1]\n\n elements_x_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_y_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_z_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n # Populate grid vertices matrices\n for k in range(3):\n elements_x_coordinate[k, :] = vertices[0, elements_trunc[k, :]]\n elements_y_coordinate[k, :] = vertices[1, elements_trunc[k, :]]\n elements_z_coordinate[k, :] = vertices[2, elements_trunc[k, :]]\n # Obtain coordinates of triangular elements centroielements_surface_area\n # through barycentric method.\n elements_barycent_x_coordinate = _np.mean(elements_x_coordinate, axis=0)\n elements_barycent_y_coordinate = _np.mean(elements_y_coordinate, axis=0)\n elements_barycent_z_coordinate = _np.mean(elements_z_coordinate, axis=0)\n\n # Preallocate matrix of vectors for triangular elementses\n elements_u_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n elements_v_coordinate = _np.zeros(shape=(3, num_elem), dtype=float)\n # Compute matrix of vectors defining each triangular elements\n elements_u_coordinate = _np.array(\n [\n elements_x_coordinate[1, :] - elements_x_coordinate[0, :],\n elements_y_coordinate[1, :] - elements_y_coordinate[0, :],\n elements_z_coordinate[1, :] - elements_z_coordinate[0, :],\n ]\n )\n elements_v_coordinate = _np.array(\n [\n elements_x_coordinate[2, :] - elements_x_coordinate[0, :],\n elements_y_coordinate[2, :] - elements_y_coordinate[0, :],\n elements_z_coordinate[2, :] - elements_z_coordinate[0, :],\n ]\n )\n elements_u_cross_v = _np.cross(\n elements_u_coordinate, elements_v_coordinate, axisa=0, axisb=0, axisc=0\n )\n elements_u_cross_v_norm = _np.linalg.norm(elements_u_cross_v, axis=0)\n # Obtain outward pointing unit normal vectors for each elements\n normals = _np.divide(elements_u_cross_v, elements_u_cross_v_norm)\n # Obtain surface area of each elements\n elements_surface_area = 0.5 * elements_u_cross_v_norm\n\n start_time = _time.time()\n N_workers = _mp.cpu_count()\n parallelised_compute_solid_angle = _partial(\n compute_solid_angle,\n elements_barycent_x_coordinate,\n 
elements_barycent_y_coordinate,\n elements_barycent_z_coordinate,\n points,\n normals,\n elements_surface_area,\n )\n pool = _mp.Pool(N_workers)\n result = pool.starmap(\n parallelised_compute_solid_angle, zip(_np.arange(0, points.shape[1]))\n )\n pool.close()\n end_time = _time.time() - start_time\n if verbose:\n print(\"Time to complete solid angle field parallelisation: \", end_time)\n solid_angle = _np.hstack(result)\n if solid_angle_tolerance:\n index_interior_tmp = solid_angle > 0.5 + solid_angle_tolerance\n index_boundary_tmp = (solid_angle > 0.5 - solid_angle_tolerance) & (\n solid_angle < 0.5 + solid_angle_tolerance\n )\n points_boundary.append(points[:, index_boundary_tmp])\n index_boundary.append(index_boundary_tmp)\n index_exterior = index_exterior & (\n (index_interior_tmp == False) & (index_boundary_tmp == False)\n )\n else:\n index_interior_tmp = solid_angle > 0.5\n index_exterior = index_exterior & (index_interior_tmp == False)\n\n points_interior.append(points[:, index_interior_tmp])\n index_interior.append(index_interior_tmp)\n\n points_exterior = points[:, index_exterior]\n\n return (\n points_interior,\n points_exterior,\n points_boundary,\n index_interior,\n index_exterior,\n index_boundary,\n )", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)", "def _get_geometric_augmentation_parameter(self, entry: SupervisedKeypointDBEntry) -> (float, float):\n # Not training\n if not self._is_train:\n return 1.0, 0.0\n\n # For scale\n scale = np.clip(np.random.randn(), -1.0, 1.0) * self._config.aug_scale_factor + 1.0\n\n # For rotate:\n if random.random() < self._config.aug_rot_rate and (not entry.on_boundary):\n rotate_rad = np.clip(np.random.randn(), -2.0, 2.0) * self._config.aug_rot_rad_factor\n else:\n rotate_rad = 0.0\n\n # OK\n return scale, rotate_rad", "def setup_orientation_annotation(self) :\n \n # Anatomical directions in LPS convention, numpy order\n directions_anatomical = {\n \"L\" : (0,0,+1),\n \"R\" : (0,0,-1),\n \"P\" : (0,+1,0),\n \"A\" : (0,-1,0),\n \"I\" : (-1,0,0),\n \"S\" : (+1,0,0),\n }\n \n # Index directions, numpy order\n directions_index = {\n \"+x\" : (0,0,+1),\n \"-x\" : (0,0,-1),\n \"+y\" : (0,+1,0),\n \"-y\" : (0,-1,0),\n \"+z\" : (-1,0,0),\n \"-z\" : (+1,0,0),\n }\n \n directions = (directions_anatomical \n if self.display_coordinates in [\"physical\", \"nearest_axis_aligned\"]\n else directions_index)\n \n # Window locations\n locations = {\n \"up\" : (1,0),\n \"down\" : (-1,0),\n \"left\" : (0,-1),\n \"right\" : (0,1)\n }\n \n for location, p in locations.items() :\n matrix = self._3d_world_to_slice\n direction = numpy.dot(self._3d_slice_to_world, numpy.hstack((0, p)))\n \n # Find closest in-slice direction based on dot product\n closest = None\n max_distance = -1\n for name, d in directions.items() :\n distance = numpy.dot(d, direction)\n if distance > max_distance :\n max_distance = distance\n closest = name\n \n # Set text\n index = self._orientation_annotation_index[location]\n self._orientation_annotation.SetText(index, closest)", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def test_estimate_head_pose_with_use_orientation_mode(self):\n\n faceEngine = VLFaceEngine()\n faceEngine.faceEngineProvider.faceDetV3Settings.useOrientationMode = 1\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n images = [VLImage.load(filename=ROTATED0), VLImage.load(filename=ROTATED90)]\n detections = 
detector.detect(images, detect68Landmarks=True)\n angles0 = TestHeadPose.headPoseEstimator.estimate(detections[0][0].landmarks68)\n angles90 = TestHeadPose.headPoseEstimator.estimate(detections[1][0].landmarks68)\n\n assert pytest.approx(angles90.pitch, abs=2) == angles0.pitch\n assert pytest.approx(angles90.roll, abs=2) == angles0.roll\n assert pytest.approx(angles90.yaw, abs=2) == angles0.yaw", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity \n\n return eps, mu" ]
[ "0.71254003", "0.69500554", "0.68282026", "0.61255026", "0.61024475", "0.5787343", "0.57387304", "0.5717728", "0.5698031", "0.567763", "0.5626359", "0.55450577", "0.5525408", "0.5507461", "0.54807997", "0.54278624", "0.5404268", "0.53900427", "0.5351375", "0.5299402", "0.5291824", "0.5286612", "0.5283092", "0.52719927", "0.5255926", "0.5250085", "0.5173937", "0.514478", "0.512267", "0.51066446", "0.51020575", "0.50985557", "0.5087729", "0.5079079", "0.5057393", "0.504156", "0.50404125", "0.5033221", "0.50285715", "0.50285715", "0.50285715", "0.5024489", "0.50170964", "0.50170964", "0.5001144", "0.4998715", "0.49979356", "0.499453", "0.4993955", "0.49870342", "0.49777684", "0.49722567", "0.49708712", "0.49616125", "0.49565193", "0.4948623", "0.4945738", "0.49343586", "0.49258715", "0.49244723", "0.4920674", "0.49206284", "0.4919384", "0.48953095", "0.48896152", "0.48885545", "0.4881412", "0.48752126", "0.48741966", "0.48716462", "0.48697534", "0.4851721", "0.48491317", "0.4847848", "0.48443484", "0.483214", "0.483214", "0.48273113", "0.48214126", "0.482082", "0.48175353", "0.4799199", "0.47973904", "0.47951606", "0.4794221", "0.4784934", "0.47832796", "0.47816518", "0.47783676", "0.47711885", "0.47707", "0.4769085", "0.47658572", "0.47535357", "0.4752991", "0.47518855", "0.47466585", "0.47452712", "0.47369626", "0.47312072" ]
0.6420376
3
Transforming ground points to image points
def GroundToImage(self, groundPoints): X0 = float(self.exteriorOrientationParameters[0]) Y0 = float(self.exteriorOrientationParameters[1]) Z0 = float(self.exteriorOrientationParameters[2]) xp = float(self.camera.principalPoint[0]) yp = float(self.camera.principalPoint[1]) R = self.rotationMatrix r11 = float(R[0, 0]) r12 = float(R[0, 1]) r13 = float(R[0, 2]) r21 = float(R[1, 0]) r22 = float(R[1, 1]) r23 = float(R[1, 2]) r31 = float(R[2, 0]) r32 = float(R[2, 1]) r33 = float(R[2, 2]) f = self.camera.focalLength camPoints = [] for i in range(groundPoints.shape[0]): x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * ( groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * ( groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0)))) y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * ( groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * ( groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0)))) camPoints.append([x, y]) # return self.CameraToImage(np.array(camPoints)) return (np.array(camPoints))
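A minimal vectorized sketch of the same collinearity projection as the record above, assuming the same quantities: a 3x3 rotation matrix R, perspective center (X0, Y0, Z0), principal point (xp, yp) and focal length f. The function name and argument layout are illustrative only, not part of the record.

import numpy as np

def collinearity_project(ground_points, R, X0, Y0, Z0, xp, yp, f):
    # ground_points: (N, 3) array of world coordinates (X, Y, Z).
    # Shift by the perspective center, then apply R^T: component k of each row of (d @ R) is
    #   k=0: r11*dX + r21*dY + r31*dZ   (numerator of x)
    #   k=1: r12*dX + r22*dY + r32*dZ   (numerator of y)
    #   k=2: r13*dX + r23*dY + r33*dZ   (shared denominator)
    # i.e. exactly the terms formed element-by-element in the record's loop.
    d = np.asarray(ground_points, dtype=float) - np.array([X0, Y0, Z0], dtype=float)
    cam = d @ np.asarray(R, dtype=float)
    x = xp - f * cam[:, 0] / cam[:, 2]
    y = yp - f * cam[:, 1] / cam[:, 2]
    return np.column_stack([x, y])

The per-point loop in the record and this vectorized form compute the same image coordinates; the only design difference is replacing the explicit r11..r33 products with a single matrix multiplication.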
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GroundToImage_RzRyRz(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix_RzRyRz\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img", "def four_point_transform(self, image, pts):\n rect = []\n for j in range(4):\n rect.append([pts[j * 2], pts[j * 2 + 1]])\n\n rect = np.array(rect, dtype=\"float32\")\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype=\"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped", "def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points", "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def ImageToGround_GivenZ(self, imagePoints, Z_values):\n cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = cameraPoints.T\n pars = self.exteriorOrientationParameters\n X0 = pars[0]\n Y0 = pars[1]\n Z0 = pars[2]\n\n T = np.array([[X0], [Y0], [Z0]])\n\n omega = pars[3]\n phi = pars[4]\n kappa = pars[5]\n R = Compute3DRotationMatrix(omega, phi, kappa)\n\n f = self.camera.focalLength\n\n # allocating memory for return array\n groundPoints = []\n\n for i in range(len(cameraPoints[1])):\n camVec = np.insert(cameraPoints[:, i], np.size(cameraPoints), -f)\n lam = (Z_values - Z0) / (np.dot(R[2, :], camVec))\n\n X = X0 + lam * np.dot(R[0, :], camVec)\n Y = Y0 + lam * np.dot(R[1, :], camVec)\n\n xy = [X, Y, Z_values]\n groundPoints.append(xy)\n\n groundPoints = np.array(groundPoints)\n\n return groundPoints", "def getCartesianPointsImage(self, points):\n return getCartesianPointsImage(points, self)", "def transform(self, previousimage):", "def fourPointTransform(image, points):\r\n\r\n topLeft, topRight, bottomLeft, bottomRight = sortFourPoints(points)\r\n\r\n # Determine the maximum width\r\n topWidth = np.sqrt(((topRight[0] - topLeft[0]) ** 2) + ((topRight[1] - topLeft[1]) ** 2))\r\n bottomWidth = np.sqrt(((bottomRight[0] - bottomLeft[0]) ** 2) + ((bottomRight[1] - bottomLeft[1]) ** 2))\r\n width = max(int(topWidth), int(bottomWidth))\r\n\r\n # Determine the maximum height\r\n leftHeight = np.sqrt(((topLeft[0] - bottomLeft[0]) ** 2) + ((topLeft[1] - bottomLeft[1]) ** 2))\r\n rightHeight = np.sqrt(((topRight[0] - bottomRight[0]) ** 2) + ((topRight[1] - bottomRight[1]) ** 2))\r\n height = max(int(leftHeight), int(rightHeight))\r\n\r\n source = np.array([topLeft, topRight, bottomRight, bottomLeft], dtype=\"float32\")\r\n\r\n destination = np.array([[0, 0],\r\n [width - 1, 0],\r\n [width - 1, height - 1],\r\n [0, height - 1]], dtype=\"float32\")\r\n\r\n transformMatrix = cv2.getPerspectiveTransform(source, destination)\r\n\r\n return cv2.warpPerspective(image, transformMatrix, (width, height))", "def _update_imgs_and_pt_list(self, points, edge_points, segs, index):\n # index specifies whether to use the x or y coordinate in x_pts\n x_pts=[]\n for i in range(0, len(points)):\n pt=points[i]\n #edge_points[pt[0],pt[1]] = 255\n x_pts.append(pt[index])\n #segs[pt[0],pt[1]]=150\n\n return x_pts, segs, edge_points", "def 
project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points", "def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the image\n # pixels = pixels[mask_x & mask_y]\n return pixels", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()", "def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst", "def process_warp(src_img, result_img: np.zeros,\n tri_affines: np.matrix, dst_points: np.array,\n delaunay) -> None:\n roi_coords = grid_coordinates(dst_points)\n # indices to vertices. 
-1 if pixel is not in any triangle\n roi_tri_indices = delaunay.find_simplex(roi_coords)\n\n for simplex in enumerate(delaunay.simplices):\n coords = roi_coords[roi_tri_indices == simplex[0]]\n num_coords = len(coords)\n out_coords = np.dot(tri_affines[simplex[0]],\n np.vstack((coords.T, np.ones(num_coords))))\n x, y = coords.T\n result_img[y, x] = bilinear_interpolate(src_img, out_coords)\n\n return None", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def transform_images(img1,img2):", "def geo_transform(self):\n pass", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points", "def project(self):\n def _project(point):\n return (\n point[0]/(point[2]/Window.COP_DISTANCE+1),\n point[1]/(point[2]/Window.COP_DISTANCE+1))\n\n self._points = [list(map(_project, face)) for face in self._points]", "def get_point_coords_wrt_image(boxes_coords, point_coords):\n with torch.no_grad():\n point_coords_wrt_image = point_coords.clone()\n point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (\n boxes_coords[:, None, 2] - boxes_coords[:, None, 0]\n )\n point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (\n boxes_coords[:, None, 3] - boxes_coords[:, None, 1]\n )\n point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]\n point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]\n return point_coords_wrt_image", "def GenerateMapAffinity(img,nb_vertex,pointsInterest,objects_centroid,scale):\n\n # Apply the downscale right now, so the vectors are correct. 
\n img_affinity = Image.new(img.mode, (int(img.size[0]/scale),int(img.size[1]/scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2,int(img.size[1]/scale),int(img.size[0]/scale)))\n \n for i_pointsImage in range(len(pointsInterest)): \n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0]/scale),\n int(img.size[1]/scale),\n tuple((np.array(pointsImage[i_points])/scale).tolist()),\n tuple((np.array(center)/scale).tolist()), \n img_affinity = img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair)/2\n\n\n # Normalizing\n v = affinities[i_points].numpy() \n \n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero]/=norms[nonzero]\n yvec[nonzero]/=norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec],[yvec]]))\n affinities = torch.cat(affinities,0)\n\n return affinities", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * points[..., 1] / np.linalg.norm(points[..., [0, 2]],\n axis=-1) + self.center_y_px]).T\n # ensure that points' x values are also nan when the y values are nan\n transformed_points[np.isnan(transformed_points[..., 1])] = np.nan\n # return the points\n return transformed_points", "def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y", "def _convert_image_to_coordinates(self, vect) -> np.ndarray:\n xdim = vect.shape[0]\n ydim = vect.shape[1]\n\n # stride is used during averaging and length adjustment\n stride_x, stride_y = self._averaging, self._averaging\n\n # create empty vector of necessary shape\n # every \"pixel\" has 2 coordinates\n pos = np.empty((2 * xdim 
* ydim, 2), dtype=np.float32)\n\n # create coordinate spacing for x-y\n # double the num of elements by doubling x sampling\n xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False)\n yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False)\n xv, yv = np.meshgrid(xspace, yspace)\n\n # assign coordinates (pos) to all pixels\n pos[:, 0] = xv.flatten()\n pos[:, 1] = yv.flatten()\n\n # pixel midpoints are the first x-values of positions\n midpt = np.zeros((xdim * ydim, 2), dtype=np.float32)\n midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2\n midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2\n\n # rotate coordinates about midpoint to represent angle and length\n pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 0]\n pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 1]\n pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 0]\n pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 1]\n\n return pos", "def projectBack(points, proj):\n\n mpoints = MultiPoint(points)\n project = partial(\n pyproj.transform,\n proj,\n pyproj.Proj(proj='latlong', datum='WGS84'))\n gmpoints = transform(project, mpoints)\n coords = []\n for point in gmpoints.geoms:\n x, y = point.coords[0]\n coords.append((x, y))\n coords = np.array(coords)\n return coords", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def coordinates_to_imgpts(x, y):\n pts = np.array([np.flipud(np.transpose(np.vstack([x, y])))])\n return pts", "def problem2():\n \n pts_array, feats_array = p2.load_pts_features('data/pts_feats.npz')\n\n # points and features for image1 and image2\n pts1, pts2 = pts_array\n fts1, fts2 = feats_array\n\n # Loading images\n img1 = Image.open('data/img1.png')\n img2 = Image.open('data/img2.png')\n\n im1 = np.array(img1)\n im2 = np.array(img2)\n\n plt.figure(1)\n plt.subplot(1, 2, 1)\n plt.imshow(im1)\n plt.plot(pts1[:, 0], pts1[:, 1], 'ro', markersize=1.3)\n plt.subplot(1, 2, 2)\n plt.imshow(im2)\n plt.plot(pts2[:, 0], pts2[:, 1], 'ro', markersize=1.3)\n\n # display algined image\n H, ix1, ix2 = p2.final_homography(pts1, pts2, feats_array[0],\n feats_array[1])\n\n pts1 = pts1[ix1]\n pts2 = pts2[ix2]\n\n plt.figure(2)\n plt.subplot(1, 3, 1).set_title('Image 1')\n plt.imshow(im1)\n plt.plot(pts1[:, 0],\n pts1[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 2).set_title('Image 2')\n plt.imshow(im2)\n plt.plot(pts2[:, 0],\n pts2[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 3).set_title('Algined image 1')\n\n H_inv = np.linalg.inv(H)\n H_inv /= H_inv[2, 2]\n im3 = img1.transform(size=(im1.shape[1], im1.shape[0]),\n method=Image.PERSPECTIVE,\n data=H_inv.ravel(),\n resample=Image.BICUBIC)\n\n plt.show()", "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. 
\n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def projectToImage(pts_3D, P):\n pts_3D = np.reshape(pts_3D, (-1, 3))\n pts_3D = np.transpose(pts_3D)\n pts_3D = np.vstack([pts_3D, 1])\n pts_2D = np.matmul(P, pts_3D)\n pts_2D = pts_2D[:2]/pts_2D[-1]\n pts_2D = np.transpose(pts_2D)\n return pts_2D", "def ImageToRay(self, imagePoints):\n pass # delete after implementations", "def direction(self):\n import pylab\n i = 0\n j = 0\n vals = []\n vects = []\n kpx = self.keypoints.x\n kpy = self.keypoints.y\n sigma = self.keypoints.sigma\n img = self.raw\n pylab.figure()\n pylab.imshow(img, interpolation='nearest')\n\n for y, x, s in zip(kpy, kpx, sigma):\n s_patch = numpy.trunc(s * 2)\n\n if s_patch % 2 == 0 :\n s_patch += 1\n\n if s_patch < 3 : s_patch = 3\n\n if (x > s_patch / 2 and x < img.shape[1] - s_patch / 2 - 1 and y > s_patch / 2 and y < img.shape[0] - s_patch / 2):\n\n patch = img[y - (s_patch - 1) / 2:y + (s_patch - 1) / 2 + 1, x - (s_patch - 1) / 2:x + (s_patch - 1) / 2 + 1]\n x_patch = numpy.arange(s_patch)\n Gx = numpy.exp(-4 * numpy.log(2) * (x_patch - numpy.median(x_patch)) ** 2 / s)\n Gy = Gx[:, numpy.newaxis]\n dGx = -Gx * 4 * numpy.log(2) / s * 2 * (x_patch - numpy.median(x_patch))\n dGy = dGx[:, numpy.newaxis]\n d2Gx = -8 * numpy.log(2) / s * ((x_patch - numpy.median(x_patch)) * dGx + Gx)\n d2Gy = d2Gx[:, numpy.newaxis]\n\n Hxx = d2Gx * Gy\n Hyy = d2Gy * Gx\n Hxy = dGx * dGy\n\n d2x = (Hxx.ravel() * patch.ravel()).sum()\n d2y = (Hyy.ravel() * patch.ravel()).sum()\n dxy = (Hxy.ravel() * patch.ravel()).sum()\n H = numpy.array([[d2y, dxy], [dxy, d2x]])\n val, vect = numpy.linalg.eig(H)\n\n# print 'new point'\n# print x, y\n# print val\n# print vect\n# print numpy.dot(vect[0],vect[1])\n e = numpy.abs(val[0] - val[1]) / numpy.abs(val[0] + val[1])\n j += 1\n# print j\n# print e\n if numpy.abs(val[1]) < numpy.abs(val[0]): # reorganisation des valeurs propres et vecteurs propres\n val[0],val[1] = val[1],val[0]\n vect = vect[-1::-1,:]\n\n\n pylab.annotate(\"\", xy=(x + vect[0][0] * val[0], y + vect[0][1] * val[0]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n\n pylab.annotate(\"\", xy=(x + vect[1][0] * val[1], y + vect[1][1] * val[1]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n pylab.plot(x, y, 'og')\n vals.append(val)\n vects.append(vect)\n return vals, vects", "def extract_poses(self, labels):\n 
height, width = self.topdown_view.shape\n n_gridpoints_width, n_gridpoints_height = (\n width // self.dist - 1,\n height // self.dist - 1,\n )\n self.gridpoints = []\n for h in range(n_gridpoints_height):\n for w in range(n_gridpoints_width):\n point = (self.dist + h * self.dist, self.dist + w * self.dist)\n if self.valid_point(*point):\n self.gridpoints.append(point)\n\n # Find the closest point of the target class to each gridpoint\n poses = []\n self.cpis = []\n for point in self.gridpoints:\n closest_point_of_interest, label = self._bfs(point, labels)\n if closest_point_of_interest is None:\n continue\n\n poses.append((point, closest_point_of_interest, label))\n self.cpis.append(closest_point_of_interest)\n\n # Convert from topdown map coordinate system to that of the pathfinder\n startw, starty, starth = self._get_pathfinder_reference_point()\n for i, pose in enumerate(poses):\n pos, cpi, label = pose\n r1, c1 = pos\n r2, c2 = cpi\n new_pos = np.array(\n [\n startw + c1 * self.pixels_per_meter,\n starty,\n starth + r1 * self.pixels_per_meter,\n ]\n )\n new_cpi = np.array(\n [\n startw + c2 * self.pixels_per_meter,\n starty,\n starth + r2 * self.pixels_per_meter,\n ]\n )\n cam_normal = new_cpi - new_pos\n new_rot = self._compute_quat(cam_normal)\n poses[i] = (new_pos, new_rot, label)\n\n return poses", "def generate_pointcloud(rgb_file, mask_file,depth_file,ply_file):\n rgb = Image.open(rgb_file)\n # depth = Image.open(depth_file)\n depth = Image.open(depth_file).convert('I')\n mask = Image.open(mask_file).convert('I')\n\n # if rgb.size != depth.size:\n # raise Exception(\"Color and depth image do not have the same resolution.\")\n # if rgb.mode != \"RGB\":\n # raise Exception(\"Color image is not in RGB format\")\n # if depth.mode != \"I\":\n # raise Exception(\"Depth image is not in intensity format\")\n\n\n points = [] \n for v in range(rgb.size[1]):\n for u in range(rgb.size[0]):\n color = rgb.getpixel((u,v))\n # Z = depth.getpixel((u,v)) / scalingFactor\n # if Z==0: continue\n # X = (u - centerX) * Z / focalLength\n # Y = (v - centerY) * Z / focalLength\n if (mask.getpixel((u,v))<55):\n Z = depth.getpixel((u, v))*.22 \n if Z == 0: continue\n Y = .22 * v\n X = .22 * u\n points.append(\"%f %f %f %d %d %d 0\\n\"%(X,Y,Z,color[0],color[1],color[2]))\n file = open(ply_file,\"w\")\n file.write('''ply\nformat ascii 1.0\nelement vertex %d\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty uchar alpha\nend_header\n%s\n'''%(len(points),\"\".join(points)))\n file.close()", "def imageFromCamera(self, points): # pragma: no cover\n # to be overloaded by the child class.\n return None", "def project_points(self, points3d, withmask=False, binary_mask=True):\n if withmask:\n return Geometry.reproject_points_to_2d(\n points3d, self.rvec, self.tvec, self.K, self.w, self.h,\n distCoef=self.distCoef, binary_mask=binary_mask)\n else:\n assert type(points3d) == np.ndarray, \"Points3d has to be a numpy array\"\n\n # Treatment for empty points\n # Add row index of None points to new list and remove the empty elements\n indexes_of_empty_points = np.unique(np.where(points3d == None)[0])\n points_to_interpolate = np.float32(points3d.copy())\n points_to_interpolate = np.delete(points_to_interpolate, indexes_of_empty_points, axis=0)\n\n # Project all the points in the camera if points_to_interpolate is not empty\n if points_to_interpolate.size != 0:\n batch_size = 1000000 \n if len(points_to_interpolate) > batch_size:\n # Create 
batches\n num_batches = int(len(points_to_interpolate) / batch_size)\n last_batch_length = len(points_to_interpolate) % batch_size\n\n pts2d = [] \n for i in range(num_batches):\n indices = range(i*batch_size,i*batch_size + batch_size)\n\n batch = np.take(points_to_interpolate, indices, axis=0) \n pts2d_batch, _ = cv2.projectPoints(batch, self.rvec, self.tvec, self.K, self.distCoef)\n pts2d.append(pts2d_batch)\n\n if last_batch_length > 0:\n indices = range(num_batches*batch_size,num_batches*batch_size + last_batch_length)\n batch = np.take(points_to_interpolate, indices, axis=0) \n pts2d_batch, _ = cv2.projectPoints(batch, self.rvec, self.tvec, self.K, self.distCoef)\n pts2d.append(pts2d_batch)\n\n pts2d = np.concatenate(pts2d, axis=0)\n else:\n pts2d, _ = cv2.projectPoints(points_to_interpolate,\n self.rvec,\n self.tvec,\n self.K, self.distCoef)\n else:\n pts2d = np.array([])\n\n pts2d = np.squeeze(pts2d)\n if len(pts2d.shape) == 1:\n pts2d = np.expand_dims(pts2d, axis=0)\n\n # Add empty(None) points again\n pts2d = pts2d.tolist()\n for index in indexes_of_empty_points:\n pts2d.insert(index, [None, None])\n return np.array(pts2d)", "def triangulate(left_pts, right_pts, P, P1):\n left_pts = np.array(left_pts).astype(float)\n right_pts = np.array(right_pts).astype(float)\n my_points = cv2.triangulatePoints(P,P1,left_pts.T,right_pts.T)\n\n # convert to inhomogeneous coordinates\n for i in range(my_points.shape[1]):\n my_points[0,i] /= my_points[3,i]\n my_points[1,i] /= my_points[3,i]\n my_points[2,i] /= my_points[3,i]\n my_points[3,i] /= my_points[3,i]\n\n return create_pointcloud(my_points.T)", "def inv_projmap(self, img, nside=None):\n pass", "def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. 
+ self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)", "def warping(src, dst, H, ymin, ymax, xmin, xmax, direction='b'):\r\n\r\n h_src, w_src, ch = src.shape\r\n h_dst, w_dst, ch = dst.shape\r\n H_inv = np.linalg.inv(H)\r\n\r\n # TODO: 1.meshgrid the (x,y) coordinate pairs\r\n x = np.linspace(xmin, xmax-1, xmax-xmin)\r\n y = np.linspace(ymin, ymax-1, ymax-ymin)\r\n x, y = np.meshgrid(x, y)\r\n x = x.reshape(-1).astype(int)\r\n y = y.reshape(-1).astype(int)\r\n u = np.vstack((x, y, np.ones(len(x))))\r\n\r\n # TODO: 2.reshape the destination pixels as N x 3 homogeneous coordinate\r\n\r\n if direction == 'b':\r\n # TODO: 3.apply H_inv to the destination pixels and retrieve (u,v) pixels, then reshape to (ymax-ymin),(xmax-xmin)\r\n H_inv = np.linalg.inv(H)\r\n v = H_inv @ u\r\n vx = np.round(v[0] / v[2]).astype(int)\r\n vy = np.round(v[1] / v[2]).astype(int)\r\n\r\n # TODO: 4.calculate the mask of the transformed coordinate (should not exceed the boundaries of source image)\r\n mask = (vx >= 0) & (vx < w_src) & (vy >= 0) & (vy < h_src)\r\n\r\n # TODO: 5.sample the source image with the masked and reshaped transformed coordinates\r\n x = x[mask]\r\n y = y[mask]\r\n vx = vx[mask]\r\n vy = vy[mask]\r\n\r\n # TODO: 6. assign to destination image with proper masking\r\n dst[y, x] = src[vy, vx]\r\n\r\n elif direction == 'f':\r\n # TODO: 3.apply H to the source pixels and retrieve (u,v) pixels, then reshape to (ymax-ymin),(xmax-xmin)\r\n v = H @ u\r\n vx = np.round(v[0] / v[2]).astype(int)\r\n vy = np.round(v[1] / v[2]).astype(int)\r\n\r\n # TODO: 4.calculate the mask of the transformed coordinate (should not exceed the boundaries of destination image)\r\n mask = (vx >= 0) & (vx < w_dst) & (vy >= 0) & (vy < h_dst)\r\n\r\n # TODO: 5.filter the valid coordinates using previous obtained mask\r\n x = x[mask]\r\n y = y[mask]\r\n vx = vx[mask]\r\n vy = vy[mask]\r\n\r\n # TODO: 6. 
assign to destination image using advanced array indicing\r\n dst[vy, vx] = src[y, x]\r\n\r\n return dst", "def projectPoints(self, points):\n return [self.projectPoint(point) for point in points]", "def translate(self, source, destination):\n\n # Based on https://www.onlinemathlearning.com/transformation-review.html\n x_diff = destination.x - source.x\n y_diff = destination.y - source.y\n return Collection(\n ColouredPoint(x=p.x+x_diff, y=p.y+y_diff, color=p.color) for p in self.points\n )", "def get_point_coords_wrt_image(boxes, point_coords):\n # with tf.variable_scope(\"get_point_coords_wrt_image\", reuse=False):\n boxes = tf.stop_gradient(boxes)\n point_coords = tf.stop_gradient(point_coords)\n h = boxes[:, None, 2] - boxes[:, None, 0]\n w = boxes[:, None, 3] - boxes[:, None, 1]\n y1 = boxes[:, None, 0]\n x1 = boxes[:, None, 1]\n scale = tf.stack([h, w], axis=-1)\n trans = tf.stack([y1, x1], axis=-1)\n point_coords = point_coords * scale\n point_coords = point_coords + trans\n return point_coords", "def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed", "def pose_2d_pts(self,image):\n '''\n image- rgb image \n return:-\n pts - list of 2d pose landmarks as img coords\n image- rgb image on which the 2d pose landmarks are drawn\n ''' \n pts=[]\n imgRGB=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n results=pose.process(imgRGB)\n if results.pose_landmarks:\n mpDraw.draw_landmarks(image,results.pose_landmarks,mpPose.POSE_CONNECTIONS)\n for id,lm in enumerate(results.pose_landmarks.landmark):\n h,w,c=image.shape\n imgx,imgy=int(lm.x*w),int(lm.y*h)\n \n pts.append((imgx,imgy)) \n return pts,image", "def spatial(self):", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, 
axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def draw_points(self, pic_path, points_data):\n # Pupil Finding here\n pupils = get_eye_locations_in_image(pic_path)\n img = cv2.imread(pic_path)\n frame_number = int(re.findall(r'\\d+', pic_path.split('/')[-1])[0])\n dets = detector(img)\n shape = None\n height, width, channels = img.shape\n\n for k, d in enumerate(dets):\n shape = predictor(img, d)\n\n if(not shape):\n return\n\n pointList = []\n c = 0\n for b in range(68):\n # sanitizing input points\n point = Point(shape.part(b).x, shape.part(b).y)\n points_data[c] = [point.x, point.y]\n c = c + 1\n # some points might be out of bound\n # so, move them to the closest boundary\n if(point.x < 0):\n point.x = 0\n elif(point.x >= width):\n point.x = width - 1\n if(point.y < 0):\n point.y = 0\n elif(point.y >= height):\n point.y = height - 1\n\n pointList.append(point)\n\n roll = findRoll(pointList)\n #print(\"roll is \" + str(roll) + ' angles')\n yaw = findYaw(pointList)\n #print(\"yaw is \" + str(yaw) + ' angles')\n pitch = findPitch(pointList)\n #print(\"pitch is \" + str(pitch) + ' angles')\n self.data[frame_number] = [roll, yaw, pitch]\n counter = 0\n for point in pointList:\n cv2.circle(img, (point.x, point.y), ImageProcessor.POINT_SIZE, ImageProcessor.POINT_COLOR, -1)\n counter = counter + 1\n\n self.draw_triangles(img, pointList)\n \n for pupil in pupils:\n cv2.circle(img, (pupil.left.x, pupil.left.y), 5, (0,0,255), -1)\n cv2.circle(img, (pupil.right.x, pupil.right.y), 5, (0,0,255), -1)\n points_data[-1] = [pupil.left.x, pupil.left.y]\n points_data[-2] = [pupil.right.x, pupil.right.y]\n #print(pupil.left.x, \", \", pupil.left.y)\n #print(pupil.right.x, \", \", 
pupil.right.y)\n\n cv2.imwrite(pic_path, img)", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def GenerateMapAffinity(img, nb_vertex, pointsInterest, objects_centroid, scale):\n\n # Apply the downscale right now, so the vectors are correct.\n img_affinity = Image.new(img.mode, (int(img.size[0] / scale), int(img.size[1] / scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2, int(img.size[1] / scale), int(img.size[0] / scale)))\n\n for i_pointsImage in range(len(pointsInterest)):\n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0] / scale),\n int(img.size[1] / scale),\n tuple((np.array(pointsImage[i_points]) / scale).tolist()),\n tuple((np.array(center) / scale).tolist()),\n img_affinity=img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair) / 2\n\n # Normalizing\n v = affinities[i_points].numpy()\n\n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero] /= norms[nonzero]\n yvec[nonzero] /= norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec], [yvec]]))\n affinities = torch.cat(affinities, 0)\n\n return affinities", "def projectToImage_kitti(pts_3D, P):\n # project in image\n mat = np.vstack((pts_3D, np.ones((pts_3D.shape[1]))))\n\n pts_2D = np.dot(P, mat)\n\n # scale projected points\n pts_2D[0, :] = pts_2D[0, :] / pts_2D[2, :]\n pts_2D[1, :] = pts_2D[1, :] / pts_2D[2, :]\n pts_2D = np.delete(pts_2D, 2, 0)\n\n return pts_2D", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def project_points_undist(self, points3d):\n pts2d, _ = cv2.projectPoints(points3d,\n self.rvec,\n self.tvec,\n self.K_new, 0)\n pts2d = np.squeeze(pts2d)\n if len(pts2d.shape) == 1:\n pts2d = np.expand_dims(pts2d, axis=0)\n return pts2d", "def convert_image_point_to_global_coordinates(points, camera_location):\n # TODO: The camera should take photos which record the camera_location, and scale factors etc.\n # This should be a method on such an image.\n\n # Convert to numpy object for a clean notation\n points = np.array(points)\n camera_location = np.array(camera_location)\n scale_factors = np.array([config.Y_PIXELS_TO_MILLIMETRE_SCALE, config.X_PIXELS_TO_MILLIMETRE_SCALE])\n camera_resolution = np.array(config.CAMERA_RESOLUTION)\n\n # Do the computation\n image_centre = camera_resolution / 2\n return 
camera_location + scale_factors * (points - image_centre)", "def update_transform(self):\n\n self.a = self.scale * self.pixel_size * math.cos(self.angle)\n self.d = self.scale * self.pixel_size * math.sin(self.angle)\n self.b = self.d\n self.e = -self.a\n self.c = self.point.x() - self.a*self.width/2.0 - self.b*self.height/2.0\n self.f = self.point.y() - self.d*self.width/2.0 - self.e*self.height/2.0\n\n self.bounding_box = [[self.c,self.f],[self.c+self.a*self.width,self.f+self.d*self.width],[self.c+self.a*self.width+self.b*self.height,self.f+self.d*self.width+self.e*self.height],[self.c+self.b*self.height,self.f+self.e*self.height],]", "def evaluate(self, points):\n points = np.array(points, np.float64)\n output_shape = points.shape[1:]\n points.shape = (points.shape[0], seq_prod(output_shape))\n cmapi = self.image.coordmap.inverse()\n voxels = cmapi(points.T).T\n V = map_coordinates(self.data,\n voxels,\n order=self.order,\n mode=self.mode,\n cval=self.cval,\n prefilter=False)\n # ndimage.map_coordinates returns a flat array,\n # it needs to be reshaped to the original shape\n V.shape = output_shape\n return V", "def warpImag(src_img: np.ndarray, dst_img: np.ndarray) -> None:\r\n\r\n dst_p = []\r\n fig1 = plt.figure()\r\n size = src_img.shape\r\n # no need to take the coordinates of the second image in order to do the homography just pick the corners\r\n # coordinates\r\n pts_src = np.array(\r\n [\r\n [0, 0],\r\n [size[1] - 1, 0],\r\n [size[1] - 1, size[0] - 1],\r\n [0, size[0] - 1]\r\n ], dtype=float\r\n )\r\n def onclick_1(event):\r\n x = event.xdata\r\n y = event.ydata\r\n print(\"Loc: {:.0f},{:.0f}\".format(x, y))\r\n\r\n plt.plot(x, y, '*r')\r\n dst_p.append([x, y])\r\n\r\n if len(dst_p) == 4:\r\n plt.close()\r\n plt.show()\r\n\r\n # display image 1\r\n cid = fig1.canvas.mpl_connect('button_press_event', onclick_1)\r\n plt.imshow(dst_img)\r\n plt.show()\r\n dst_p = np.array(dst_p)\r\n\r\n ##### Your Code Here ######\r\n h = computeHomography(pts_src, dst_p) # my function to find the homography matrix in order to do projection\r\n # to the coordinates by this equations from opencv dst(x,y) = src(m11x + m12y +m13/ m31x +m32y +m33\r\n # , m21x + m22y +m23/ m31x +m32y +m33)\r\n im_temp = warpPerspective(src_img , h, (dst_img.shape[1],dst_img.shape[0]))\r\n plt.imshow(im_temp)\r\n plt.show()\r\n im_dst2 = im_temp + dst_img\r\n plt.imshow(im_dst2.astype('uint8'))\r\n plt.show()\r\n\r\n pass", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # 
\"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms", "def get_graph(self, points):\n\n gridmap = cv2.imread(self.ruta_imagen, -1)\n\n gridmap = self.four_point_transform(gridmap, points)\n\n gridmap[(gridmap >= 179) & (gridmap <= 238)] = 0\n gridmap[(gridmap >= 241) & (gridmap <= 255)] = 255\n\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n gridmap_dilatated = cv2.dilate(cv2.bitwise_not(gridmap), kernel, iterations=1)\n gridmap_dilatated = cv2.bitwise_not(gridmap_dilatated)\n\n scale_percent = 25 # percent of original size\n width = int(gridmap_dilatated.shape[1] * scale_percent / 100)\n height = int(gridmap_dilatated.shape[0] * scale_percent / 100)\n dim = (width, height)\n gridmap_resized = cv2.resize(gridmap_dilatated, dim, interpolation=cv2.INTER_NEAREST)\n\n self.gridmap2graph(gridmap_resized, width, height)\n\n return gridmap_resized, width, height", "def _image_to_point_space(pixel_coordinates: np.array, boundary_radius: int, resolution: int) -> np.array:\n pix_origin = np.array([resolution / 2, resolution / 2])\n return np.array((pixel_coordinates - pix_origin) * (2 * boundary_radius) / resolution)", "def _point_to_image_space(point: Union[Point, np.array], boundary_radius: int, resolution: int) -> np.array:\n origin = np.array([resolution / 2, resolution / 2])\n if type(point) is Point:\n point = point.numpy()\n return (point * resolution / (2 * boundary_radius) + origin).round().astype(np.uint16)", "def GeneratePointsImg(self, n, ppa):\n x = np.linspace(0,self.camera.sensorSize,n)+ppa[0]\n y = np.linspace(0,self.camera.sensorSize,n)+ppa[1]\n\n return np.meshgrid(x, y)", "def charuco_img_points(images, objpoint, board, a_dict):\r\n #Criteria for subpixel refinement\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)\r\n\r\n objpoints = [] # 3d point in world space\r\n imgpoints = [] # 2d point in image plane\r\n\r\n for img in images:\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n corners, ids, rejpoints = cv2.aruco.detectMarkers(gray, a_dict)\r\n if len(corners)>0:\r\n res2 = cv2.aruco.interpolateCornersCharuco(corners,ids,gray,board)\r\n if res2[1] is not None:\r\n cv2.cornerSubPix(gray,res2[1],(3,3),(-1,1),criteria)\r\n imgpoints.append(res2[1].T[:,0,:])\r\n objpoints.append(objpoint[:,res2[2].flatten()])\r\n cv2.aruco.drawDetectedCornersCharuco(img,res2[1],res2[2])\r\n cv2.imshow(\"frame\",img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n return objpoints,imgpoints", "def generate_image(self,true_dist):\n N_POINTS = 128\n RANGE = 3\n\n points = np.zeros((N_POINTS, N_POINTS, 2), dtype='float32')\n points[:, :, 0] = np.linspace(-RANGE, RANGE, N_POINTS)[:, None]\n points[:, :, 1] = np.linspace(-RANGE, RANGE, N_POINTS)[None, :]\n points = points.reshape((-1, 2))\n\n plt.clf()\n\n #true_dist = true_dist.cpu().data.numpy()\n samples = self.G(self.sample_z_)\n print('generate size is',samples.size())\n samples = samples.cpu().data.numpy()\n\n x = y = np.linspace(-RANGE, RANGE, N_POINTS)\n # plt.contour(x, y, disc_map.reshape((len(x), len(y))).transpose())\n\n plt.scatter(true_dist[:, 0], true_dist[:, 1], c='orange', marker='+')\n plt.scatter(samples[:, 0], samples[:, 1], c='green', marker='+')\n plt.show()", "def converte_coord(valor):\n\n pts1 = ([0,0],[24,0],[24,44],[0,44])\n pts1 = np.asarray(pts1, dtype = np.float32)\n pts2 = np.float32([[0,0],[100,0], [100,100], 
[0,100]])\n\n M = cv.getPerspectiveTransform(pts1,pts2)\n img2 = cv.warpPerspective(valor,M,(100,100))\n return img2", "def rectangular_perpective_transform(image, points):\n # We first order our points so they go clockwise from the top left. Top left point must have the\n # lowest coordinate sum, while the bottom right must have the largest\n ordered_pts = np.empty((4, 2), dtype = 'float32')\n pt_sum = np.sum(points, axis = 1)\n ordered_pts[0] = points[np.argmin(pt_sum)]\n ordered_pts[2] = points[np.argmax(pt_sum)]\n\n # the top right should have smallest coordinate difference, bottom left the largest\n pt_diff = np.diff(points, axis = 1)\n ordered_pts[1] = points[np.argmin(pt_diff)]\n ordered_pts[3] = points[np.argmax(pt_diff)]\n\n # for convenience, we store the points as variables for convenience in calculating width / height\n (top_left, top_right, bottom_right, bottom_left) = ordered_pts\n\n top_width = np.linalg.norm(top_right - top_left)\n bottom_width = np.linalg.norm(bottom_right - bottom_left)\n width = int(max(top_width, bottom_width))\n\n left_height = np.linalg.norm(bottom_left - top_left)\n right_height = np.linalg.norm(bottom_right - top_right)\n height = int(max(left_height, right_height))\n\n # create destination coordinate points to give us a top-down view of the subimage enclosed by the original points\n dest_points = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype = 'float32')\n transform_matrix = cv2.getPerspectiveTransform(ordered_pts, dest_points)\n return cv2.warpPerspective(image, transform_matrix, (width, height))", "def transform_image(image, transform, mapping, alpha = 1, incr_x = 10, incr_y = 10):\r\n background = [255, 255, 255, 0]\r\n width, height = image.size\r\n image_in = np.array(image.convert(\"RGBA\"))\r\n image_out = [[background[:] for j in range(width)] for i in range(height)]\r\n transform_row = []\r\n for i in range(0, width + incr_x, incr_x):\r\n transform_row.append(transform(vec2(i, 0), mapping, alpha))\r\n for i in range(incr_y, height + incr_y, incr_y):\r\n p_ur = transform_row[0]\r\n p_lr = transform_row[0] = transform(vec2(0, i), mapping, alpha)\r\n for j in range(incr_x, width + incr_x, incr_x):\r\n p_ul = p_ur\r\n p_ll = p_lr\r\n p_ur = transform_row[j//incr_x]\r\n p_lr = transform_row[j//incr_x] = transform(vec2(j, i), mapping, alpha)\r\n a = p_ur - p_ul\r\n b = p_ll - p_ul\r\n det = a.x*b.y - a.y*b.x\r\n if (det != 0.0):\r\n for p in triangle(p_ul, p_ur, p_ll, width, height):\r\n c = p - p_ul\r\n rx = (b.y*c.x - b.x*c.y)/det\r\n ry = (a.x*c.y - a.y*c.x)/det\r\n image_out[p.y][p.x] = image_in[min(height - 1, max(0, round(i + (ry - 1)*incr_y)))][min(width - 1, max(0, round(j + (rx - 1)*incr_x)))]\r\n a = p_lr - p_ll\r\n b = p_lr - p_ur\r\n det = a.x*b.y - a.y*b.x\r\n if (det != 0.0):\r\n p_ulr = p_ur + p_ll - p_lr\r\n for p in triangle(p_ur, p_ll, p_lr, width, height):\r\n c = p - p_ulr\r\n rx = (b.y*c.x - b.x*c.y)/det\r\n ry = (a.x*c.y - a.y*c.x)/det\r\n image_out[p.y][p.x] = image_in[min(height - 1, max(0, round(i + (ry - 1)*incr_y)))][min(width - 1, max(0, round(j + (rx - 1)*incr_x)))]\r\n image_out = Image.fromarray(np.uint8(image_out))\r\n return image_out", "def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = 
np.meshgrid(x_arr, y_arr)", "def forward_template(self, z_img):\n z_feat = self.backbone(z_img)\n if self.with_neck:\n z_feat = self.neck(z_feat)\n\n z_feat_center = []\n for i in range(len(z_feat)):\n left = (z_feat[i].size(3) - self.test_cfg.center_size) // 2\n right = left + self.test_cfg.center_size\n z_feat_center.append(z_feat[i][:, :, left:right, left:right])\n return tuple(z_feat_center)", "def phantom_ellipses(n_points,E):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*n_points/2 #semiaxis a\n E[:,1] = E[:,1]*n_points/2 #semiaxis b\n E[:,2] = E[:,2]*n_points/2 #x\n E[:,3] = E[:,3]*n_points/2 #y\n E[:,4] = E[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = E.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sulle ellissi\n x_new = x - E[k,2]\n y_new = y - E[k,3]\n\n #find(( (x.*cosp + y.*sinp).^2)./asq + ((y.*cosp - x.*sinp).^2)./bsq <= 1); \n cosp = math.cos(E[k,4])\n sinp = math.sin(E[k,4])\n cond = np.square( x_new * cosp + y_new * sinp )*1/(E[k,0]*E[k,0]) + \\\n np.square(y_new * cosp - x_new * sinp)*1/(E[k,1]*E[k,1]) - 1\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] <= 0.0):\n phantom1[i,j,k] = E[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def morphPointSet(v_1, v_2, warp_frac):\n v = []\n for y in range(len(v_1)):\n v.append([(v_1[y][0] * (1 - warp_frac) + v_2[y][0] * warp_frac) , (v_1[y][1] * (1 - warp_frac) + v_2[y][1] * warp_frac)])\n return ginput_to_array(v)", "def normalise(image):", "def projective_inverse_warp_torch2(\n img, depth, pose, src_intrinsics, tgt_intrinsics, tgt_height, tgt_width, ret_flows=False):\n batch, height, width, channels = img.shape\n # Construct pixel grid coordinates (x, y, 1) for each pixel.\n # Duplicated for N (e.g. 4) of INPUT images (batch)\n pixel_coords = meshgrid_abs_torch(batch, tgt_height, tgt_width, img.device, False)\n\n # Note: \"target\" here means actually \"ref image\", forget about the ground truth targets!\n # You project pixels from \"target\" to the multiple inputs, not the other way round\n # Convert pixel coordinates to the target camera frame, 3D camera coords (X, Y, Z), seems OK so far...\n # Note: these are points in 3D camera coords (C) of the target camera, not world coords (W) !!!\n cam_coords = pixel2cam_torch(depth, pixel_coords, tgt_intrinsics)\n\n # Construct a 4x4 intrinsic matrix, why? 
wouldn't 3x4 suffice?\n filler = torch.tensor([[[0., 0., 0., 1.]]], device=img.device)\n filler = filler.repeat(batch, 1, 1)\n src_intrinsics4 = torch.cat([src_intrinsics, torch.zeros([batch, 3, 1], device=img.device)], axis=2)\n src_intrinsics4 = torch.cat([src_intrinsics4, filler], axis=1)\n\n # Get a 4x4 transformation matrix from 'target' camera frame to 'source'\n # pixel frame, looks OK\n proj_tgt_cam_to_src_pixel = torch.matmul(src_intrinsics4, pose)\n src_pixel_coords = cam2pixel_torch(cam_coords, proj_tgt_cam_to_src_pixel)\n\n # print(f'src_pixel_coords shape {src_pixel_coords.shape}')\n # print(f'src_pixel_coords {L(src_pixel_coords[:, :, :3,:])}')\n\n # Now we get trouble !\n if False:\n print(('src_pixel_coords', src_pixel_coords.shape, src_pixel_coords.dtype))\n for i in range(2):\n t = src_pixel_coords[0, :, :, i]\n print((i, t.min().item(), t.max().item()))\n sys.exit(0)\n\n # src_pixel_coords = (src_pixel_coords + torch.tensor([0.5, 0.5], device=img.device)) / torch.tensor([width, height],\n # device=img.device)\n\n src_pixel_coords = src_pixel_coords / torch.tensor([width-1, height-1], device=img.device)\n\n output_img = resampler_wrapper_torch(img, src_pixel_coords)\n if ret_flows:\n return output_img, src_pixel_coords - cam_coords\n else:\n return output_img", "def map_overlay(img, positions, meter_per_pixel, img_origin, img_heading, color=(255, 0, 0), thickness=2):\r\n img_out = np.copy(img)\r\n new_pos = rescale_trajectory(positions, meter_per_pixel, img_origin, img_heading)\r\n for pos in new_pos:\r\n cv2.circle(img_out, (int(pos[0]), int(pos[1])), thickness, color, thickness)\r\n return img_out", "def four_point_transform(image, pts):\n rect = order_points(pts)\n width_first = np.sqrt(\n ((rect[2][0] - rect[3][0]) ** 2) + ((rect[2][1] - rect[3][1]) ** 2)\n )\n width_second = np.sqrt(\n ((rect[1][0] - rect[0][0]) ** 2) + ((rect[1][1] - rect[0][1]) ** 2)\n )\n max_width = max(int(width_first), int(width_second))\n height_first = np.sqrt(\n ((rect[1][0] - rect[2][0]) ** 2) + ((rect[1][1] - rect[2][1]) ** 2)\n )\n height_second = np.sqrt(\n ((rect[0][0] - rect[3][0]) ** 2) + ((rect[0][1] - rect[3][1]) ** 2)\n )\n max_height = max(int(height_first), int(height_second))\n dst = np.array(\n [\n [0, 0],\n [max_width - 1, 0],\n [max_width - 1, max_height - 1],\n [0, max_height - 1],\n ],\n dtype=\"float32\",\n )\n view_transform = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, view_transform, (max_width, max_height))\n return warped", "def place_images(self, final_list, points):\n\t\tfor i in range(8): \n # Please change this (8) into a class-level variable --KOH\n\t\t\timage_object = final_list[i]\n#\t\tif type(image_object) == 'CorrectImage':\n#\t\t\t\tself.correct = [i, points[i]]\n\t\t\timage = pygame.image.load(image_object.file_path)\n # Why can't these be stored as a property of the class --KOH\n\t\t\timagerect = image.get_rect()\n\t\t\treimage = pygame.transform.scale(image, image_object.size)\n\t\t\tself.screen.blit(reimage, points[i])", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project_pts3_to_image(pts3, P):\n assert (len(pts3.shape) == 2)\n assert (pts3.shape[1] == 3)\n assert 
(P.shape == (3, 4))\n pts3 = np.hstack([pts3, np.ones((len(pts3), 1))]) # homogeneous\n pts2 = P.dot(pts3.T).T\n pts2 = pts2[:, :2] / (pts2[:, [-1]] + 1e-8)\n return pts2", "def draw_features(self, image):\n \n for x,y in self.new_points.reshape(-1,2):\n cv2.circle(image, (x,y), 2, (255,0,255), 2)\n return image", "def _binary_image_to_grid_points(binary_image_matrix):\n\n return numpy.where(binary_image_matrix)", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def project_to_image(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = points_proj[2,:]\n point_z = np.tile(point_depths, [3, 1])\n points_proj = np.divide(points_proj, point_z)\n if round_px:\n points_proj = np.round(points_proj)\n points_proj = points_proj[:2,:].astype(np.int16)\n\n valid_ind = np.where((points_proj[0,:] >= 0) & \\\n (points_proj[1,:] >= 0) & \\\n (points_proj[0,:] < self.width) & \\\n (points_proj[1,:] < self.height))[0]\n\n depth_data = np.zeros([self.height, self.width])\n depth_data[points_proj[1,valid_ind], points_proj[0,valid_ind]] = point_depths[valid_ind]\n return DepthImage(depth_data, frame=self.frame)", "def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )", "def project_points(points, cam_matrix, trans, rot):\n\n # STEP 1: Transform pointcloud into new reference frame.\n points = np.dot(rot, points) + trans[:, None]\n\n # STEP 2: Project new pointcloud onto image frame using K matrix.\n # gives a 3 x N array of image plane coordinates in homogenous coordinates.\n homo_pixel_coords = np.dot(cam_matrix, points)\n\n # STEP 3: Convert homogenous coordinates to regular 2D coordinates.\n # To do this, you need to divide the first two coordinates of homo_pixel_coords\n # by the third coordinate.\n pixel_coords = homo_pixel_coords[:2] / homo_pixel_coords[2]\n\n # STEP 4: Convert to integers. 
Take the floor of pixel_coords then cast it\n # to an integer type, like numpy.int32\n pixel_coords = np.int32(np.floor(pixel_coords))\n\n return pixel_coords", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def firstorder(self):\n f = self.img\n x = self.x\n y = self.y\n self.x1 = sum(f*x)/sum(f)\n self.y1 = sum(f*y)/sum(f)", "def world_pos_from_img_pos(self, img_pos, img_shape, arm_pos, scale):\n centre_x = img_shape[0]/2\n centre_y = img_shape[1]/2\n #scale = 0.2*2/centre_x #m/pixel\n #print(\"centre x, y\")\n #print(centre_x)\n #print(centre_y)\n \n wld_x = arm_pos[0]\n wld_y = arm_pos[1]\n \n img_x = img_pos[0]\n img_y = img_pos[1]\n #print(\"img x, y\")\n #print(img_x)\n #print(img_y)\n \n img_dx = img_x - centre_x\n img_dy = img_y - centre_y\n #print(\"img dx, dy\")\n #print(img_dx)\n #print(img_dy)\n \n # +wld_x = -img_y ; +wld_y = -img_x\n wld_dx = -img_dy*scale\n wld_dy = -img_dx*scale\n\n #limit output\n #wld_dx = max(wld_dx, -centre_y*scale)\n #wld_dx = min(wld_dx, centre_y*scale)\n #wld_dy = max(wld_dy, -centre_x*scale)\n #wld_dy = min(wld_dy, centre_x*scale)\n \n new_wld_x = wld_x + wld_dx\n new_wld_y = wld_y + wld_dy\n \n return [new_wld_x, new_wld_y]", "def rec_transform(image, pts):\n ord_pts = order_points(pts)\n\n # find the dimension of the rectangular created by the given points", "def draw_points(in_img, points, colour=(255, 0, 0)):\n img = in_img.copy()\n\n radius = int(max(img.shape) / 100)\n\n img = convert_when_colour(colour, img)\n\n for point in points:\n img = cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1)\n\n return img", "def _project_pointcloud(self, cloud):\n\n assert isinstance(cloud, PointCloud2)\n\n pc1 = PointCloud()\n pc1.header = cloud.header\n # hack the time! 
dont move the robot :-0\n pc1.header.stamp = rospy.Time.now()\n \n \n pc1.points = [Point32(*p) for p in pc2.read_points(cloud)]\n\n self._tf_listener.waitForTransform(pc1.header.frame_id,\n self._image_info.tf_frame, \n rospy.Time(0), \n rospy.Duration(4))\n\n image_frame_cloud = self._tf_listener.transformPointCloud (\n self._image_info.tf_frame, \n pc1)\n min_x, max_x, min_y, max_y = 640, 0, 480, 0 # TODO: remove hard coded image size!\n for pt in image_frame_cloud.points:\n u, v = self._image_info.project3dToPixel((pt.x, pt.y, pt.z))\n if v < min_y:\n min_y = int(v)\n if v > max_y:\n max_y = int(v)\n if u < min_x:\n min_x = int(u)\n if u > max_x:\n max_x = int(u)\n location = (((min_x, min_y), (max_x, max_y)))\n rospy.loginfo(\"Transformed cloud into image plane\")\n return location", "def detect_points(self):\r\n\r\n\t\r\n\r\n\t\tfeature_mask = np.zeros_like(self.gray) ## Create a mask so we only look for template features in the ROI\r\n\t\t\r\n\t\tfeature_mask[max(0,self.bb[1]):min(360,self.bb[1] + self.bb[3]),max(0,self.bb[0]):min(640,self.bb[0] + self.bb[2])] = 255\r\n\r\n\t\t# search for good points\r\n\t\tfeatures = cv2.goodFeaturesToTrack(self.gray, mask = feature_mask, **feature_params)\r\n\t\t# refine the corner locations\r\n\t\tcv2.cornerSubPix(self.gray,features, **subpix_params)\r\n\r\n\t\tself.features = features\r\n\r\n\t\tself.tracks = [[p] for p in features.reshape((-1,2))]\r\n\r\n\t\tself.prev_gray = self.gray", "def point_sample(img_meta,\n img_features,\n points,\n proj_mat,\n coord_type,\n img_scale_factor,\n img_crop_offset,\n img_flip,\n img_pad_shape,\n img_shape,\n aligned=True,\n padding_mode='zeros',\n align_corners=True):\n\n # apply transformation based on info in img_meta\n points = apply_3d_transformation(\n points, coord_type, img_meta, reverse=True)\n\n # project points to camera coordinate\n pts_2d = points_cam2img(points, proj_mat)\n\n # img transformation: scale -> crop -> flip\n # the image is resized by img_scale_factor\n img_coors = pts_2d[:, 0:2] * img_scale_factor # Nx2\n img_coors -= img_crop_offset\n\n # grid sample, the valid grid range should be in [-1,1]\n coor_x, coor_y = torch.split(img_coors, 1, dim=1) # each is Nx1\n\n if img_flip:\n # by default we take it as horizontal flip\n # use img_shape before padding for flip\n orig_h, orig_w = img_shape\n coor_x = orig_w - coor_x\n\n h, w = img_pad_shape\n coor_y = coor_y / h * 2 - 1\n coor_x = coor_x / w * 2 - 1\n grid = torch.cat([coor_x, coor_y],\n dim=1).unsqueeze(0).unsqueeze(0) # Nx2 -> 1x1xNx2\n\n # align_corner=True provides higher performance\n mode = 'bilinear' if aligned else 'nearest'\n point_features = F.grid_sample(\n img_features,\n grid,\n mode=mode,\n padding_mode=padding_mode,\n align_corners=align_corners) # 1xCx1xN feats\n\n return point_features.squeeze().t()", "def apply_projection_transform(Xb, batch_size, image_size):\n d = image_size * 0.3 * intensity\n for i in np.random.choice(batch_size, int(batch_size * p), replace = False): \n tl_top = random.uniform(-d, d) # Top left corner, top margin\n tl_left = random.uniform(-d, d) # Top left corner, left margin\n bl_bottom = random.uniform(-d, d) # Bottom left corner, bottom margin\n bl_left = random.uniform(-d, d) # Bottom left corner, left margin\n tr_top = random.uniform(-d, d) # Top right corner, top margin\n tr_right = random.uniform(-d, d) # Top right corner, right margin\n br_bottom = random.uniform(-d, d) # Bottom right corner, bottom margin\n br_right = random.uniform(-d, d) # Bottom right corner, right margin\n\n 
transform = ProjectiveTransform()\n transform.estimate(np.array((\n (tl_left, tl_top),\n (bl_left, image_size - bl_bottom),\n (image_size - br_right, image_size - br_bottom),\n (image_size - tr_right, tr_top)\n )), np.array((\n (0, 0),\n (0, image_size),\n (image_size, image_size),\n (image_size, 0)\n )))\n Xb[i] = warp(Xb[i], transform, output_shape=(image_size, image_size), order = 1, mode = 'edge')\n\n return Xb" ]
[ "0.647679", "0.6467655", "0.6214912", "0.61901563", "0.61635965", "0.60568637", "0.6051804", "0.6028781", "0.60097945", "0.5999861", "0.5963049", "0.59306324", "0.5910674", "0.5894894", "0.5861491", "0.5838941", "0.58371437", "0.5833094", "0.58130443", "0.58091205", "0.58082163", "0.58014", "0.5800616", "0.57892793", "0.5785864", "0.5767218", "0.5746882", "0.57395655", "0.5713253", "0.56932056", "0.5687942", "0.56767595", "0.5655838", "0.56449866", "0.564489", "0.562995", "0.56170034", "0.5613361", "0.56121486", "0.55980575", "0.55906326", "0.5585249", "0.5578487", "0.55744255", "0.55738556", "0.5566334", "0.5561104", "0.55505913", "0.5532022", "0.5527812", "0.5526278", "0.55245066", "0.55227864", "0.5521574", "0.5521143", "0.55129266", "0.55072165", "0.5499034", "0.5499034", "0.5478996", "0.5462744", "0.54564166", "0.5453955", "0.5447829", "0.54405534", "0.5440086", "0.54390574", "0.54367155", "0.54345685", "0.5429956", "0.54277253", "0.54257095", "0.5417456", "0.5409382", "0.5408983", "0.53995657", "0.5397905", "0.53978556", "0.5395516", "0.5394027", "0.538942", "0.5389307", "0.53874576", "0.5381563", "0.5374305", "0.5367197", "0.5364152", "0.5359386", "0.53522635", "0.53519523", "0.53508294", "0.5349109", "0.5341369", "0.53392214", "0.53322315", "0.5327712", "0.5324962", "0.53201914", "0.5319041", "0.53122246" ]
0.7286386
0
Transforming ground points to image points
def GroundToImage_RzRyRz(self, groundPoints):
    X0 = float(self.exteriorOrientationParameters[0])
    Y0 = float(self.exteriorOrientationParameters[1])
    Z0 = float(self.exteriorOrientationParameters[2])

    xp = float(self.camera.principalPoint[0])
    yp = float(self.camera.principalPoint[1])

    R = self.rotationMatrix_RzRyRz
    r11 = float(R[0, 0])
    r12 = float(R[0, 1])
    r13 = float(R[0, 2])
    r21 = float(R[1, 0])
    r22 = float(R[1, 1])
    r23 = float(R[1, 2])
    r31 = float(R[2, 0])
    r32 = float(R[2, 1])
    r33 = float(R[2, 2])

    f = self.camera.focalLength

    camPoints = []

    for i in range(groundPoints.shape[0]):
        x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (
            groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
            groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))
        y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (
            groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (
            groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))

        camPoints.append([x, y])

    # return self.CameraToImage(np.array(camPoints))
    return (np.array(camPoints))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GroundToImage(self, groundPoints):\n X0 = float(self.exteriorOrientationParameters[0])\n Y0 = float(self.exteriorOrientationParameters[1])\n Z0 = float(self.exteriorOrientationParameters[2])\n\n xp = float(self.camera.principalPoint[0])\n yp = float(self.camera.principalPoint[1])\n\n R = self.rotationMatrix\n r11 = float(R[0, 0])\n r12 = float(R[0, 1])\n r13 = float(R[0, 2])\n r21 = float(R[1, 0])\n r22 = float(R[1, 1])\n r23 = float(R[1, 2])\n r31 = float(R[2, 0])\n r32 = float(R[2, 1])\n r33 = float(R[2, 2])\n\n f = self.camera.focalLength\n\n camPoints = []\n\n for i in range(groundPoints.shape[0]):\n x = xp - (f) * (((r11 * (groundPoints[i, 0] - X0) + r21 * (groundPoints[i, 1] - Y0) + r31 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n y = yp - (f) * (((r12 * (groundPoints[i, 0] - X0) + r22 * (groundPoints[i, 1] - Y0) + r32 * (\n groundPoints[i, 2] - Z0)) / (r13 * (groundPoints[i, 0] - X0) + r23 * (\n groundPoints[i, 1] - Y0) + r33 * (groundPoints[i, 2] - Z0))))\n\n camPoints.append([x, y])\n\n # return self.CameraToImage(np.array(camPoints))\n return (np.array(camPoints))", "def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img", "def four_point_transform(self, image, pts):\n rect = []\n for j in range(4):\n rect.append([pts[j * 2], pts[j * 2 + 1]])\n\n rect = np.array(rect, dtype=\"float32\")\n (tl, tr, br, bl) = rect\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. 
top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype=\"float32\")\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n # return the warped image\n return warped", "def _point_scale2img(points, _H, _W):\n # with tf.variable_scope(\"_point_scale2img\", reuse=False):\n points = points * tf.constant([_H - 1, _W - 1], \"float32\")\n return points", "def imageFromCamera(self, points, hide_backpoints=True):\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array([-points[..., 0] * self.focallength_x_px / points[..., 2] + self.center_x_px,\n points[..., 1] * self.focallength_y_px / points[..., 2] + self.center_y_px]).T\n if hide_backpoints:\n transformed_points[points[..., 2] > 0] = np.nan\n return transformed_points", "def ImageToGround_GivenZ(self, imagePoints, Z_values):\n cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = cameraPoints.T\n pars = self.exteriorOrientationParameters\n X0 = pars[0]\n Y0 = pars[1]\n Z0 = pars[2]\n\n T = np.array([[X0], [Y0], [Z0]])\n\n omega = pars[3]\n phi = pars[4]\n kappa = pars[5]\n R = Compute3DRotationMatrix(omega, phi, kappa)\n\n f = self.camera.focalLength\n\n # allocating memory for return array\n groundPoints = []\n\n for i in range(len(cameraPoints[1])):\n camVec = np.insert(cameraPoints[:, i], np.size(cameraPoints), -f)\n lam = (Z_values - Z0) / (np.dot(R[2, :], camVec))\n\n X = X0 + lam * np.dot(R[0, :], camVec)\n Y = Y0 + lam * np.dot(R[1, :], camVec)\n\n xy = [X, Y, Z_values]\n groundPoints.append(xy)\n\n groundPoints = np.array(groundPoints)\n\n return groundPoints", "def getCartesianPointsImage(self, points):\n return getCartesianPointsImage(points, self)", "def transform(self, previousimage):", "def fourPointTransform(image, points):\r\n\r\n topLeft, topRight, bottomLeft, bottomRight = sortFourPoints(points)\r\n\r\n # Determine the maximum width\r\n topWidth = np.sqrt(((topRight[0] - topLeft[0]) ** 2) + ((topRight[1] - topLeft[1]) ** 2))\r\n bottomWidth = np.sqrt(((bottomRight[0] - bottomLeft[0]) ** 2) + ((bottomRight[1] - bottomLeft[1]) ** 2))\r\n width = max(int(topWidth), int(bottomWidth))\r\n\r\n # Determine the maximum height\r\n leftHeight = np.sqrt(((topLeft[0] - bottomLeft[0]) ** 2) + ((topLeft[1] - bottomLeft[1]) ** 2))\r\n rightHeight = np.sqrt(((topRight[0] - bottomRight[0]) ** 2) + ((topRight[1] - bottomRight[1]) ** 2))\r\n height = max(int(leftHeight), int(rightHeight))\r\n\r\n source = np.array([topLeft, topRight, bottomRight, bottomLeft], dtype=\"float32\")\r\n\r\n destination = np.array([[0, 0],\r\n [width - 1, 0],\r\n [width - 1, height - 1],\r\n [0, height - 1]], dtype=\"float32\")\r\n\r\n transformMatrix = cv2.getPerspectiveTransform(source, destination)\r\n\r\n return cv2.warpPerspective(image, transformMatrix, (width, height))", "def _update_imgs_and_pt_list(self, points, edge_points, segs, index):\n # index specifies whether to use the x or y coordinate in x_pts\n x_pts=[]\n for i in range(0, len(points)):\n pt=points[i]\n #edge_points[pt[0],pt[1]] = 255\n x_pts.append(pt[index])\n #segs[pt[0],pt[1]]=150\n\n return x_pts, segs, edge_points", "def 
project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def transform(self,points):\n new_points = []\n for p in points:\n new_coordinates=p.coordinates\n new_coordinates = [(new_coordinates[i] - self.min_coordinate[i]) /\n (self.max_coordinate[i]-self.min_coordinate[i]) for i in range(len(p.coordinates))]\n new_points.append(Point(p.name, new_coordinates, p.label))\n return new_points", "def project_points_img(points, proj_mat, width, height):\n pixels = proj_mat.dot(points)\n pixels = np.divide(pixels[:2, :], pixels[2, :]).transpose().astype(np.int)\n\n # Remove pixels that are outside the image\n pixels[:, 0] = np.clip(pixels[:, 0], 0, width)\n pixels[:, 1] = np.clip(pixels[:, 1], 0, height)\n # mask_x = (pixels[:, 0] < width) & (pixels[:, 0] > 0)\n # mask_y = (pixels[:, 1] < height) & (pixels[:, 1] > 0)\n\n # # Return the pixels and points that are inside the image\n # pixels = pixels[mask_x & mask_y]\n return pixels", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def create_point_cloud(self):\n pixels = []\n colors = []\n my_pixels = []\n for j in range(self.height):\n for i in range(self.width):\n depth = self.depth[j, i]\n pixels.append(\n [i * depth, j * depth, depth]\n )\n my_pixels.append(\n [i, j, 1]\n )\n # make rgb with flip()\n colors.append(np.flip(self.bgr[j, i, :]))\n # colors.append(self.bgr[j, i, :])\n self.my_pixels = my_pixels\n pixels = np.array(pixels)\n\n # project pixels to camera space\n self.xyz_points = self.intrinsics_inv @ np.transpose(pixels)\n self.color_points = colors\n\n # now add 1s to the points for homogenous coordinates\n num_points = self.get_num_xyz_points()\n ones = np.ones((1, num_points))\n self.xyzw_points = np.concatenate((self.xyz_points, ones), axis=0)\n\n self.scene = None\n self.camera_pose = None\n self.nm = None\n self.nl = None\n self.nc = None\n self.create_mesh()", "def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst", "def process_warp(src_img, result_img: np.zeros,\n tri_affines: np.matrix, dst_points: np.array,\n delaunay) -> None:\n roi_coords = grid_coordinates(dst_points)\n # indices to vertices. 
-1 if pixel is not in any triangle\n roi_tri_indices = delaunay.find_simplex(roi_coords)\n\n for simplex in enumerate(delaunay.simplices):\n coords = roi_coords[roi_tri_indices == simplex[0]]\n num_coords = len(coords)\n out_coords = np.dot(tri_affines[simplex[0]],\n np.vstack((coords.T, np.ones(num_coords))))\n x, y = coords.T\n result_img[y, x] = bilinear_interpolate(src_img, out_coords)\n\n return None", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def transform_images(img1,img2):", "def geo_transform(self):\n pass", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * np.arctan2(points[..., 1], np.sqrt(\n points[..., 0] ** 2 + points[..., 2] ** 2)) + self.center_y_px]).T\n\n # return the points\n return transformed_points", "def project(self):\n def _project(point):\n return (\n point[0]/(point[2]/Window.COP_DISTANCE+1),\n point[1]/(point[2]/Window.COP_DISTANCE+1))\n\n self._points = [list(map(_project, face)) for face in self._points]", "def get_point_coords_wrt_image(boxes_coords, point_coords):\n with torch.no_grad():\n point_coords_wrt_image = point_coords.clone()\n point_coords_wrt_image[:, :, 0] = point_coords_wrt_image[:, :, 0] * (\n boxes_coords[:, None, 2] - boxes_coords[:, None, 0]\n )\n point_coords_wrt_image[:, :, 1] = point_coords_wrt_image[:, :, 1] * (\n boxes_coords[:, None, 3] - boxes_coords[:, None, 1]\n )\n point_coords_wrt_image[:, :, 0] += boxes_coords[:, None, 0]\n point_coords_wrt_image[:, :, 1] += boxes_coords[:, None, 1]\n return point_coords_wrt_image", "def GenerateMapAffinity(img,nb_vertex,pointsInterest,objects_centroid,scale):\n\n # Apply the downscale right now, so the vectors are correct. 
\n img_affinity = Image.new(img.mode, (int(img.size[0]/scale),int(img.size[1]/scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2,int(img.size[1]/scale),int(img.size[0]/scale)))\n \n for i_pointsImage in range(len(pointsInterest)): \n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0]/scale),\n int(img.size[1]/scale),\n tuple((np.array(pointsImage[i_points])/scale).tolist()),\n tuple((np.array(center)/scale).tolist()), \n img_affinity = img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair)/2\n\n\n # Normalizing\n v = affinities[i_points].numpy() \n \n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero]/=norms[nonzero]\n yvec[nonzero]/=norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec],[yvec]]))\n affinities = torch.cat(affinities,0)\n\n return affinities", "def four_point_transform(image, pts):\n\n\tmax_x, max_y = np.max(pts[:, 0]).astype(np.int32), np.max(pts[:, 1]).astype(np.int32)\n\n\tdst = np.array([\n\t\t[0, 0],\n\t\t[image.shape[1] - 1, 0],\n\t\t[image.shape[1] - 1, image.shape[0] - 1],\n\t\t[0, image.shape[0] - 1]], dtype=\"float32\")\n\n\twarped = cv2.warpPerspective(image, cv2.getPerspectiveTransform(dst, pts), (max_x, max_y))\n\n\treturn warped", "def imageFromCamera(self, points, hide_backpoints=True):\n # ensure that the points are provided as an array\n points = np.array(points)\n # set small z distances to 0\n points[np.abs(points[..., 2]) < 1e-10] = 0\n # transform the points\n with np.errstate(divide='ignore', invalid='ignore'):\n transformed_points = np.array(\n [-self.focallength_x_px * np.arctan2(-points[..., 0], -points[..., 2]) + self.center_x_px,\n -self.focallength_y_px * points[..., 1] / np.linalg.norm(points[..., [0, 2]],\n axis=-1) + self.center_y_px]).T\n # ensure that points' x values are also nan when the y values are nan\n transformed_points[np.isnan(transformed_points[..., 1])] = np.nan\n # return the points\n return transformed_points", "def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y", "def _convert_image_to_coordinates(self, vect) -> np.ndarray:\n xdim = vect.shape[0]\n ydim = vect.shape[1]\n\n # stride is used during averaging and length adjustment\n stride_x, stride_y = self._averaging, self._averaging\n\n # create empty vector of necessary shape\n # every \"pixel\" has 2 coordinates\n pos = np.empty((2 * xdim 
* ydim, 2), dtype=np.float32)\n\n # create coordinate spacing for x-y\n # double the num of elements by doubling x sampling\n xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False)\n yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False)\n xv, yv = np.meshgrid(xspace, yspace)\n\n # assign coordinates (pos) to all pixels\n pos[:, 0] = xv.flatten()\n pos[:, 1] = yv.flatten()\n\n # pixel midpoints are the first x-values of positions\n midpt = np.zeros((xdim * ydim, 2), dtype=np.float32)\n midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2\n midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2\n\n # rotate coordinates about midpoint to represent angle and length\n pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 0]\n pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 1]\n pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 0]\n pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \\\n vect.reshape((xdim*ydim, 2))[:, 1]\n\n return pos", "def projectBack(points, proj):\n\n mpoints = MultiPoint(points)\n project = partial(\n pyproj.transform,\n proj,\n pyproj.Proj(proj='latlong', datum='WGS84'))\n gmpoints = transform(project, mpoints)\n coords = []\n for point in gmpoints.geoms:\n x, y = point.coords[0]\n coords.append((x, y))\n coords = np.array(coords)\n return coords", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def coordinates_to_imgpts(x, y):\n pts = np.array([np.flipud(np.transpose(np.vstack([x, y])))])\n return pts", "def problem2():\n \n pts_array, feats_array = p2.load_pts_features('data/pts_feats.npz')\n\n # points and features for image1 and image2\n pts1, pts2 = pts_array\n fts1, fts2 = feats_array\n\n # Loading images\n img1 = Image.open('data/img1.png')\n img2 = Image.open('data/img2.png')\n\n im1 = np.array(img1)\n im2 = np.array(img2)\n\n plt.figure(1)\n plt.subplot(1, 2, 1)\n plt.imshow(im1)\n plt.plot(pts1[:, 0], pts1[:, 1], 'ro', markersize=1.3)\n plt.subplot(1, 2, 2)\n plt.imshow(im2)\n plt.plot(pts2[:, 0], pts2[:, 1], 'ro', markersize=1.3)\n\n # display algined image\n H, ix1, ix2 = p2.final_homography(pts1, pts2, feats_array[0],\n feats_array[1])\n\n pts1 = pts1[ix1]\n pts2 = pts2[ix2]\n\n plt.figure(2)\n plt.subplot(1, 3, 1).set_title('Image 1')\n plt.imshow(im1)\n plt.plot(pts1[:, 0],\n pts1[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 2).set_title('Image 2')\n plt.imshow(im2)\n plt.plot(pts2[:, 0],\n pts2[:, 1],\n 'ro',\n markersize=2.3,\n markerfacecolor='none')\n plt.subplot(1, 3, 3).set_title('Algined image 1')\n\n H_inv = np.linalg.inv(H)\n H_inv /= H_inv[2, 2]\n im3 = img1.transform(size=(im1.shape[1], im1.shape[0]),\n method=Image.PERSPECTIVE,\n data=H_inv.ravel(),\n resample=Image.BICUBIC)\n\n plt.show()", "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. 
\n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def projectToImage(pts_3D, P):\n pts_3D = np.reshape(pts_3D, (-1, 3))\n pts_3D = np.transpose(pts_3D)\n pts_3D = np.vstack([pts_3D, 1])\n pts_2D = np.matmul(P, pts_3D)\n pts_2D = pts_2D[:2]/pts_2D[-1]\n pts_2D = np.transpose(pts_2D)\n return pts_2D", "def ImageToRay(self, imagePoints):\n pass # delete after implementations", "def direction(self):\n import pylab\n i = 0\n j = 0\n vals = []\n vects = []\n kpx = self.keypoints.x\n kpy = self.keypoints.y\n sigma = self.keypoints.sigma\n img = self.raw\n pylab.figure()\n pylab.imshow(img, interpolation='nearest')\n\n for y, x, s in zip(kpy, kpx, sigma):\n s_patch = numpy.trunc(s * 2)\n\n if s_patch % 2 == 0 :\n s_patch += 1\n\n if s_patch < 3 : s_patch = 3\n\n if (x > s_patch / 2 and x < img.shape[1] - s_patch / 2 - 1 and y > s_patch / 2 and y < img.shape[0] - s_patch / 2):\n\n patch = img[y - (s_patch - 1) / 2:y + (s_patch - 1) / 2 + 1, x - (s_patch - 1) / 2:x + (s_patch - 1) / 2 + 1]\n x_patch = numpy.arange(s_patch)\n Gx = numpy.exp(-4 * numpy.log(2) * (x_patch - numpy.median(x_patch)) ** 2 / s)\n Gy = Gx[:, numpy.newaxis]\n dGx = -Gx * 4 * numpy.log(2) / s * 2 * (x_patch - numpy.median(x_patch))\n dGy = dGx[:, numpy.newaxis]\n d2Gx = -8 * numpy.log(2) / s * ((x_patch - numpy.median(x_patch)) * dGx + Gx)\n d2Gy = d2Gx[:, numpy.newaxis]\n\n Hxx = d2Gx * Gy\n Hyy = d2Gy * Gx\n Hxy = dGx * dGy\n\n d2x = (Hxx.ravel() * patch.ravel()).sum()\n d2y = (Hyy.ravel() * patch.ravel()).sum()\n dxy = (Hxy.ravel() * patch.ravel()).sum()\n H = numpy.array([[d2y, dxy], [dxy, d2x]])\n val, vect = numpy.linalg.eig(H)\n\n# print 'new point'\n# print x, y\n# print val\n# print vect\n# print numpy.dot(vect[0],vect[1])\n e = numpy.abs(val[0] - val[1]) / numpy.abs(val[0] + val[1])\n j += 1\n# print j\n# print e\n if numpy.abs(val[1]) < numpy.abs(val[0]): # reorganisation des valeurs propres et vecteurs propres\n val[0],val[1] = val[1],val[0]\n vect = vect[-1::-1,:]\n\n\n pylab.annotate(\"\", xy=(x + vect[0][0] * val[0], y + vect[0][1] * val[0]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n\n pylab.annotate(\"\", xy=(x + vect[1][0] * val[1], y + vect[1][1] * val[1]), xytext=(x, y),\n arrowprops=dict(facecolor='red', shrink=0.05),)\n pylab.plot(x, y, 'og')\n vals.append(val)\n vects.append(vect)\n return vals, vects", "def extract_poses(self, labels):\n 
height, width = self.topdown_view.shape\n n_gridpoints_width, n_gridpoints_height = (\n width // self.dist - 1,\n height // self.dist - 1,\n )\n self.gridpoints = []\n for h in range(n_gridpoints_height):\n for w in range(n_gridpoints_width):\n point = (self.dist + h * self.dist, self.dist + w * self.dist)\n if self.valid_point(*point):\n self.gridpoints.append(point)\n\n # Find the closest point of the target class to each gridpoint\n poses = []\n self.cpis = []\n for point in self.gridpoints:\n closest_point_of_interest, label = self._bfs(point, labels)\n if closest_point_of_interest is None:\n continue\n\n poses.append((point, closest_point_of_interest, label))\n self.cpis.append(closest_point_of_interest)\n\n # Convert from topdown map coordinate system to that of the pathfinder\n startw, starty, starth = self._get_pathfinder_reference_point()\n for i, pose in enumerate(poses):\n pos, cpi, label = pose\n r1, c1 = pos\n r2, c2 = cpi\n new_pos = np.array(\n [\n startw + c1 * self.pixels_per_meter,\n starty,\n starth + r1 * self.pixels_per_meter,\n ]\n )\n new_cpi = np.array(\n [\n startw + c2 * self.pixels_per_meter,\n starty,\n starth + r2 * self.pixels_per_meter,\n ]\n )\n cam_normal = new_cpi - new_pos\n new_rot = self._compute_quat(cam_normal)\n poses[i] = (new_pos, new_rot, label)\n\n return poses", "def generate_pointcloud(rgb_file, mask_file,depth_file,ply_file):\n rgb = Image.open(rgb_file)\n # depth = Image.open(depth_file)\n depth = Image.open(depth_file).convert('I')\n mask = Image.open(mask_file).convert('I')\n\n # if rgb.size != depth.size:\n # raise Exception(\"Color and depth image do not have the same resolution.\")\n # if rgb.mode != \"RGB\":\n # raise Exception(\"Color image is not in RGB format\")\n # if depth.mode != \"I\":\n # raise Exception(\"Depth image is not in intensity format\")\n\n\n points = [] \n for v in range(rgb.size[1]):\n for u in range(rgb.size[0]):\n color = rgb.getpixel((u,v))\n # Z = depth.getpixel((u,v)) / scalingFactor\n # if Z==0: continue\n # X = (u - centerX) * Z / focalLength\n # Y = (v - centerY) * Z / focalLength\n if (mask.getpixel((u,v))<55):\n Z = depth.getpixel((u, v))*.22 \n if Z == 0: continue\n Y = .22 * v\n X = .22 * u\n points.append(\"%f %f %f %d %d %d 0\\n\"%(X,Y,Z,color[0],color[1],color[2]))\n file = open(ply_file,\"w\")\n file.write('''ply\nformat ascii 1.0\nelement vertex %d\nproperty float x\nproperty float y\nproperty float z\nproperty uchar red\nproperty uchar green\nproperty uchar blue\nproperty uchar alpha\nend_header\n%s\n'''%(len(points),\"\".join(points)))\n file.close()", "def imageFromCamera(self, points): # pragma: no cover\n # to be overloaded by the child class.\n return None", "def project_points(self, points3d, withmask=False, binary_mask=True):\n if withmask:\n return Geometry.reproject_points_to_2d(\n points3d, self.rvec, self.tvec, self.K, self.w, self.h,\n distCoef=self.distCoef, binary_mask=binary_mask)\n else:\n assert type(points3d) == np.ndarray, \"Points3d has to be a numpy array\"\n\n # Treatment for empty points\n # Add row index of None points to new list and remove the empty elements\n indexes_of_empty_points = np.unique(np.where(points3d == None)[0])\n points_to_interpolate = np.float32(points3d.copy())\n points_to_interpolate = np.delete(points_to_interpolate, indexes_of_empty_points, axis=0)\n\n # Project all the points in the camera if points_to_interpolate is not empty\n if points_to_interpolate.size != 0:\n batch_size = 1000000 \n if len(points_to_interpolate) > batch_size:\n # Create 
batches\n num_batches = int(len(points_to_interpolate) / batch_size)\n last_batch_length = len(points_to_interpolate) % batch_size\n\n pts2d = [] \n for i in range(num_batches):\n indices = range(i*batch_size,i*batch_size + batch_size)\n\n batch = np.take(points_to_interpolate, indices, axis=0) \n pts2d_batch, _ = cv2.projectPoints(batch, self.rvec, self.tvec, self.K, self.distCoef)\n pts2d.append(pts2d_batch)\n\n if last_batch_length > 0:\n indices = range(num_batches*batch_size,num_batches*batch_size + last_batch_length)\n batch = np.take(points_to_interpolate, indices, axis=0) \n pts2d_batch, _ = cv2.projectPoints(batch, self.rvec, self.tvec, self.K, self.distCoef)\n pts2d.append(pts2d_batch)\n\n pts2d = np.concatenate(pts2d, axis=0)\n else:\n pts2d, _ = cv2.projectPoints(points_to_interpolate,\n self.rvec,\n self.tvec,\n self.K, self.distCoef)\n else:\n pts2d = np.array([])\n\n pts2d = np.squeeze(pts2d)\n if len(pts2d.shape) == 1:\n pts2d = np.expand_dims(pts2d, axis=0)\n\n # Add empty(None) points again\n pts2d = pts2d.tolist()\n for index in indexes_of_empty_points:\n pts2d.insert(index, [None, None])\n return np.array(pts2d)", "def triangulate(left_pts, right_pts, P, P1):\n left_pts = np.array(left_pts).astype(float)\n right_pts = np.array(right_pts).astype(float)\n my_points = cv2.triangulatePoints(P,P1,left_pts.T,right_pts.T)\n\n # convert to inhomogeneous coordinates\n for i in range(my_points.shape[1]):\n my_points[0,i] /= my_points[3,i]\n my_points[1,i] /= my_points[3,i]\n my_points[2,i] /= my_points[3,i]\n my_points[3,i] /= my_points[3,i]\n\n return create_pointcloud(my_points.T)", "def inv_projmap(self, img, nside=None):\n pass", "def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. 
+ self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)", "def warping(src, dst, H, ymin, ymax, xmin, xmax, direction='b'):\r\n\r\n h_src, w_src, ch = src.shape\r\n h_dst, w_dst, ch = dst.shape\r\n H_inv = np.linalg.inv(H)\r\n\r\n # TODO: 1.meshgrid the (x,y) coordinate pairs\r\n x = np.linspace(xmin, xmax-1, xmax-xmin)\r\n y = np.linspace(ymin, ymax-1, ymax-ymin)\r\n x, y = np.meshgrid(x, y)\r\n x = x.reshape(-1).astype(int)\r\n y = y.reshape(-1).astype(int)\r\n u = np.vstack((x, y, np.ones(len(x))))\r\n\r\n # TODO: 2.reshape the destination pixels as N x 3 homogeneous coordinate\r\n\r\n if direction == 'b':\r\n # TODO: 3.apply H_inv to the destination pixels and retrieve (u,v) pixels, then reshape to (ymax-ymin),(xmax-xmin)\r\n H_inv = np.linalg.inv(H)\r\n v = H_inv @ u\r\n vx = np.round(v[0] / v[2]).astype(int)\r\n vy = np.round(v[1] / v[2]).astype(int)\r\n\r\n # TODO: 4.calculate the mask of the transformed coordinate (should not exceed the boundaries of source image)\r\n mask = (vx >= 0) & (vx < w_src) & (vy >= 0) & (vy < h_src)\r\n\r\n # TODO: 5.sample the source image with the masked and reshaped transformed coordinates\r\n x = x[mask]\r\n y = y[mask]\r\n vx = vx[mask]\r\n vy = vy[mask]\r\n\r\n # TODO: 6. assign to destination image with proper masking\r\n dst[y, x] = src[vy, vx]\r\n\r\n elif direction == 'f':\r\n # TODO: 3.apply H to the source pixels and retrieve (u,v) pixels, then reshape to (ymax-ymin),(xmax-xmin)\r\n v = H @ u\r\n vx = np.round(v[0] / v[2]).astype(int)\r\n vy = np.round(v[1] / v[2]).astype(int)\r\n\r\n # TODO: 4.calculate the mask of the transformed coordinate (should not exceed the boundaries of destination image)\r\n mask = (vx >= 0) & (vx < w_dst) & (vy >= 0) & (vy < h_dst)\r\n\r\n # TODO: 5.filter the valid coordinates using previous obtained mask\r\n x = x[mask]\r\n y = y[mask]\r\n vx = vx[mask]\r\n vy = vy[mask]\r\n\r\n # TODO: 6. 
assign to destination image using advanced array indicing\r\n dst[vy, vx] = src[y, x]\r\n\r\n return dst", "def projectPoints(self, points):\n return [self.projectPoint(point) for point in points]", "def translate(self, source, destination):\n\n # Based on https://www.onlinemathlearning.com/transformation-review.html\n x_diff = destination.x - source.x\n y_diff = destination.y - source.y\n return Collection(\n ColouredPoint(x=p.x+x_diff, y=p.y+y_diff, color=p.color) for p in self.points\n )", "def get_point_coords_wrt_image(boxes, point_coords):\n # with tf.variable_scope(\"get_point_coords_wrt_image\", reuse=False):\n boxes = tf.stop_gradient(boxes)\n point_coords = tf.stop_gradient(point_coords)\n h = boxes[:, None, 2] - boxes[:, None, 0]\n w = boxes[:, None, 3] - boxes[:, None, 1]\n y1 = boxes[:, None, 0]\n x1 = boxes[:, None, 1]\n scale = tf.stack([h, w], axis=-1)\n trans = tf.stack([y1, x1], axis=-1)\n point_coords = point_coords * scale\n point_coords = point_coords + trans\n return point_coords", "def apply_transformation(self, points):\n assert (points.shape[0] == 3)\n n = points.shape[1]\n points_ = np.vstack((points, np.ones((1, n))))\n points_trans_ = np.matmul(self.pose_mat, points_)\n points_transformed = np.true_divide(points_trans_[:3, :], points_trans_[[-1], :])\n return points_transformed", "def pose_2d_pts(self,image):\n '''\n image- rgb image \n return:-\n pts - list of 2d pose landmarks as img coords\n image- rgb image on which the 2d pose landmarks are drawn\n ''' \n pts=[]\n imgRGB=cv2.cvtColor(image,cv2.COLOR_BGR2RGB)\n results=pose.process(imgRGB)\n if results.pose_landmarks:\n mpDraw.draw_landmarks(image,results.pose_landmarks,mpPose.POSE_CONNECTIONS)\n for id,lm in enumerate(results.pose_landmarks.landmark):\n h,w,c=image.shape\n imgx,imgy=int(lm.x*w),int(lm.y*h)\n \n pts.append((imgx,imgy)) \n return pts,image", "def spatial(self):", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, 
axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image", "def compute_positions(self):\n return (self.x + DIRECTIONS[self.facing_direction][0]) % (self.image.shape[0] - 1), \\\n (self.y + DIRECTIONS[self.facing_direction][1]) % (self.image.shape[1] - 1)", "def draw_points(self, pic_path, points_data):\n # Pupil Finding here\n pupils = get_eye_locations_in_image(pic_path)\n img = cv2.imread(pic_path)\n frame_number = int(re.findall(r'\\d+', pic_path.split('/')[-1])[0])\n dets = detector(img)\n shape = None\n height, width, channels = img.shape\n\n for k, d in enumerate(dets):\n shape = predictor(img, d)\n\n if(not shape):\n return\n\n pointList = []\n c = 0\n for b in range(68):\n # sanitizing input points\n point = Point(shape.part(b).x, shape.part(b).y)\n points_data[c] = [point.x, point.y]\n c = c + 1\n # some points might be out of bound\n # so, move them to the closest boundary\n if(point.x < 0):\n point.x = 0\n elif(point.x >= width):\n point.x = width - 1\n if(point.y < 0):\n point.y = 0\n elif(point.y >= height):\n point.y = height - 1\n\n pointList.append(point)\n\n roll = findRoll(pointList)\n #print(\"roll is \" + str(roll) + ' angles')\n yaw = findYaw(pointList)\n #print(\"yaw is \" + str(yaw) + ' angles')\n pitch = findPitch(pointList)\n #print(\"pitch is \" + str(pitch) + ' angles')\n self.data[frame_number] = [roll, yaw, pitch]\n counter = 0\n for point in pointList:\n cv2.circle(img, (point.x, point.y), ImageProcessor.POINT_SIZE, ImageProcessor.POINT_COLOR, -1)\n counter = counter + 1\n\n self.draw_triangles(img, pointList)\n \n for pupil in pupils:\n cv2.circle(img, (pupil.left.x, pupil.left.y), 5, (0,0,255), -1)\n cv2.circle(img, (pupil.right.x, pupil.right.y), 5, (0,0,255), -1)\n points_data[-1] = [pupil.left.x, pupil.left.y]\n points_data[-2] = [pupil.right.x, pupil.right.y]\n #print(pupil.left.x, \", \", pupil.left.y)\n #print(pupil.right.x, \", \", 
pupil.right.y)\n\n cv2.imwrite(pic_path, img)", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def GenerateMapAffinity(img, nb_vertex, pointsInterest, objects_centroid, scale):\n\n # Apply the downscale right now, so the vectors are correct.\n img_affinity = Image.new(img.mode, (int(img.size[0] / scale), int(img.size[1] / scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2, int(img.size[1] / scale), int(img.size[0] / scale)))\n\n for i_pointsImage in range(len(pointsInterest)):\n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0] / scale),\n int(img.size[1] / scale),\n tuple((np.array(pointsImage[i_points]) / scale).tolist()),\n tuple((np.array(center) / scale).tolist()),\n img_affinity=img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair) / 2\n\n # Normalizing\n v = affinities[i_points].numpy()\n\n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero] /= norms[nonzero]\n yvec[nonzero] /= norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec], [yvec]]))\n affinities = torch.cat(affinities, 0)\n\n return affinities", "def projectToImage_kitti(pts_3D, P):\n # project in image\n mat = np.vstack((pts_3D, np.ones((pts_3D.shape[1]))))\n\n pts_2D = np.dot(P, mat)\n\n # scale projected points\n pts_2D[0, :] = pts_2D[0, :] / pts_2D[2, :]\n pts_2D[1, :] = pts_2D[1, :] / pts_2D[2, :]\n pts_2D = np.delete(pts_2D, 2, 0)\n\n return pts_2D", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def project_points_undist(self, points3d):\n pts2d, _ = cv2.projectPoints(points3d,\n self.rvec,\n self.tvec,\n self.K_new, 0)\n pts2d = np.squeeze(pts2d)\n if len(pts2d.shape) == 1:\n pts2d = np.expand_dims(pts2d, axis=0)\n return pts2d", "def convert_image_point_to_global_coordinates(points, camera_location):\n # TODO: The camera should take photos which record the camera_location, and scale factors etc.\n # This should be a method on such an image.\n\n # Convert to numpy object for a clean notation\n points = np.array(points)\n camera_location = np.array(camera_location)\n scale_factors = np.array([config.Y_PIXELS_TO_MILLIMETRE_SCALE, config.X_PIXELS_TO_MILLIMETRE_SCALE])\n camera_resolution = np.array(config.CAMERA_RESOLUTION)\n\n # Do the computation\n image_centre = camera_resolution / 2\n return 
camera_location + scale_factors * (points - image_centre)", "def update_transform(self):\n\n self.a = self.scale * self.pixel_size * math.cos(self.angle)\n self.d = self.scale * self.pixel_size * math.sin(self.angle)\n self.b = self.d\n self.e = -self.a\n self.c = self.point.x() - self.a*self.width/2.0 - self.b*self.height/2.0\n self.f = self.point.y() - self.d*self.width/2.0 - self.e*self.height/2.0\n\n self.bounding_box = [[self.c,self.f],[self.c+self.a*self.width,self.f+self.d*self.width],[self.c+self.a*self.width+self.b*self.height,self.f+self.d*self.width+self.e*self.height],[self.c+self.b*self.height,self.f+self.e*self.height],]", "def evaluate(self, points):\n points = np.array(points, np.float64)\n output_shape = points.shape[1:]\n points.shape = (points.shape[0], seq_prod(output_shape))\n cmapi = self.image.coordmap.inverse()\n voxels = cmapi(points.T).T\n V = map_coordinates(self.data,\n voxels,\n order=self.order,\n mode=self.mode,\n cval=self.cval,\n prefilter=False)\n # ndimage.map_coordinates returns a flat array,\n # it needs to be reshaped to the original shape\n V.shape = output_shape\n return V", "def warpImag(src_img: np.ndarray, dst_img: np.ndarray) -> None:\r\n\r\n dst_p = []\r\n fig1 = plt.figure()\r\n size = src_img.shape\r\n # no need to take the coordinates of the second image in order to do the homography just pick the corners\r\n # coordinates\r\n pts_src = np.array(\r\n [\r\n [0, 0],\r\n [size[1] - 1, 0],\r\n [size[1] - 1, size[0] - 1],\r\n [0, size[0] - 1]\r\n ], dtype=float\r\n )\r\n def onclick_1(event):\r\n x = event.xdata\r\n y = event.ydata\r\n print(\"Loc: {:.0f},{:.0f}\".format(x, y))\r\n\r\n plt.plot(x, y, '*r')\r\n dst_p.append([x, y])\r\n\r\n if len(dst_p) == 4:\r\n plt.close()\r\n plt.show()\r\n\r\n # display image 1\r\n cid = fig1.canvas.mpl_connect('button_press_event', onclick_1)\r\n plt.imshow(dst_img)\r\n plt.show()\r\n dst_p = np.array(dst_p)\r\n\r\n ##### Your Code Here ######\r\n h = computeHomography(pts_src, dst_p) # my function to find the homography matrix in order to do projection\r\n # to the coordinates by this equations from opencv dst(x,y) = src(m11x + m12y +m13/ m31x +m32y +m33\r\n # , m21x + m22y +m23/ m31x +m32y +m33)\r\n im_temp = warpPerspective(src_img , h, (dst_img.shape[1],dst_img.shape[0]))\r\n plt.imshow(im_temp)\r\n plt.show()\r\n im_dst2 = im_temp + dst_img\r\n plt.imshow(im_dst2.astype('uint8'))\r\n plt.show()\r\n\r\n pass", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # 
\"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms", "def get_graph(self, points):\n\n gridmap = cv2.imread(self.ruta_imagen, -1)\n\n gridmap = self.four_point_transform(gridmap, points)\n\n gridmap[(gridmap >= 179) & (gridmap <= 238)] = 0\n gridmap[(gridmap >= 241) & (gridmap <= 255)] = 255\n\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))\n gridmap_dilatated = cv2.dilate(cv2.bitwise_not(gridmap), kernel, iterations=1)\n gridmap_dilatated = cv2.bitwise_not(gridmap_dilatated)\n\n scale_percent = 25 # percent of original size\n width = int(gridmap_dilatated.shape[1] * scale_percent / 100)\n height = int(gridmap_dilatated.shape[0] * scale_percent / 100)\n dim = (width, height)\n gridmap_resized = cv2.resize(gridmap_dilatated, dim, interpolation=cv2.INTER_NEAREST)\n\n self.gridmap2graph(gridmap_resized, width, height)\n\n return gridmap_resized, width, height", "def _image_to_point_space(pixel_coordinates: np.array, boundary_radius: int, resolution: int) -> np.array:\n pix_origin = np.array([resolution / 2, resolution / 2])\n return np.array((pixel_coordinates - pix_origin) * (2 * boundary_radius) / resolution)", "def _point_to_image_space(point: Union[Point, np.array], boundary_radius: int, resolution: int) -> np.array:\n origin = np.array([resolution / 2, resolution / 2])\n if type(point) is Point:\n point = point.numpy()\n return (point * resolution / (2 * boundary_radius) + origin).round().astype(np.uint16)", "def GeneratePointsImg(self, n, ppa):\n x = np.linspace(0,self.camera.sensorSize,n)+ppa[0]\n y = np.linspace(0,self.camera.sensorSize,n)+ppa[1]\n\n return np.meshgrid(x, y)", "def charuco_img_points(images, objpoint, board, a_dict):\r\n #Criteria for subpixel refinement\r\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)\r\n\r\n objpoints = [] # 3d point in world space\r\n imgpoints = [] # 2d point in image plane\r\n\r\n for img in images:\r\n gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n corners, ids, rejpoints = cv2.aruco.detectMarkers(gray, a_dict)\r\n if len(corners)>0:\r\n res2 = cv2.aruco.interpolateCornersCharuco(corners,ids,gray,board)\r\n if res2[1] is not None:\r\n cv2.cornerSubPix(gray,res2[1],(3,3),(-1,1),criteria)\r\n imgpoints.append(res2[1].T[:,0,:])\r\n objpoints.append(objpoint[:,res2[2].flatten()])\r\n cv2.aruco.drawDetectedCornersCharuco(img,res2[1],res2[2])\r\n cv2.imshow(\"frame\",img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n return objpoints,imgpoints", "def generate_image(self,true_dist):\n N_POINTS = 128\n RANGE = 3\n\n points = np.zeros((N_POINTS, N_POINTS, 2), dtype='float32')\n points[:, :, 0] = np.linspace(-RANGE, RANGE, N_POINTS)[:, None]\n points[:, :, 1] = np.linspace(-RANGE, RANGE, N_POINTS)[None, :]\n points = points.reshape((-1, 2))\n\n plt.clf()\n\n #true_dist = true_dist.cpu().data.numpy()\n samples = self.G(self.sample_z_)\n print('generate size is',samples.size())\n samples = samples.cpu().data.numpy()\n\n x = y = np.linspace(-RANGE, RANGE, N_POINTS)\n # plt.contour(x, y, disc_map.reshape((len(x), len(y))).transpose())\n\n plt.scatter(true_dist[:, 0], true_dist[:, 1], c='orange', marker='+')\n plt.scatter(samples[:, 0], samples[:, 1], c='green', marker='+')\n plt.show()", "def converte_coord(valor):\n\n pts1 = ([0,0],[24,0],[24,44],[0,44])\n pts1 = np.asarray(pts1, dtype = np.float32)\n pts2 = np.float32([[0,0],[100,0], [100,100], 
[0,100]])\n\n M = cv.getPerspectiveTransform(pts1,pts2)\n img2 = cv.warpPerspective(valor,M,(100,100))\n return img2", "def rectangular_perpective_transform(image, points):\n # We first order our points so they go clockwise from the top left. Top left point must have the\n # lowest coordinate sum, while the bottom right must have the largest\n ordered_pts = np.empty((4, 2), dtype = 'float32')\n pt_sum = np.sum(points, axis = 1)\n ordered_pts[0] = points[np.argmin(pt_sum)]\n ordered_pts[2] = points[np.argmax(pt_sum)]\n\n # the top right should have smallest coordinate difference, bottom left the largest\n pt_diff = np.diff(points, axis = 1)\n ordered_pts[1] = points[np.argmin(pt_diff)]\n ordered_pts[3] = points[np.argmax(pt_diff)]\n\n # for convenience, we store the points as variables for convenience in calculating width / height\n (top_left, top_right, bottom_right, bottom_left) = ordered_pts\n\n top_width = np.linalg.norm(top_right - top_left)\n bottom_width = np.linalg.norm(bottom_right - bottom_left)\n width = int(max(top_width, bottom_width))\n\n left_height = np.linalg.norm(bottom_left - top_left)\n right_height = np.linalg.norm(bottom_right - top_right)\n height = int(max(left_height, right_height))\n\n # create destination coordinate points to give us a top-down view of the subimage enclosed by the original points\n dest_points = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]], dtype = 'float32')\n transform_matrix = cv2.getPerspectiveTransform(ordered_pts, dest_points)\n return cv2.warpPerspective(image, transform_matrix, (width, height))", "def transform_image(image, transform, mapping, alpha = 1, incr_x = 10, incr_y = 10):\r\n background = [255, 255, 255, 0]\r\n width, height = image.size\r\n image_in = np.array(image.convert(\"RGBA\"))\r\n image_out = [[background[:] for j in range(width)] for i in range(height)]\r\n transform_row = []\r\n for i in range(0, width + incr_x, incr_x):\r\n transform_row.append(transform(vec2(i, 0), mapping, alpha))\r\n for i in range(incr_y, height + incr_y, incr_y):\r\n p_ur = transform_row[0]\r\n p_lr = transform_row[0] = transform(vec2(0, i), mapping, alpha)\r\n for j in range(incr_x, width + incr_x, incr_x):\r\n p_ul = p_ur\r\n p_ll = p_lr\r\n p_ur = transform_row[j//incr_x]\r\n p_lr = transform_row[j//incr_x] = transform(vec2(j, i), mapping, alpha)\r\n a = p_ur - p_ul\r\n b = p_ll - p_ul\r\n det = a.x*b.y - a.y*b.x\r\n if (det != 0.0):\r\n for p in triangle(p_ul, p_ur, p_ll, width, height):\r\n c = p - p_ul\r\n rx = (b.y*c.x - b.x*c.y)/det\r\n ry = (a.x*c.y - a.y*c.x)/det\r\n image_out[p.y][p.x] = image_in[min(height - 1, max(0, round(i + (ry - 1)*incr_y)))][min(width - 1, max(0, round(j + (rx - 1)*incr_x)))]\r\n a = p_lr - p_ll\r\n b = p_lr - p_ur\r\n det = a.x*b.y - a.y*b.x\r\n if (det != 0.0):\r\n p_ulr = p_ur + p_ll - p_lr\r\n for p in triangle(p_ur, p_ll, p_lr, width, height):\r\n c = p - p_ulr\r\n rx = (b.y*c.x - b.x*c.y)/det\r\n ry = (a.x*c.y - a.y*c.x)/det\r\n image_out[p.y][p.x] = image_in[min(height - 1, max(0, round(i + (ry - 1)*incr_y)))][min(width - 1, max(0, round(j + (rx - 1)*incr_x)))]\r\n image_out = Image.fromarray(np.uint8(image_out))\r\n return image_out", "def make_maps_of_2x1_pix_coordinates (sp) : \n x_rhs = np.arange(sp.colsh)*sp.pixs + sp.pixw - sp.pixsh\n x_rhs[0] = sp.pixwh # set x-coordinate of the wide pixel \n x_arr = np.hstack([-x_rhs[::-1],x_rhs])\n\n y_arr = np.arange(sp.rows) * sp.pixs\n y_arr -= y_arr[-1]/2 # move origin to the center of array\n\n sp.x_map2x1, sp.y_map2x1 = 
np.meshgrid(x_arr, y_arr)", "def forward_template(self, z_img):\n z_feat = self.backbone(z_img)\n if self.with_neck:\n z_feat = self.neck(z_feat)\n\n z_feat_center = []\n for i in range(len(z_feat)):\n left = (z_feat[i].size(3) - self.test_cfg.center_size) // 2\n right = left + self.test_cfg.center_size\n z_feat_center.append(z_feat[i][:, :, left:right, left:right])\n return tuple(z_feat_center)", "def phantom_ellipses(n_points,E):\n \n #Rescaling according to image size \n E[:,0] = E[:,0]*n_points/2 #semiaxis a\n E[:,1] = E[:,1]*n_points/2 #semiaxis b\n E[:,2] = E[:,2]*n_points/2 #x\n E[:,3] = E[:,3]*n_points/2 #y\n E[:,4] = E[:,4]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 )\n nrow,ncol = E.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow))\n\n for k in range(nrow): #itero sulle ellissi\n x_new = x - E[k,2]\n y_new = y - E[k,3]\n\n #find(( (x.*cosp + y.*sinp).^2)./asq + ((y.*cosp - x.*sinp).^2)./bsq <= 1); \n cosp = math.cos(E[k,4])\n sinp = math.sin(E[k,4])\n cond = np.square( x_new * cosp + y_new * sinp )*1/(E[k,0]*E[k,0]) + \\\n np.square(y_new * cosp - x_new * sinp)*1/(E[k,1]*E[k,1]) - 1\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] <= 0.0):\n phantom1[i,j,k] = E[k,5]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def morphPointSet(v_1, v_2, warp_frac):\n v = []\n for y in range(len(v_1)):\n v.append([(v_1[y][0] * (1 - warp_frac) + v_2[y][0] * warp_frac) , (v_1[y][1] * (1 - warp_frac) + v_2[y][1] * warp_frac)])\n return ginput_to_array(v)", "def normalise(image):", "def projective_inverse_warp_torch2(\n img, depth, pose, src_intrinsics, tgt_intrinsics, tgt_height, tgt_width, ret_flows=False):\n batch, height, width, channels = img.shape\n # Construct pixel grid coordinates (x, y, 1) for each pixel.\n # Duplicated for N (e.g. 4) of INPUT images (batch)\n pixel_coords = meshgrid_abs_torch(batch, tgt_height, tgt_width, img.device, False)\n\n # Note: \"target\" here means actually \"ref image\", forget about the ground truth targets!\n # You project pixels from \"target\" to the multiple inputs, not the other way round\n # Convert pixel coordinates to the target camera frame, 3D camera coords (X, Y, Z), seems OK so far...\n # Note: these are points in 3D camera coords (C) of the target camera, not world coords (W) !!!\n cam_coords = pixel2cam_torch(depth, pixel_coords, tgt_intrinsics)\n\n # Construct a 4x4 intrinsic matrix, why? 
wouldn't 3x4 suffice?\n filler = torch.tensor([[[0., 0., 0., 1.]]], device=img.device)\n filler = filler.repeat(batch, 1, 1)\n src_intrinsics4 = torch.cat([src_intrinsics, torch.zeros([batch, 3, 1], device=img.device)], axis=2)\n src_intrinsics4 = torch.cat([src_intrinsics4, filler], axis=1)\n\n # Get a 4x4 transformation matrix from 'target' camera frame to 'source'\n # pixel frame, looks OK\n proj_tgt_cam_to_src_pixel = torch.matmul(src_intrinsics4, pose)\n src_pixel_coords = cam2pixel_torch(cam_coords, proj_tgt_cam_to_src_pixel)\n\n # print(f'src_pixel_coords shape {src_pixel_coords.shape}')\n # print(f'src_pixel_coords {L(src_pixel_coords[:, :, :3,:])}')\n\n # Now we get trouble !\n if False:\n print(('src_pixel_coords', src_pixel_coords.shape, src_pixel_coords.dtype))\n for i in range(2):\n t = src_pixel_coords[0, :, :, i]\n print((i, t.min().item(), t.max().item()))\n sys.exit(0)\n\n # src_pixel_coords = (src_pixel_coords + torch.tensor([0.5, 0.5], device=img.device)) / torch.tensor([width, height],\n # device=img.device)\n\n src_pixel_coords = src_pixel_coords / torch.tensor([width-1, height-1], device=img.device)\n\n output_img = resampler_wrapper_torch(img, src_pixel_coords)\n if ret_flows:\n return output_img, src_pixel_coords - cam_coords\n else:\n return output_img", "def map_overlay(img, positions, meter_per_pixel, img_origin, img_heading, color=(255, 0, 0), thickness=2):\r\n img_out = np.copy(img)\r\n new_pos = rescale_trajectory(positions, meter_per_pixel, img_origin, img_heading)\r\n for pos in new_pos:\r\n cv2.circle(img_out, (int(pos[0]), int(pos[1])), thickness, color, thickness)\r\n return img_out", "def four_point_transform(image, pts):\n rect = order_points(pts)\n width_first = np.sqrt(\n ((rect[2][0] - rect[3][0]) ** 2) + ((rect[2][1] - rect[3][1]) ** 2)\n )\n width_second = np.sqrt(\n ((rect[1][0] - rect[0][0]) ** 2) + ((rect[1][1] - rect[0][1]) ** 2)\n )\n max_width = max(int(width_first), int(width_second))\n height_first = np.sqrt(\n ((rect[1][0] - rect[2][0]) ** 2) + ((rect[1][1] - rect[2][1]) ** 2)\n )\n height_second = np.sqrt(\n ((rect[0][0] - rect[3][0]) ** 2) + ((rect[0][1] - rect[3][1]) ** 2)\n )\n max_height = max(int(height_first), int(height_second))\n dst = np.array(\n [\n [0, 0],\n [max_width - 1, 0],\n [max_width - 1, max_height - 1],\n [0, max_height - 1],\n ],\n dtype=\"float32\",\n )\n view_transform = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, view_transform, (max_width, max_height))\n return warped", "def place_images(self, final_list, points):\n\t\tfor i in range(8): \n # Please change this (8) into a class-level variable --KOH\n\t\t\timage_object = final_list[i]\n#\t\tif type(image_object) == 'CorrectImage':\n#\t\t\t\tself.correct = [i, points[i]]\n\t\t\timage = pygame.image.load(image_object.file_path)\n # Why can't these be stored as a property of the class --KOH\n\t\t\timagerect = image.get_rect()\n\t\t\treimage = pygame.transform.scale(image, image_object.size)\n\t\t\tself.screen.blit(reimage, points[i])", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def project_pts3_to_image(pts3, P):\n assert (len(pts3.shape) == 2)\n assert (pts3.shape[1] == 3)\n assert 
(P.shape == (3, 4))\n pts3 = np.hstack([pts3, np.ones((len(pts3), 1))]) # homogeneous\n pts2 = P.dot(pts3.T).T\n pts2 = pts2[:, :2] / (pts2[:, [-1]] + 1e-8)\n return pts2", "def draw_features(self, image):\n \n for x,y in self.new_points.reshape(-1,2):\n cv2.circle(image, (x,y), 2, (255,0,255), 2)\n return image", "def _binary_image_to_grid_points(binary_image_matrix):\n\n return numpy.where(binary_image_matrix)", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def project_to_image(self, point_cloud, round_px=True):\n if not isinstance(point_cloud, PointCloud) and not (isinstance(point_cloud, Point) and point_cloud.dim == 3):\n raise ValueError('Must provide PointCloud or 3D Point object for projection')\n if point_cloud.frame != self._frame:\n raise ValueError('Cannot project points in frame %s into camera with frame %s' %(point_cloud.frame, self._frame))\n\n points_proj = self.S.dot(point_cloud.data) + self.t\n if len(points_proj.shape) == 1:\n points_proj = points_proj[:, np.newaxis]\n point_depths = points_proj[2,:]\n point_z = np.tile(point_depths, [3, 1])\n points_proj = np.divide(points_proj, point_z)\n if round_px:\n points_proj = np.round(points_proj)\n points_proj = points_proj[:2,:].astype(np.int16)\n\n valid_ind = np.where((points_proj[0,:] >= 0) & \\\n (points_proj[1,:] >= 0) & \\\n (points_proj[0,:] < self.width) & \\\n (points_proj[1,:] < self.height))[0]\n\n depth_data = np.zeros([self.height, self.width])\n depth_data[points_proj[1,valid_ind], points_proj[0,valid_ind]] = point_depths[valid_ind]\n return DepthImage(depth_data, frame=self.frame)", "def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )", "def project_points(points, cam_matrix, trans, rot):\n\n # STEP 1: Transform pointcloud into new reference frame.\n points = np.dot(rot, points) + trans[:, None]\n\n # STEP 2: Project new pointcloud onto image frame using K matrix.\n # gives a 3 x N array of image plane coordinates in homogenous coordinates.\n homo_pixel_coords = np.dot(cam_matrix, points)\n\n # STEP 3: Convert homogenous coordinates to regular 2D coordinates.\n # To do this, you need to divide the first two coordinates of homo_pixel_coords\n # by the third coordinate.\n pixel_coords = homo_pixel_coords[:2] / homo_pixel_coords[2]\n\n # STEP 4: Convert to integers. 
Take the floor of pixel_coords then cast it\n # to an integer type, like numpy.int32\n pixel_coords = np.int32(np.floor(pixel_coords))\n\n return pixel_coords", "def process_image(img):\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n return img.cpu().numpy().transpose((1, 2, 0))", "def firstorder(self):\n f = self.img\n x = self.x\n y = self.y\n self.x1 = sum(f*x)/sum(f)\n self.y1 = sum(f*y)/sum(f)", "def world_pos_from_img_pos(self, img_pos, img_shape, arm_pos, scale):\n centre_x = img_shape[0]/2\n centre_y = img_shape[1]/2\n #scale = 0.2*2/centre_x #m/pixel\n #print(\"centre x, y\")\n #print(centre_x)\n #print(centre_y)\n \n wld_x = arm_pos[0]\n wld_y = arm_pos[1]\n \n img_x = img_pos[0]\n img_y = img_pos[1]\n #print(\"img x, y\")\n #print(img_x)\n #print(img_y)\n \n img_dx = img_x - centre_x\n img_dy = img_y - centre_y\n #print(\"img dx, dy\")\n #print(img_dx)\n #print(img_dy)\n \n # +wld_x = -img_y ; +wld_y = -img_x\n wld_dx = -img_dy*scale\n wld_dy = -img_dx*scale\n\n #limit output\n #wld_dx = max(wld_dx, -centre_y*scale)\n #wld_dx = min(wld_dx, centre_y*scale)\n #wld_dy = max(wld_dy, -centre_x*scale)\n #wld_dy = min(wld_dy, centre_x*scale)\n \n new_wld_x = wld_x + wld_dx\n new_wld_y = wld_y + wld_dy\n \n return [new_wld_x, new_wld_y]", "def rec_transform(image, pts):\n ord_pts = order_points(pts)\n\n # find the dimension of the rectangular created by the given points", "def draw_points(in_img, points, colour=(255, 0, 0)):\n img = in_img.copy()\n\n radius = int(max(img.shape) / 100)\n\n img = convert_when_colour(colour, img)\n\n for point in points:\n img = cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1)\n\n return img", "def _project_pointcloud(self, cloud):\n\n assert isinstance(cloud, PointCloud2)\n\n pc1 = PointCloud()\n pc1.header = cloud.header\n # hack the time! 
dont move the robot :-0\n pc1.header.stamp = rospy.Time.now()\n \n \n pc1.points = [Point32(*p) for p in pc2.read_points(cloud)]\n\n self._tf_listener.waitForTransform(pc1.header.frame_id,\n self._image_info.tf_frame, \n rospy.Time(0), \n rospy.Duration(4))\n\n image_frame_cloud = self._tf_listener.transformPointCloud (\n self._image_info.tf_frame, \n pc1)\n min_x, max_x, min_y, max_y = 640, 0, 480, 0 # TODO: remove hard coded image size!\n for pt in image_frame_cloud.points:\n u, v = self._image_info.project3dToPixel((pt.x, pt.y, pt.z))\n if v < min_y:\n min_y = int(v)\n if v > max_y:\n max_y = int(v)\n if u < min_x:\n min_x = int(u)\n if u > max_x:\n max_x = int(u)\n location = (((min_x, min_y), (max_x, max_y)))\n rospy.loginfo(\"Transformed cloud into image plane\")\n return location", "def detect_points(self):\r\n\r\n\t\r\n\r\n\t\tfeature_mask = np.zeros_like(self.gray) ## Create a mask so we only look for template features in the ROI\r\n\t\t\r\n\t\tfeature_mask[max(0,self.bb[1]):min(360,self.bb[1] + self.bb[3]),max(0,self.bb[0]):min(640,self.bb[0] + self.bb[2])] = 255\r\n\r\n\t\t# search for good points\r\n\t\tfeatures = cv2.goodFeaturesToTrack(self.gray, mask = feature_mask, **feature_params)\r\n\t\t# refine the corner locations\r\n\t\tcv2.cornerSubPix(self.gray,features, **subpix_params)\r\n\r\n\t\tself.features = features\r\n\r\n\t\tself.tracks = [[p] for p in features.reshape((-1,2))]\r\n\r\n\t\tself.prev_gray = self.gray", "def point_sample(img_meta,\n img_features,\n points,\n proj_mat,\n coord_type,\n img_scale_factor,\n img_crop_offset,\n img_flip,\n img_pad_shape,\n img_shape,\n aligned=True,\n padding_mode='zeros',\n align_corners=True):\n\n # apply transformation based on info in img_meta\n points = apply_3d_transformation(\n points, coord_type, img_meta, reverse=True)\n\n # project points to camera coordinate\n pts_2d = points_cam2img(points, proj_mat)\n\n # img transformation: scale -> crop -> flip\n # the image is resized by img_scale_factor\n img_coors = pts_2d[:, 0:2] * img_scale_factor # Nx2\n img_coors -= img_crop_offset\n\n # grid sample, the valid grid range should be in [-1,1]\n coor_x, coor_y = torch.split(img_coors, 1, dim=1) # each is Nx1\n\n if img_flip:\n # by default we take it as horizontal flip\n # use img_shape before padding for flip\n orig_h, orig_w = img_shape\n coor_x = orig_w - coor_x\n\n h, w = img_pad_shape\n coor_y = coor_y / h * 2 - 1\n coor_x = coor_x / w * 2 - 1\n grid = torch.cat([coor_x, coor_y],\n dim=1).unsqueeze(0).unsqueeze(0) # Nx2 -> 1x1xNx2\n\n # align_corner=True provides higher performance\n mode = 'bilinear' if aligned else 'nearest'\n point_features = F.grid_sample(\n img_features,\n grid,\n mode=mode,\n padding_mode=padding_mode,\n align_corners=align_corners) # 1xCx1xN feats\n\n return point_features.squeeze().t()", "def apply_projection_transform(Xb, batch_size, image_size):\n d = image_size * 0.3 * intensity\n for i in np.random.choice(batch_size, int(batch_size * p), replace = False): \n tl_top = random.uniform(-d, d) # Top left corner, top margin\n tl_left = random.uniform(-d, d) # Top left corner, left margin\n bl_bottom = random.uniform(-d, d) # Bottom left corner, bottom margin\n bl_left = random.uniform(-d, d) # Bottom left corner, left margin\n tr_top = random.uniform(-d, d) # Top right corner, top margin\n tr_right = random.uniform(-d, d) # Top right corner, right margin\n br_bottom = random.uniform(-d, d) # Bottom right corner, bottom margin\n br_right = random.uniform(-d, d) # Bottom right corner, right margin\n\n 
transform = ProjectiveTransform()\n transform.estimate(np.array((\n (tl_left, tl_top),\n (bl_left, image_size - bl_bottom),\n (image_size - br_right, image_size - br_bottom),\n (image_size - tr_right, tr_top)\n )), np.array((\n (0, 0),\n (0, image_size),\n (image_size, image_size),\n (image_size, 0)\n )))\n Xb[i] = warp(Xb[i], transform, output_shape=(image_size, image_size), order = 1, mode = 'edge')\n\n return Xb" ]
[ "0.7286386", "0.6467655", "0.6214912", "0.61901563", "0.61635965", "0.60568637", "0.6051804", "0.6028781", "0.60097945", "0.5999861", "0.5963049", "0.59306324", "0.5910674", "0.5894894", "0.5861491", "0.5838941", "0.58371437", "0.5833094", "0.58130443", "0.58091205", "0.58082163", "0.58014", "0.5800616", "0.57892793", "0.5785864", "0.5767218", "0.5746882", "0.57395655", "0.5713253", "0.56932056", "0.5687942", "0.56767595", "0.5655838", "0.56449866", "0.564489", "0.562995", "0.56170034", "0.5613361", "0.56121486", "0.55980575", "0.55906326", "0.5585249", "0.5578487", "0.55744255", "0.55738556", "0.5566334", "0.5561104", "0.55505913", "0.5532022", "0.5527812", "0.5526278", "0.55245066", "0.55227864", "0.5521574", "0.5521143", "0.55129266", "0.55072165", "0.5499034", "0.5499034", "0.5478996", "0.5462744", "0.54564166", "0.5453955", "0.5447829", "0.54405534", "0.5440086", "0.54390574", "0.54367155", "0.54345685", "0.5429956", "0.54277253", "0.54257095", "0.5417456", "0.5409382", "0.5408983", "0.53995657", "0.5397905", "0.53978556", "0.5395516", "0.5394027", "0.538942", "0.5389307", "0.53874576", "0.5381563", "0.5374305", "0.5367197", "0.5364152", "0.5359386", "0.53522635", "0.53519523", "0.53508294", "0.5349109", "0.5341369", "0.53392214", "0.53322315", "0.5327712", "0.5324962", "0.53201914", "0.5319041", "0.53122246" ]
0.647679
1
Transforms an image point to a ray in the world system
def ImageToRay(self, imagePoints):
    pass  # delete after implementation
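The document above is only an unimplemented stub for this query. As an illustration (not part of the record), here is a minimal sketch of what ImageToRay could compute under a standard pinhole model; the intrinsic matrix K, camera-to-world rotation R, and camera centre used below are assumptions for the sketch, not attributes taken from the original class.

import numpy as np

class Camera:
    def __init__(self, K, R, center):
        self.K = np.asarray(K, dtype=float)             # 3x3 intrinsic matrix (assumed attribute)
        self.R = np.asarray(R, dtype=float)             # 3x3 camera-to-world rotation (assumed attribute)
        self.center = np.asarray(center, dtype=float)   # camera centre in world coordinates (assumed attribute)

    def ImageToRay(self, imagePoints):
        """Back-project pixel coordinates (u, v) into unit ray directions in the world frame.

        Returns (origin, directions): the shared ray origin (the camera centre) and an
        Nx3 array of unit direction vectors, one per input image point.
        """
        pts = np.atleast_2d(np.asarray(imagePoints, dtype=float))
        ones = np.ones((pts.shape[0], 1))
        pix_h = np.hstack([pts, ones])                  # homogeneous pixel coordinates [u, v, 1]
        dirs_cam = (np.linalg.inv(self.K) @ pix_h.T).T  # directions in the camera frame
        dirs_world = (self.R @ dirs_cam.T).T            # rotate directions into the world frame
        dirs_world /= np.linalg.norm(dirs_world, axis=1, keepdims=True)
        return self.center, dirs_world

# hypothetical usage:
# cam = Camera(K=[[800, 0, 320], [0, 800, 240], [0, 0, 1]], R=np.eye(3), center=[0, 0, 0])
# origin, rays = cam.ImageToRay([[320, 240], [0, 0]])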
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = [(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map 
transform\")\n\n return bbox_points_camera_image", "def localize_pixel(img_pos,camera : Camera,lidar : Lidar, scan : LaserScan) -> tuple:\n\n # ---OBJ--\n # x r1 /\\ r2 x\n # / \\\n #cam_ray / \\ average_ray\n # / \\\n # / \\\n # CAM ----> LID\n # \n\n # has to be 2d\n assert (img_pos.size == 2)\n\n cam_ray = camera.get_ray_through_image(img_pos)\n\n cam_ray_robot = camera.get_ray_in_robot_frame(cam_ray)\n\n cam_ray_lidar = lidar.get_ray_in_lidar_frame(cam_ray_robot)\n\n # flatten camera ray\n cam_ray_lidar_flat = lidar.get_ray_projection(cam_ray_lidar)\n\n # figure out which lidar rays correspond to the camera ray\n (ray1,ray2) = lidar.get_corresponding_lidar_rays(cam_ray_lidar_flat,scan)\n\n # if no rays found corresponding to scan data\n if ray1 is None or ray2 is None:\n return (None,None)\n\n # get the normal to the lidar hit\n intersection_normal = lidar.get_normal_to_plane(ray1,ray2)\n\n # get the distance data in horizontal plane, from lidar to object\n lidar_to_target_length = lidar.get_camera_ray_length(cam_ray_lidar_flat,ray1,ray2)\n\n # get the vector from camera to lidar (flattened to lidar plane)\n # i.e. origin of lidar frame in camera frame\n lidar_to_cam_vec = cam_ray_lidar_flat.origin\n cam_to_lidar_flat = Ray(lidar_to_cam_vec,-lidar_to_cam_vec,np.linalg.norm(lidar_to_cam_vec))\n \n # now workout the lidar to object ray, i.e. interpolate between ray1's and ray2's tips\n lidar_to_object_flat = interpolated_ray(ray1,ray2,0.5,lidar_to_target_length)\n\n # now finally workout the vector from camera to object (flattened)\n # this lets us access the true z-distance in the camera\n cam_to_object_flat = lidar_to_object_flat.get_vec() + cam_to_lidar_flat.get_vec()\n \n cam_to_object_flat_length = np.linalg.norm(cam_to_object_flat)\n\n # angle from horizontal on camera ray\n cam_ray_theta = angle_between(cam_ray_lidar.get_vec(),cam_to_object_flat)\n\n # length of original camera ray (knowing the length of its projection)\n # will fail if ray is pointing straight up or down\n cam_ray_robot.length = cam_to_object_flat_length / math.cos(cam_ray_theta)\n\n\n object_robot = cam_ray_robot.get_vec()+cam_ray_robot.origin\n\n return (object_robot,intersection_normal)", "def world_to_camera(self, X):\n raise NotImplementedError", "def generate_ray(self, img_point):\n # TODO A5 copy your implementation from A4\n i = img_point[0]\n j = img_point[1]\n dist_vector = self.target - self.eye\n proj_dist = np.linalg.norm(dist_vector)\n height = 2 * proj_dist * np.tan(self.vfov / 2.0)\n width = self.aspect * height\n left = (-1) * width / 2.0\n bottom = (-1) * height / 2.0\n u = i * width + left\n v = j * height + bottom\n ray_origin = self.eye\n ray_direction = ((-1) * proj_dist * self.w) + u * self.u + v * self.v\n return Ray(ray_origin, ray_direction)", "def rays(self):\n pixels = np.array([\n [u, v, 1.]\n for u, v in product(range(self.width), range(self.height))\n ], dtype=np.int32).T\n rays = project(self.camera.P_pinv, pixels)\n\n return self._camera.center, rays.T", "def cam_to_world(cam_point, world_to_cam):\n # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])\n\n obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))\n world_point = np.dot(world_to_cam, obj_vector)\n\n world_point = [p[0] for p in world_point]\n return world_point[0:3]", "def ray(self, pixel):\n # Ensure pixel is in homogenous coordinates\n if len(pixel) == 2:\n pixel = np.vstack((pixel, [1]))\n\n ray = project(self._camera.P_pinv, pixel.astype(np.float32))\n assert ray.shape == (4, 1)\n\n return 
self._camera.center, ray", "def camera_to_world(self, X):\n raise NotImplementedError", "def camera_transform(image):\n img = np.zeros((image.shape[0], image.shape[1], 3))\n for y in range(image.shape[0]):\n for x in range(image.shape[1]):\n img[y][x] = (x - 320) / 575.5 * image[y, x], (240 - y) / 575.5 * image[y, x], image[\n y, x]\n return img", "def frusrum_ray(self, param_x, param_y):\n l, r, b, t, n, f = self.body.dim\n # convert normalized into near frustum space\n sm = ScaleMat(x=r - l, y=t - b)\n # .5 to compensate origin difference between OpenGL space and pane space\n offset = MoveMat(-.5, -.5, -n)\n frustum_point = sm * offset * Pnt(x=param_x, y=param_y, z=0)\n ray = gt.Ray([0, 0, 0], frustum_point.xyz)\n return self.tripod.plane.TM * ray", "def world_pos_from_img_pos(self, img_pos, img_shape, arm_pos, scale):\n centre_x = img_shape[0]/2\n centre_y = img_shape[1]/2\n #scale = 0.2*2/centre_x #m/pixel\n #print(\"centre x, y\")\n #print(centre_x)\n #print(centre_y)\n \n wld_x = arm_pos[0]\n wld_y = arm_pos[1]\n \n img_x = img_pos[0]\n img_y = img_pos[1]\n #print(\"img x, y\")\n #print(img_x)\n #print(img_y)\n \n img_dx = img_x - centre_x\n img_dy = img_y - centre_y\n #print(\"img dx, dy\")\n #print(img_dx)\n #print(img_dy)\n \n # +wld_x = -img_y ; +wld_y = -img_x\n wld_dx = -img_dy*scale\n wld_dy = -img_dx*scale\n\n #limit output\n #wld_dx = max(wld_dx, -centre_y*scale)\n #wld_dx = min(wld_dx, centre_y*scale)\n #wld_dy = max(wld_dy, -centre_x*scale)\n #wld_dy = min(wld_dy, centre_x*scale)\n \n new_wld_x = wld_x + wld_dx\n new_wld_y = wld_y + wld_dy\n \n return [new_wld_x, new_wld_y]", "def pinhole_projection_world_to_image(world_pos, K, camera_to_world=None):\n\n world_pos_vec = np.append(world_pos, 1)\n\n # transform to camera frame if camera_to_world is not None\n if camera_to_world is not None:\n world_pos_vec = np.dot(np.linalg.inv(camera_to_world), world_pos_vec)\n\n # scaled position is [X/Z, Y/Z, 1] where X,Y,Z is the position in camera frame\n scaled_pos = np.array([world_pos_vec[0]/world_pos_vec[2], world_pos_vec[1]/world_pos_vec[2], 1])\n uv = np.dot(K, scaled_pos)[:2]\n return uv", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)", "def project(points, camera_params, theta):\n \"\"\"\n Function takes input of 3d_points, transformations and Convert 3-D points to 2-D by projecting onto images. 
\n Input:\n points: 3D points in world frame\n camera_params: parameters of camera corrosponding to the point\n theta: Needed For PAL camera to specify the sub camera index for the points\n Output:\n points_proj: 2D reprojected points for 3D points \n\n \"\"\"\n # Convert the 3D points to Camera Frame by rotaion followes by translation\n points_proj1 = rotate(points[:,0:3], camera_params[:, :3])\n points_proj1 += camera_params[:, 3:6]\n # FOR PAL: Converting into the Sub-camera Frame by respective rotation\n thetas = theta * np.pi / 3 \n points_proj = np.copy(points_proj1)\n points_proj[:,0] = points_proj1[:,0]*np.cos(thetas) - points_proj1[:,2]*np.sin(thetas)\n points_proj[:,2] = points_proj1[:,0]*np.sin(thetas) + points_proj1[:,2]*np.cos(thetas)\n # Avoiding Zero error\n for i in range(len(points_proj)):\n if(points_proj[i,2]==0):\n points_proj[i,0] = 0\n points_proj[i,1] = 0\n points_proj[i,2] = 1\n # 2D projection\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj**2, axis=1)\n r = 1 + k1 * n + k2 * n**2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def tanp_to_world(self, x, y):\n crpix1, crpix2 = self._wcs.wcs.crpix\n x = x + crpix1\n y = y + crpix2\n ra, dec = self._wcslin.all_pix2world(x, y, 1)\n return ra, dec", "def project_point_along_2Dvector(): \n \n # 2d vector \n a = vec2( 1 , 1 )\n b = vec2( -1 , -1 )\n com = vec2() \n\n #fb = pixel_op() \n #fb.create_buffer(800, 800)\n #fb.graticule(pixels_per_unit)\n\n vecs = [a,b]\n pts = [com.project_pt(a, b, 2)]\n\n bloody_simple_2drender('2d_render.png', vecs=vecs, pts=pts, gridsize=40)", "def toworld(self, *args, **kwargs):\n return _image.image_toworld(self, *args, **kwargs)", "def obj_ray_cast(obj, matrix):\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None", "def transform(self,image,landmarks,s0=None):\n if s0 is None:\n s0 = np.array([[127.6475, 227.8161], [79.1608, 87.0376], [176.8392, 87.0376]], np.float32)\n idx = [8,36,45] #\"\"\"Anchor points\"\"\"\n pts = np.float32(landmarks[idx,:])\n M = cv2.getAffineTransform(pts,s0)\n dst = cv2.warpAffine(image, M, (256,256))\n return dst", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv @ ray_origin\n ray_target_obj = matrix_inv @ ray_target\n 
ray_direction_obj = ray_target_obj - ray_origin_obj\n\n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index\n else:\n return None, None, None", "def _compute_pixel_ray_direction(\n u: float, v: float, fx: float, fy: float, img_w: int, img_h: int\n) -> NDArrayFloat:\n if not np.isclose(fx, fy, atol=1e-3):\n raise ValueError(\n f\"Focal lengths in the x and y directions must match: {fx} != {fy}\"\n )\n\n # approximation for principal point\n px = img_w / 2\n py = img_h / 2\n\n # the camera coordinate frame (where Z is out, x is right, y is down).\n\n # compute offset from the center\n x_center_offs = u - px\n y_center_offs = v - py\n\n ray_dir: NDArrayFloat = np.array([x_center_offs, y_center_offs, fx])\n ray_dir /= np.linalg.norm(ray_dir)\n return ray_dir", "def pinhole_projection_image_to_world(uv, z, K):\n\n u_v_1 = np.array([uv[0], uv[1], 1])\n pos = z * np.matmul(inv(K),u_v_1)\n return pos", "def project_point_cloud_to_orthographic_depth_image(pts, campos, viewdir, up, lrbt, im_hw):\n Rt = transforms.lookat_matrix(campos, campos + viewdir, up=up)\n transformed = Rt.dot(np.hstack((pts, np.ones([pts.shape[0], 1]))).T).T\n\n x = (transformed[:, 0] - lrbt[0]) / (lrbt[1] - lrbt[0]) * (im_hw[1])\n y = (transformed[:, 1] - lrbt[2]) / (lrbt[3] - lrbt[2]) * (im_hw[0])\n d = transformed[:, 2]\n\n ret = np.full(im_hw, fill_value=np.nan)\n for i in range(x.shape[0]):\n yi = im_hw[0] - int(round(y[i]))\n xi = int(round(x[i]))\n if yi < 0 or yi >= im_hw[0] or xi < 0 or xi >= im_hw[1]:\n continue\n if np.isnan(ret[yi, xi]):\n ret[yi, xi] = d[i]\n else:\n ret[yi, xi] = min(ret[yi, xi], d[i])\n\n return ret", "def project(self, point):\n return np.round(project(self.camera.P, point)).astype(int)", "def relative_pose_cam_to_body(\n relative_scene_pose, Rt_cam2_gt\n ):\n relative_scene_pose = (\n np.linalg.inv(Rt_cam2_gt)\n @ relative_scene_pose\n @ Rt_cam2_gt\n )\n return relative_scene_pose", "def obj_ray_cast(obj, matrix):\n\n # get the ray relative to the object\n matrix_inv = matrix.inverted()\n ray_origin_obj = matrix_inv * ray_origin\n ray_target_obj = matrix_inv * ray_target\n ray_direction_obj = ray_target_obj - ray_origin_obj\n \n # cast the ray\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\n\n if success:\n return location, normal, face_index, ray_target\n else:\n return None, None, None, ray_target", "def point_on_ray(self, t=0.5):\n\n assert 0. 
<= t <=1., 't must be between 0 and 1'\n\n\n return self.detector_origin + (self._origin - self.detector_origin) * t", "def project(face_image, u):\n \n # finding the magnitude of each component\n a = np.matmul(face_image, u)\n \n # use a's to get the projection back\n res = np.matmul(u, a.T)\n\n return res", "def transform(self, x, y):\n # return self.transform_2D(x, y)\n return self.transform_perspective(x, y)", "def main_ray_cast(self, context, event):\r\n # get the context arguments\r\n MPM = bpy.context.window_manager.MPM\r\n scene = context.scene\r\n region = context.region\r\n rv3d = context.region_data\r\n coord = event.mouse_region_x, event.mouse_region_y\r\n \r\n # get the ray from the viewport and mouse\r\n view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)\r\n ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)\r\n \r\n ray_target = ray_origin + view_vector\r\n \r\n def visible_objects_and_duplis():\r\n \"\"\"Loop over (object, matrix) pairs (mesh only)\"\"\"\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()\r\n \r\n def obj_ray_cast(obj, matrix):\r\n \"\"\"Wrapper for ray casting that moves the ray into object space\"\"\"\r\n \r\n # get the ray relative to the object\r\n matrix_inv = matrix.inverted()\r\n ray_origin_obj = matrix_inv * ray_origin\r\n ray_target_obj = matrix_inv * ray_target\r\n ray_direction_obj = ray_target_obj - ray_origin_obj\r\n \r\n # cast the ray\r\n success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)\r\n \r\n if success:\r\n return location, normal, face_index\r\n else:\r\n return None, None, None\r\n \r\n # cast rays and find the closest object\r\n best_length_squared = -1.0\r\n best_obj = None\r\n \r\n for obj, matrix in visible_objects_and_duplis():\r\n if obj.type == 'MESH':\r\n hit, normal, face_index = obj_ray_cast(obj, matrix)\r\n if hit is not None:\r\n hit_world = matrix * hit\r\n length_squared = (hit_world - ray_origin).length_squared\r\n if best_obj is None or length_squared < best_length_squared:\r\n best_length_squared = length_squared\r\n best_obj = obj\r\n \r\n if best_obj is not None:\r\n if self.on_curve:\r\n if best_obj != bpy.context.active_object:\r\n if self.choose_start:\r\n bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap = None if bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap == best_obj else best_obj\r\n self.choose_start = False\r\n \r\n if self.choose_end:\r\n bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap = None if bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap == best_obj else best_obj\r\n self.choose_end = False\r\n \r\n if self.choose_profile:\r\n \r\n curve = bpy.context.active_object.modifiers[\"Curve\"].object\r\n start = bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap if bpy.context.active_object.modifiers[\"Array_on_curve\"].start_cap else \"\"\r\n end = bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap if bpy.context.active_object.modifiers[\"Array_on_curve\"].end_cap else \"\"\r\n \r\n bpy.ops.object.modifier_remove(modifier = \"Array_on_curve\")\r\n bpy.ops.object.modifier_remove(modifier = \"Curve\")\r\n bpy.context.active_object.select = False\r\n 
best_obj.select = True\r\n bpy.context.scene.objects.active = best_obj\r\n best_obj.modifiers.new(\"Array_on_curve\", 'ARRAY')\r\n MPM.array_name = \"Array_on_curve\"\r\n best_obj.modifiers[\"Array_on_curve\"].relative_offset_displace[self.axis_value] = 1\r\n for i in range(3):\r\n if i != self.axis_value:\r\n best_obj.modifiers[\"Array_on_curve\"].relative_offset_displace[i]=0\r\n best_obj.modifiers[\"Array_on_curve\"].fit_type = 'FIT_CURVE'\r\n best_obj.modifiers[\"Array_on_curve\"].curve = curve\r\n best_obj.modifiers[\"Array_on_curve\"].use_merge_vertices = True\r\n if start:\r\n best_obj.modifiers[\"Array_on_curve\"].start_cap = start if start != best_obj else None\r\n \r\n if end:\r\n best_obj.modifiers[\"Array_on_curve\"].end_cap = end if end != best_obj else None\r\n \r\n # setup the curve modifier\r\n best_obj.modifiers.new(\"Curve\", 'CURVE')\r\n best_obj.modifiers[\"Curve\"].object = curve\r\n self.setup_deform_axis(best_obj.modifiers, self.axis_value)\r\n \r\n self.choose_profile = False", "def test_compute_pixel_ray_directions_vectorized() -> None:\n fx = 10\n fy = 10\n\n # dummy 2d coordinates in the image plane.\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n\n # principal point is at (10,5)\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n gt_ray_dir: NDArrayFloat = np.array([2, -3, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n for i in range(4):\n assert np.allclose(gt_ray_dir, ray_dirs[i])", "def get_planet_image(image_path):\n img = Image.open(image_path)\n img_mirror = img.transpose(Image.FLIP_TOP_BOTTOM)\n\n pano = np.asarray(img_mirror)\n\n input_shape = pano.shape\n output_shape = (1080, 1080)\n\n def output_coord_to_r_theta(coords):\n \"\"\"\n Convert co-ordinates in the output image to r, theta co-ordinates.\n The r co-ordinate is scaled to range from 0 to 1.\n The theta co-ordinate is scaled to range from 0 to 1.\n\n A Nx2 array is returned with r being the first column and theta being the second.\n \"\"\"\n # Calculate x- and y-co-ordinate offsets from the centre:\n x_offset = coords[:,0] - (output_shape[1]/2)\n y_offset = coords[:,1] - (output_shape[0]/2)\n\n # Calculate r and theta in pixels and radians:\n r = np.sqrt(x_offset ** 2 + y_offset ** 2)\n theta = np.arctan2(y_offset, x_offset)\n\n # The maximum value r can take is the diagonal corner:\n max_x_offset, max_y_offset = output_shape[1]/2, output_shape[0]/2\n max_r = np.sqrt(max_x_offset ** 2 + max_y_offset ** 2)\n\n # Scale r to lie between 0 and 1\n r = r / max_r\n\n # arctan2 returns an angle in radians between -pi and +pi. Re-scale\n # it to lie between 0 and 1\n theta = (theta + np.pi) / (2*np.pi)\n\n # Stack r and theta together into one array. Note that r and theta are initially\n # 1-d or \"1xN\" arrays and so we vertically stack them and then transpose\n # to get the desired output.\n return np.vstack((r, theta)).T\n\n\n def r_theta_to_input_coords(r_theta):\n \"\"\"Convert a Nx2 array of r, theta co-ordinates into the corresponding\n co-ordinates in the input image.\n\n Return a Nx2 array of input image co-ordinates.\n\n \"\"\"\n # Extract r and theta from input\n r, theta = r_theta[:,0], r_theta[:,1]\n\n # Theta wraps at the side of the image. 
That is to say that theta=1.1\n # is equivalent to theta=0.1 => just extract the fractional part of\n # theta\n theta = theta - np.floor(theta)\n\n # Calculate the maximum x- and y-co-ordinates\n max_x, max_y = input_shape[1]-1, input_shape[0]-1\n\n # Calculate x co-ordinates from theta\n xs = theta * max_x\n\n # Calculate y co-ordinates from r noting that r=0 means maximum y\n # and r=1 means minimum y\n ys = (1-r) * max_y\n\n # Return the x- and y-co-ordinates stacked into a single Nx2 array\n return np.hstack((xs, ys))\n\n def get_planet(coords):\n \"\"\"Chain our two mapping functions together.\"\"\"\n r_theta = output_coord_to_r_theta(coords)\n input_coords = r_theta_to_input_coords(r_theta)\n return input_coords\n\n # Delete all residual images\n folder = \"static/images/planet\"\n\n name = next(tempfile._get_candidate_names())\n planet_path = \"%s/%s.png\" % (folder, name)\n planet_bgr = warp(pano, get_planet, output_shape=output_shape)\n b,g,r = cv2.split(planet_bgr)\n im = cv2.merge([r,g,b])\n cv2.imwrite(planet_path, 255*im)\n\n\n return planet_path", "def world_to_object(self, point: Point) -> Point:\n if self.parent:\n point = self.parent.world_to_object(point)\n result = self.transform.inverse() * point\n return result", "def imageTransform(self):\n ims = self.imageShape\n acs = self.activeShape\n dx = self.colVector\n dy = self.rowVector\n\n p0 = self.activeOrigin\n p1 = p0 + acs[2] * dx\n p2 = p0 + acs[1] * dy\n\n # print p0, p1, p2\n # print acs, dx, dy\n\n localPts = list(map(pg.Vector, [[0,0], [ims[2],0], [0,ims[1]], [0,0,1]])) # w and h of data of image in pixels.\n globalPts = list(map(pg.Vector, [p0, p1, p2, [0,0,1]]))\n m = pg.solve3DTransform(localPts, globalPts)\n m[:,2] = m[:,3]\n m[2] = m[3]\n m[2,2] = 1\n tr = Qt.QTransform(*m[:3,:3].transpose().reshape(9))\n return tr", "def img_map_transforms(ts):\n # XXX TODO: unchecked textures give error of variable referenced before assignment XXX\n # POV-Ray \"scale\" is not a number of repetitions factor, but ,its\n # inverse, a standard scale factor.\n # 0.5 Offset is needed relatively to scale because center of the\n # scale is 0.5,0.5 in blender and 0,0 in POV\n # Strange that the translation factor for scale is not the same as for\n # translate.\n # TODO: verify both matches with other blender renderers / internal in previous versions.\n image_map_transforms = \"\"\n image_map_transforms = \"scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % (\n ts.scale[0],\n ts.scale[1],\n ts.scale[2],\n ts.offset[0],\n ts.offset[1],\n ts.offset[2],\n )\n # image_map_transforms = (\" translate <-0.5,-0.5,0.0> scale <%.4g,%.4g,%.4g> translate <%.4g,%.4g,%.4g>\" % \\\n # ( 1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # (0.5 / ts.scale.x) + ts.offset.x,\n # (0.5 / ts.scale.y) + ts.offset.y,\n # ts.offset.z))\n # image_map_transforms = (\n # \"translate <-0.5,-0.5,0> \"\n # \"scale <-1,-1,1> * <%.4g,%.4g,%.4g> \"\n # \"translate <0.5,0.5,0> + <%.4g,%.4g,%.4g>\" % \\\n # (1.0 / ts.scale.x,\n # 1.0 / ts.scale.y,\n # 1.0 / ts.scale.z,\n # ts.offset.x,\n # ts.offset.y,\n # ts.offset.z)\n # )\n return image_map_transforms", "def get_mouse_ray(self, context, event):\n region, rv3d = context.region, context.region_data\n coord = event.mouse_region_x, event.mouse_region_y\n ray_direction = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)\n ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)\n return ray_origin, ray_direction", "def project_points(X, K, R, T, distortion_flag=False, 
distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n X_camera = np.matmul(R, X) + T\n X_camera = X_camera / X_camera[2, :] # Normalize\n\n if distortion_flag:\n radiusSq = (X_camera[0, :] * X_camera[0, :]) + (X_camera[1, :] * X_camera[1, :])\n X_camera = X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # X_camera = (X_camera * (1 + (distortion_params[0] * radiusSq) + (distortion_params[1] * (radiusSq * radiusSq)) + (distortion_params[4] * (radiusSq * radiusSq * radiusSq)))\n # + (2 * distortion_params[2] * X_camera[0,:] * X_camera[1,:]) + distortion_params[3] * (radiusSq + (2 * X_camera * X_camera)))\n\n X_camera[2, :] = 1.0\n X_camera = np.matmul(K, X_camera)\n X_camera = X_camera[:2, :]\n\n return X_camera", "def test_compute_pixel_ray_directions_vectorized_entireimage() -> None:\n fx = 10\n fy = 10\n\n img_w = 100\n img_h = 50\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n uv_list = []\n for u in range(img_w):\n for v in range(img_h):\n uv_list += [(u, v)]\n\n uv: NDArrayInt = np.array(uv_list)\n assert uv.shape == (img_w * img_h, 2)\n\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n # compare w/ vectorized, should be identical\n for i, ray_dir_vec in enumerate(ray_dirs):\n u, v = uv[i]\n ray_dir_nonvec = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n assert np.allclose(ray_dir_vec, ray_dir_nonvec)", "def _world_point(self, point_3d):\n return self.obj.matrix_world @ point_3d", "def project_point(self, point: Point3D) -> Point3D:\n x, y, z = point\n cam_x, cam_y, cam_z = self._pos\n x -= cam_x\n y -= cam_y\n z -= cam_z\n dx = self._cy*(self._sz*y + self._cz*x) - self._sy*z\n dy = self._sx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) + self._cx*(self._cz*y - self._sz*x)\n dz = self._cx*(self._sy*(self._sz*y + self._cz*x) + self._cy*z) - self._sx*(self._cz*y - self._sz*x)\n return self._scale * dx/dz, self._scale * dy/dz, dz", "def camera_2_world(self, o, d):\r\n wo = self.camera2world_point @ ti.Vector([o.x, o.y, o.z, 1.0])\r\n wd = self.camera2world_vec @ d\r\n return ti.Vector([wo.x,wo.y,wo.z]), wd", "def project_onto_image(self, points_in_camera_frame: ARRAY_LIKE, image: int = 0,\n temperature: Real = 0) -> np.ndarray:\n\n _, __, picture_locations = self.get_projections(points_in_camera_frame, image, temperature=temperature)\n\n return picture_locations", "def projectPoint(self, point):\n vector = self.normal_vector\n angle = vector.angle\n line = Line(point, angle, correct=False)\n projection = self.crossLine(line)\n return projection", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n #to transform a point from Lidar framce to camera frame\n #reshape the flat line with 12 elements to 3X4 matrix\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n#print('velo2cam', velo_to_cam)\n inv_rect = 
np.linalg.inv(rect)\n #select all rows and only first three columns\n#print('velo_to_cam[:, :3]', velo_to_cam[:, :3])\n #select all rows and only first three columns\n inv_velo_to_cam = np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def project(points, camera_params):\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = camera_params[:, 6]\n k1 = camera_params[:, 7]\n k2 = camera_params[:, 8]\n n = np.sum(points_proj ** 2, axis=1)\n r = 1 + k1 * n + k2 * n ** 2\n points_proj *= (r * f)[:, np.newaxis]\n return points_proj", "def vision(image):\n vis_map = resize(image, alpha, beta)\n print(\"Resized map from the blue mask\")\n\n world = rotate(vis_map)\n\n plt.figure()\n plt.imshow(world[:, :, ::-1])\n plt.show()\n object_grid, occupancy_grid = detect_object(world)\n print(\"Result of the red mask\")\n plt.figure()\n plt.imshow(occupancy_grid)\n plt.show()\n return object_grid, occupancy_grid, world", "def render(self, image=False, **kwargs):\n import matplotlib.pyplot as plt\n\n source = self.alignment_transform.source.points\n target = self.alignment_transform.target.points\n # a factor by which the minimum and maximum x and y values of the warp\n # will be increased by.\n x_margin_factor, y_margin_factor = 0.5, 0.5\n # the number of x and y samples to take\n n_x, n_y = 50, 50\n # {x y}_{min max} is the actual bounds on either source or target\n # landmarks\n x_min, y_min = np.vstack([target.min(0), source.min(0)]).min(0)\n x_max, y_max = np.vstack([target.max(0), source.max(0)]).max(0)\n x_margin = x_margin_factor * (x_max - x_min)\n y_margin = y_margin_factor * (y_max - y_min)\n # {x y}_{min max}_m is the bound once it has been grown by the factor\n # of the spread in that dimension\n x_min_m = x_min - x_margin\n x_max_m = x_max + x_margin\n y_min_m = y_min - y_margin\n y_max_m = y_max + y_margin\n # build sample points for the selected region\n x = np.linspace(x_min_m, x_max_m, n_x)\n y = np.linspace(y_min_m, y_max_m, n_y)\n xx, yy = np.meshgrid(x, y)\n sample_points = np.concatenate(\n [xx.reshape([-1, 1]), yy.reshape([-1, 1])], axis=1\n )\n warped_points = self.alignment_transform.apply(sample_points)\n delta = warped_points - sample_points\n # plot the sample points result\n x, y, = (\n 0,\n 1,\n )\n if image:\n # if we are overlaying points onto an image,\n # we have to account for the fact that axis 0 is typically\n # called 'y' and axis 1 is typically called 'x'. 
Flip them here\n x, y = y, x\n plt.quiver(sample_points[:, x], sample_points[:, y], delta[:, x], delta[:, y])\n delta = target - source\n # plot how the landmarks move from source to target\n plt.quiver(\n source[:, x],\n source[:, y],\n delta[:, x],\n delta[:, y],\n angles=\"xy\",\n scale_units=\"xy\",\n scale=1,\n )\n # rescale to the bounds\n plt.xlim((x_min_m, x_max_m))\n plt.ylim((y_min_m, y_max_m))\n if image:\n # if we are overlaying points on an image, axis0 (the 'y' axis)\n # is flipped.\n plt.gca().invert_yaxis()\n return self", "def converte_coord(valor):\n\n pts1 = ([0,0],[24,0],[24,44],[0,44])\n pts1 = np.asarray(pts1, dtype = np.float32)\n pts2 = np.float32([[0,0],[100,0], [100,100], [0,100]])\n\n M = cv.getPerspectiveTransform(pts1,pts2)\n img2 = cv.warpPerspective(valor,M,(100,100))\n return img2", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def point_from_rays(self):\n print \"generating the 3d point from given clicked points\"\n \n #gather cams and points clicked \n uvs = []\n cams = []\n for iFrame in self.frames:\n if iFrame.lastClick : \n uv = numpy.multiply(iFrame.lastClick,self.reduceFactor)\n uvs.append(uv)\n cam = load_perspective_camera(self.camList[iFrame.currImg])\n cams.append(cam)\n point = get_3d_from_cams(cams, uvs)\n self.point3d = point;\n self.pointLabel.set(\"3d Point: \" + str(self.point3d))\n\n # project 3d point into each image, and gather intensities \n values = []\n ims = []\n for idx, img in enumerate(self.imgList):\n cam = load_perspective_camera(self.camList[idx])\n imgPoint = project_point(cam, point[0], point[1], point[2])\n imgPoint = numpy.divide(imgPoint, self.reduceFactor)\n self.allUVs.append(imgPoint)\n \n #grab float intensity value at this point \n imgView,ni,nj = load_image(img)\n val = pixel(imgView, imgPoint)\n if val > 0.0:\n values.append(val)\n ims.append(idx)\n \n #cleanup\n remove_from_db([imgView, cam])\n \n\n #write mean/std of intensities \n self.meanLabel.set(\"Mean: \" + str(numpy.mean(values)) )\n self.stdLabel.set(\"Std Dev: \" + str(numpy.std(values)) )\n #plot the intensities by image number \n self.f.clf();\n self.a = self.f.add_subplot(311)\n self.a.set_xlabel(\"img #\")\n self.a.set_ylabel(\"intensity\")\n self.a.plot(ims, values)\n #plot the histogram of intensities by image number \n pdf, bins, patches = plt.hist(values)\n self.b = self.f.add_subplot(313)\n self.b.set_xlabel(\"bin val\")\n self.b.set_ylabel(\"freq\")\n self.b.hist(values, 15, normed=1, facecolor=\"green\" )\n self.canvas.show();", "def _get_coord(self, person_depth, x, y):\n unit_vector = self.camera_model.projectPixelTo3dRay((x, y))\n normalized_vector = [i / unit_vector[2] for i in unit_vector]\n point_3d = [j * person_depth for j in normalized_vector]\n return point_3d", "def project(points, camera_params):\n # print(camera_params.shape)\n points_proj = rotate(points, camera_params[:, :3])\n points_proj += camera_params[:, 3:6]\n points_proj = -points_proj[:, :2] / points_proj[:, 2, np.newaxis]\n f = 2360*np.ones(camera_params.shape[0])\n # np.ones()\n # n = np.sum(points_proj**2, axis=1)\n r = 1\n points_proj *= 
(r * f)[:, np.newaxis]\n return points_proj", "def project_points(X, K, R, T, distortion_flag=False, distortion_params=None):\n # Project points from 3d world coordinates to 2d image coordinates\n\n #get projection matrix\n pmatrix = projection_matrix(R, T, K)\n\n #add 4th component to points\n ones = np.ones([1,len(X[0])])\n xones=np.row_stack((X,ones))\n\n #calculate pixel coordinates\n X_camera = pmatrix.dot(xones)\n\n return X_camera", "def triangulate(Kl, Kr, Twl, Twr, pl, pr, Sl, Sr):\r\n #--- FILL ME IN ---\r\n \r\n # Compute baseline (right camera translation minus left camera translation)\r\n Cr = (Twr)[0:3,-1] #left camera translaton\r\n Cl = (Twl)[0:3,-1] #right camera translation\r\n b = (Cr - Cl).reshape(3,1)\r\n \r\n \r\n # Unit vectors projecting from optical center to image plane points.\r\n # Use variables rayl and rayr for the rays.\r\n rayl = Twl[0:3,0:3].dot(inv(Kl)).dot(np.insert(pl,2,1, axis =0))\r\n rayl = rayl/norm(rayl) #convert to unit vector\r\n \r\n rayr = Twr[0:3,0:3].dot(inv(Kr)).dot(np.insert(pr,2,1, axis =0))\r\n rayr = rayr/norm(rayr) #convert to unit vector\r\n \r\n \r\n # Projected segment lengths.\r\n # Use variables ml and mr for the segment lengths.\r\n rLrR = rayl.T.dot(rayr)[0][0]\r\n ml = ((b.T.dot(rayl) - (b.T.dot(rayr))*(rLrR))/(1-rLrR**2))[0][0]\r\n mr = (rLrR*ml - b.T.dot(rayr))[0][0]\r\n \r\n # Segment endpoints.\r\n # User variables Pl and Pr for the segment endpoints.\r\n Pl = Cl.reshape(3,1) + rayl*ml\r\n Pr = Cr.reshape(3,1) + rayr*mr\r\n \r\n # Now fill in with appropriate ray Jacobians. These are \r\n # 3x4 matrices, but two columns are zeros (because the right\r\n # ray direction is not affected by the left image point and \r\n # vice versa).\r\n drayl = np.zeros((3, 4)) # Jacobian left ray w.r.t. image points.\r\n drayr = np.zeros((3, 4)) # Jacobian right ray w.r.t. image points.\r\n \r\n # Add code here...\r\n #rayl = f(x)_l/g(x)_l = r/norm(r). Equation for unit vector provided in the assignment\r\n #drayl = d/dx[f(x)_l/g(x)_l] = ( d/dx[f(x)_l]*g(x)_l - f(x)_l*d/dx[g(x)_l] / [g(x)_l]^2 )\r\n #where x is the image plane points in the left camera ul (i.e pl[0][0]), vl (i.e pl[1][0]), \r\n #and right camera ur (i.e pr[0][0]), vr (i.e pr[1][0])\r\n \r\n #As per equation in the assignment. I.e column vector (c1*u, c2*v, c3)\r\n fxl = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[pl[0][0]],[pl[1][0]],[1]]))\r\n \r\n #f(x)_l = column vector(c1*ul, c2*vl + c3). \r\n #Therefore f(x)_l w.r.t u = f(x)l_u = column vector (c1, 0, 0,)\r\n fxl_u = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[1],[0],[0]]))\r\n #Therefore f(x)_l w.r.t v = f(x)l_v = column vector (0, c2, 0,)\r\n fxl_v = Twl[:3,:3].dot(inv(Kl)).dot(np.array([[0],[1],[0]]))\r\n \r\n #Same math applied as with f(x)_l shown above - only that it is with the right camera\r\n fxr = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[pr[0][0]],[pr[1][0]],[1]]))\r\n fxr_u = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[1],[0],[0]]))\r\n fxr_v = Twr[:3,:3].dot(inv(Kr)).dot(np.array([[0],[1],[0]]))\r\n \r\n #Recall from above that g(x)_l = norm(r)\r\n gxl = norm(fxl)\r\n #g(x)_l wrt to u is; u*c1^2/norm(r). Where u*c1^2 = fxl_u.T.dot(fxl)\r\n # and gxl = norm(r)\r\n gxl_u = fxl_u.T.dot(fxl)/gxl \r\n #g(x)_l wrt to v is; v*c2^2/norm(r). 
Where v*c2^2 = fxl_v.T.dot(fxl)\r\n # and gxl = norm(r) \r\n gxl_v = fxl_v.T.dot(fxl)/gxl\r\n \r\n # same as above except with the right camera\r\n gxr = norm(fxr)\r\n gxr_u = fxr_u.T.dot(fxr)/gxr\r\n gxr_v = fxr_v.T.dot(fxr)/gxr\r\n \r\n #Fill in Jacobian results with results from above \r\n drayl[:,0] = ((fxl_u.dot(gxl) - fxl.dot(gxl_u))/(gxl*gxl)).reshape(3,)\r\n drayl[:,1] = ((fxl_v.dot(gxl) - fxl.dot(gxl_v))/(gxl*gxl)).reshape(3,) \r\n drayr[:,2] = ((fxr_u.dot(gxr) - fxr.dot(gxr_u))/(gxr*gxr)).reshape(3,)\r\n drayr[:,3] = ((fxr_v.dot(gxr) - fxr.dot(gxr_v))/(gxr*gxr)).reshape(3,)\r\n \r\n \r\n \r\n #------------------\r\n \r\n # Compute dml and dmr (partials wrt segment lengths).\r\n # Compute dml and dmr (partials wrt segment lengths).\r\n u = np.dot(b.T, rayl) - np.dot(b.T, rayr)*np.dot(rayl.T, rayr)\r\n v = 1 - np.dot(rayl.T, rayr)**2\r\n\r\n du = (b.T@drayl).reshape(1, 4) - \\\r\n (b.T@drayr).reshape(1, 4)*np.dot(rayl.T, rayr) - \\\r\n np.dot(b.T, rayr)*((rayr.T@drayl) + (rayl.T@drayr)).reshape(1, 4)\r\n \r\n dv = -2*np.dot(rayl.T, rayr)*((rayr.T@drayl).reshape(1, 4) + \\\r\n (rayl.T@drayr).reshape(1, 4))\r\n\r\n m = np.dot(b.T, rayr) - np.dot(b.T, rayl)@np.dot(rayl.T, rayr)\r\n n = np.dot(rayl.T, rayr)**2 - 1\r\n\r\n dm = (b.T@drayr).reshape(1, 4) - \\\r\n (b.T@drayl).reshape(1, 4)*np.dot(rayl.T, rayr) - \\\r\n np.dot(b.T, rayl)@((rayr.T@drayl) + (rayl.T@drayr)).reshape(1, 4)\r\n dn = -dv\r\n\r\n dml = (du*v - u*dv)/v**2\r\n dmr = (dm*n - m*dn)/n**2\r\n\r\n # Finally, compute Jacobian for P w.r.t. image points.\r\n JP = (ml*drayl + rayl*dml + mr*drayr + rayr*dmr)/2\r\n \r\n #--- FILL ME IN ---\r\n \r\n # 3D point.\r\n P = (Pl + Pr)/2\r\n \r\n # 3x3 landmark point covariance matrix (need to form\r\n # the 4x4 image plane covariance matrix first).\r\n M = np.zeros((4,4))\r\n M[0:2,0:2] = Sl\r\n M[2:4,2:4] = Sr\r\n \r\n S = JP.dot(M).dot(JP.T) #as per equation in the assignment\r\n\r\n # Check for correct outputs...\r\n correct = isinstance(Pl, np.ndarray) and Pl.shape == (3, 1) and \\\r\n isinstance(Pr, np.ndarray) and Pr.shape == (3, 1) and \\\r\n isinstance(P, np.ndarray) and P.shape == (3, 1) and \\\r\n isinstance(S, np.ndarray) and S.shape == (3, 3)\r\n\r\n if not correct:\r\n raise TypeError(\"Wrong type or size returned!\")\r\n\r\n return Pl, Pr, P, S", "def compute_ray(self, box):\n if box[0, -1] > 0:\n warnings.warn('Box should have negative Z values.')\n\n size_x = np.linalg.norm(box[5] - box[1])\n size_y = np.linalg.norm(box[3] - box[1])\n size_z = np.linalg.norm(box[2] - box[1])\n size = np.asarray([size_x, size_y, size_z])\n box_o = Box.UNIT_BOX * size\n box_oh = np.ones((4, 9))\n box_oh[:3] = np.transpose(box_o)\n\n box_ch = np.ones((4, 9))\n box_ch[:3] = np.transpose(box)\n box_cht = np.transpose(box_ch)\n\n box_oct = np.matmul(box_oh, box_cht)\n box_cct_inv = np.linalg.inv(np.matmul(box_ch, box_cht))\n transform = np.matmul(box_oct, box_cct_inv)\n return transform[:3, 3:].reshape((3))", "def do_intensity_projection(points, proj_W , proj_H, proj_fov_up, proj_fov_down, fn, idx):\n\n # print(points.shape)\n\n points = points[points.any(axis=1)]\n\n proj_range = np.zeros((proj_H, proj_W),\n dtype=np.float64)\n\n # unprojected range (list of depths for each point)\n unproj_range = np.zeros((0, 1), dtype=np.float32)\n\n # projected point cloud xyz - [H,W,3] xyz coord (-1 is no data)\n proj_xyz = np.full((proj_H, proj_W, 4), -1,\n dtype=np.float32)\n\n # projected remission - [H,W] intensity (-1 is no data)\n proj_remission = np.full((proj_H, proj_W), -1,\n 
dtype=np.float32)\n\n # projected index (for each pixel, what I am in the pointcloud)\n # [H,W] index (-1 is no data)\n proj_idx = np.full((proj_H, proj_W), -1,\n dtype=np.int32)\n\n # for each point, where it is in the range image\n proj_x = np.zeros((0, 1), dtype=np.int32) # [m, 1]: x\n proj_y = np.zeros((0, 1), dtype=np.int32) # [m, 1]: y\n\n # mask containing for each pixel, if it contains a point or not\n proj_mask = np.zeros((proj_H, proj_W),\n dtype=np.int32) # [H,W] mask\n\n\n\n\n # laser parameters\n fov_up = proj_fov_up / 180.0 * np.pi # field of view up in rad\n fov_down = proj_fov_down / 180.0 * np.pi # field of view down in rad\n fov = abs(fov_down) + abs(fov_up) # get field of view total in rad\n\n\n \n depth = np.linalg.norm(points[:,:3], 2, axis=1)\n\n # print(points[:10,:])\n \n\n # get scan components\n scan_x = points[:, 0]\n scan_y = points[:, 1]\n scan_z = points[:, 2]\n\n # get angles of all points\n yaw = -np.arctan2(scan_y, scan_x) \n pitch = np.arcsin(scan_z / depth)\n\n # get projections in image coords\n proj_x = 0.5 * (yaw / np.pi + 1.0) # in [0.0, 1.0]\n proj_y = 1.0 - (pitch + abs(fov_down)) / fov # in [0.0, 1.0]\n\n proj_x = np.nan_to_num(proj_x)\n\n proj_y = np.nan_to_num(proj_y)\n # scale to image size using angular resolution\n proj_x *= proj_W # in [0.0, W]\n proj_y *= proj_H # in [0.0, H]\n\n \n \n\n # round and clamp for use as index\n proj_x = np.floor(proj_x)\n proj_x = np.minimum(proj_W - 1, proj_x)\n proj_x = np.maximum(0, proj_x).astype(np.int32) # in [0,W-1]\n proj_x = np.copy(proj_x) # store a copy in orig order\n\n proj_y = np.floor(proj_y)\n proj_y = np.minimum(proj_H - 1, proj_y)\n proj_y = np.maximum(0, proj_y).astype(np.int32) # in [0,H-1]\n\n proj_y = np.copy(proj_y) # stope a copy in original order\n\n\n # # copy of depth in original order\n # unproj_range = np.copy(depth)\n\n # indices = np.arange(depth.shape[0])\n # order = np.argsort(depth)[::-1]\n # depth = depth[order]\n # indices = indices[order]\n # points = points[order]\n\n # proj_y = proj_y[order]\n # proj_x = proj_x[order]\n \n\n if DATASET_TYPE == \"kitti\":\n intensities = points[:,3]\n print(\"kitti\")\n # intensities = np.minimum(intensities, 1000)\n # i_min = intensities.min()\n # i_max = intensities.max()\n # intensities = (intensities - i_min)/(i_max - i_min)\n\n\n\n if DATASET_TYPE == \"mulran\" or DATASET_TYPE == \"mulran2\":\n intensities = points[:,3]\n intensities = np.minimum(intensities, 1000)\n i_min = intensities.min()\n i_max = intensities.max()\n \n intensities = (intensities - i_min)/(i_max - i_min)\n\n if DATASET_TYPE == \"dso\":\n \n \n \n intensities = points[:,4]\n \n\n minval = np.percentile(intensities, 2)\n maxval = np.percentile(intensities, 98)\n intensities = np.clip(intensities, minval, maxval)\n # intensities = np.maximum(intensities, 5000)\n # intensities = np.sqrt(intensities)\n\n \n\n\n \n i_min = intensities.min()\n i_max = intensities.max()\n\n intensities = (intensities - i_min)/(i_max - i_min)\n\n \n\n\n \n\n \n\n \n \n\n\n \n \n \n \n \n pixel_tracker = {}\n pc_tracker = {}\n # print(proj_x.shape)\n # print(scan_x.shape)\n\n \n proj_3d_corres = np.zeros((proj_H, proj_W, 3),\n dtype=np.float64)\n\n # print(proj_x[:20])\n # print(proj_y[:70])\n \n \n for i in range(proj_x.shape[0]):\n x_val = proj_x[i]\n y_val = proj_y[i]\n\n \n\n if proj_range[y_val, x_val] != 0:\n continue\n\n \n \n intensity = intensities[i]\n \n \n \n \n proj_range[y_val, x_val] = intensity\n \n proj_3d_corres[y_val,x_val, :] = np.array([scan_x[i], scan_y[i], 
scan_z[i]])\n \n\n\n \n proj_range *= 255\n\n\n \n \n \n \n proj_range = np.array(proj_range, dtype=np.uint8)\n\n \n newPicPath = None\n\n\n \n\n\n img = Image.fromarray(proj_range, 'L')\n pc_name = fn.split('.')[0]\n newPicPath = os.path.join(CURRENT_DIR, \"intensity_images\", \"mulran_\" + (str(idx)) + \".png\")\n img.save(newPicPath)\n\n\n return newPicPath, proj_3d_corres, proj_range", "def update_transform(self):\n\n self.a = self.scale * self.pixel_size * math.cos(self.angle)\n self.d = self.scale * self.pixel_size * math.sin(self.angle)\n self.b = self.d\n self.e = -self.a\n self.c = self.point.x() - self.a*self.width/2.0 - self.b*self.height/2.0\n self.f = self.point.y() - self.d*self.width/2.0 - self.e*self.height/2.0\n\n self.bounding_box = [[self.c,self.f],[self.c+self.a*self.width,self.f+self.d*self.width],[self.c+self.a*self.width+self.b*self.height,self.f+self.d*self.width+self.e*self.height],[self.c+self.b*self.height,self.f+self.e*self.height],]", "def elastic_transform(image, label):\n # Params taken from https://arxiv.org/pdf/1705.03820.pdf\n dx = dxs[np.random.randint(0, len(dxs))]\n dy = dys[np.random.randint(0, len(dys))]\n\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))\n\n distored_image = map_coordinates(image, indices, order=1, mode='reflect')\n distored_label = map_coordinates(np.expand_dims(label, -1), indices, order=1, mode='reflect')\n\n img, lab = distored_image.reshape(image.shape), distored_label.reshape(image.shape)[:, :, 0]\n return img, lab", "def _trace_ray(self, ray, depth=0, max_depth=5):\n\n color = Color()\n\n if depth >= max_depth:\n return color\n\n intersection = self._get_intersection(ray)\n if intersection is None:\n return color\n\n obj, dist = intersection\n intersection_pt = ray.point_at_dist(dist)\n surface_norm = obj.surface_norm(intersection_pt)\n\n # ambient light\n # color += obj.material.color * obj.material.ambient\n\n point_on_plane = ray.origin + dist*ray.direction\n imgx = point_on_plane.x\n imgy = np.sqrt(point_on_plane.y*point_on_plane.y + point_on_plane.z*point_on_plane.z)\n\n\n '''\n # Nearest Texel\n int_imgx = int(round(imgx))\n int_imgy = int(round(imgy))\n if int_imgx == 512:\n int_imgx = 511\n if int_imgy == 512:\n int_imgy = 511\n color += Color(img[int_imgx, int_imgy, 0], img[int_imgx, int_imgy, 1], img[int_imgx, int_imgy, 2])\n '''\n\n\n # Bilinearly Interpolated Texel\n ceilx = int(math.ceil(imgx))\n ceily = int(math.ceil(imgy))\n floorx = int(math.floor(imgx))\n floory = int(math.floor(imgy))\n if ceilx >= 512:\n ceilx = 511\n if ceily >= 512:\n ceily = 511\n if floorx >= 512:\n floorx = 511\n if floory >= 512:\n floory = 511\n interpolate_x1 = (ceilx - imgx) * (img[ceilx, ceily]) + (imgx - floorx) * (img[floorx, ceily])\n interpolate_x2 = (ceilx - imgx) * (img[ceilx, floory]) + (imgx - floorx) * (img[floorx, floory])\n interpolate_y = (ceily - imgy) * interpolate_x1 + (imgy - floory) * interpolate_x2\n color += Color(interpolate_y[0], interpolate_y[1], interpolate_y[2])\n # print color\n\n\n '''\n # lambert shading\n for light in self.lights:\n pt_to_light_vec = (light - intersection_pt).normalize()\n pt_to_light_ray = Ray(intersection_pt, pt_to_light_vec)\n if self._get_intersection(pt_to_light_ray) is None:\n lambert_intensity = surface_norm * pt_to_light_vec\n if lambert_intensity > 0:\n color += obj.material.color * obj.material.lambert * \\\n lambert_intensity\n\n \n # specular (reflective) light\n reflected_ray = Ray(\n intersection_pt, 
ray.direction.reflect(surface_norm).normalize())\n color += self._trace_ray(reflected_ray, depth + 1) * \\\n obj.material.specular\n '''\n return color", "def world2Pixel(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xDist = geoMatrix[1]\r\n yDist = geoMatrix[5]\r\n rtnX = geoMatrix[2]\r\n rtnY = geoMatrix[4]\r\n pixel = int((x - ulX) / xDist)\r\n line = int((ulY - y) / xDist)\r\n return (pixel, line)", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def _apply_transform(self, img: np.ndarray): \n img = self.transform(image=img)[\"image\"]\n return img", "def to_povray(vis,world,properties={}):\n #patch on vapory\n patch_vapory()\n \n #camera\n mat=vis.view.camera.matrix()\n pos=mat[1]\n right=mat[0][0:3]\n up=mat[0][3:6]\n dir=op.mul(mat[0][6:9],-1)\n tgt=op.add(mat[1],dir)\n #scale\n fovy=vis.view.fov*vis.view.h/vis.view.w\n fovx=math.atan(vis.view.w*math.tan(fovy*math.pi/360.)/vis.view.h)*360./math.pi\n right=op.mul(right,-float(vis.view.w)/vis.view.h)\n #camera\n camera_params=['orthographic' if vis.view.orthogonal else 'perspective',\n 'location',[pos[0],pos[1],pos[2]],\n 'look_at',[tgt[0],tgt[1],tgt[2]],\n 'right',[right[0],right[1],right[2]],\n 'up',[up[0],up[1],up[2]],\n 'angle',fovx,\n 'sky',get_property(properties,[],\"sky\",[0.,0.,1.])]\n camera=vp.Camera(*camera_params)\n \n #tempfile\n tempfile=get_property(properties,[],\"tempfile\",None)\n tempfile_path=os.path.dirname(tempfile) if tempfile is not None else '.'\n if not os.path.exists(tempfile_path):\n os.mkdir(tempfile_path)\n \n #objects\n objects=[]\n objs=[o for o in properties[\"visualObjects\"]] if \"visualObjects\" in properties else []\n objs+=[world.terrain(i) for i in range(world.numTerrains())]\n objs+=[world.rigidObject(i) for i in range(world.numRigidObjects())]\n for r in range(world.numRobots()):\n objs+=[world.robot(r).link(i) for i in range(world.robot(r).numLinks())]\n for obj in objs:\n transient=get_property(properties,[obj],\"transient\",default=True)\n if transient:\n objects+=geometry_to_povray(obj.appearance(),obj.geometry(),obj,None,properties=properties)\n else: \n path=tempfile_path+'/'+obj.getName()+'.pov'\n if not os.path.exists(path):\n R,t=obj.geometry().getCurrentTransform()\n obj.geometry().setCurrentTransform([1,0,0,0,1,0,0,0,1],[0,0,0])\n geom=geometry_to_povray(obj.appearance(),obj.geometry(),obj,None,properties=properties)\n if len(geom)>1:\n file_content=vp.Union(*geom)\n elif len(geom)>0: \n file_content=vp.Object(*geom)\n else: file_content=None\n if file_content is not None:\n f=open(path,'w')\n f.write(str(file_content))\n f.close()\n obj.geometry().setCurrentTransform(R,t)\n else: path=None\n #include \n if path is not None:\n R,t=obj.geometry().getCurrentTransform()\n objects.append(vp.Object('#include \"%s\"'%path,\"matrix\",R+t))\n \n #light\n if \"lights\" in properties:\n objects+=properties[\"lights\"]\n \n #scene\n gsettings=[]\n scene=vp.Scene(camera=camera,\n objects=objects,\n included=get_property(properties,[],\"included\",[]),\n global_settings=get_property(properties,[],\"global_settings\",[]))\n try:\n #this works with later version of vapory\n return \\\n render_povstring(str(scene), \\\n outfile=get_property(properties,[],\"outfile\",None), \\\n width=vis.view.w,height=vis.view.h, \\\n quality=get_property(properties,[],\"quality\",None), \\\n antialiasing=get_property(properties,[],\"antialiasing\",0.3), \\\n remove_temp=get_property(properties,[],\"remove_temp\",False), \\\n 
show_window=get_property(properties,[],\"show_window\",False), \\\n tempfile=tempfile, \\\n includedirs=get_property(properties,[],\"includedirs\",None), \\\n output_alpha=get_property(properties,[],\"output_alpha\",True))\n except:\n #this works with earlier version of vapory\n return \\\n render_povstring(str(scene), \\\n outfile=get_property(properties,[],\"outfile\",None), \\\n width=vis.view.w,height=vis.view.h, \\\n quality=get_property(properties,[],\"quality\",None), \\\n antialiasing=get_property(properties,[],\"antialiasing\",0.3), \\\n remove_temp=get_property(properties,[],\"remove_temp\",False))", "def elastic_transform(self, image, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n image = self.affine(image, random_state)\n #from ipdb import set_trace; set_trace()\n indices = self.stretch_indices(image, random_state)\n\n return map_coordinates(image, indices, order=1, mode='reflect').reshape(image.shape)", "def process_image(image):\n undist = calibrator.undistort(image)\n binarized = binarizer.process(undist)\n warped = warper.warp(binarized)\n\n lane.detect_lane(warped)\n\n debug_image = lane.get_debug_image(warped)\n\n visualizer.draw_debugging_output(undist, binarized, warped, debug_image)\n visualizer.draw_text_info(undist, lane.center_curvature, lane.center_offset)\n result = visualizer.draw_lane_on_road(undist, lane)\n\n return result", "def direct(sun_pos, grid):\n\n # for each pixel at top of grid pass sun rays in\n for i in xrange(grid.gr.shape[0]):\n \"\"\"\n Make an array starting at loc\n \"\"\"\n xpos = i * grid.xres\n ypos = grid.zres * grid.zsize\n pos = np.array(xpos, ypos)\n direction = pos - sun_pos / np.norm(pos - sun_pos) # this location minus \n r = ray(pos, direction)\n \"\"\"\n The ray now travels down through the canopy being\n altered by transmission and reflectance\n\n amount of scattering vs absorption is determined by leaf area density\n\n \"\"\"", "def process_img(img, mtx, dist, line):\n undistort = cv2.undistort(img, mtx, dist, None, mtx)\n binary_img = create_binary_image(img)\n M, Minv = perspective_transform()\n warped = cv2.warpPerspective(binary_img, M, (img.shape[1], img.shape[0]))\n warped *= 255\n\n # Use line.fine_lane method to find the lane\n left_fitx, right_fitx, ploty, curvature, distance = line.find_lane(warped)\n\n # Draw lanes and return it\n out_img = draw_lane_on_img(img, warped, left_fitx, right_fitx, ploty, Minv, curvature, distance)\n return out_img", "def projective_transform(self, x):\n\n x = np.asarray(x)\n # Assume no intensity column\n x0, y0, z0 = x\n\n # Camera coors to pixel coors\n u = ((x0 / z0) * self.f) + (self.sensor_size[0] // 2)\n v = ((y0 / z0) * self.f) + (self.sensor_size[1] // 2)\n\n u_min = np.min(u)\n v_min = np.min(v)\n\n n = len(u)\n u_list = []\n v_list = []\n if self.error_on_oob:\n for i in range(n):\n if (u[i] >= u_min and u[i] <= self.sensor_size[0] and v[i] >= v_min and v[i] <= self.sensor_size[1]):\n u_list.append(u[i])\n v_list.append(v[i])\n else:\n raise OutOfSensorBoundsError(\"Projected coordinate was outside the sensor\")\n else:\n for i in range(n):\n u_list.append(u[i])\n v_list.append(v[i])\n\n u = np.asarray(u_list)\n v = np.asarray(v_list)\n\n return np.vstack((u, v))", "def rasterize(self):\n\n for primitive in self._scene:\n bbox = primitive[\"bounding_box\"]\n # Loop through all pixels\n # You MUST use bounding boxes in order to speed up this loop\n for w in range(bbox[0][0], bbox[1][0]):\n x = w + 0.5\n for h in range(bbox[0][1], 
bbox[1][1]):\n y = h + 0.5\n # First, we check if the pixel center is inside the primitive\n im_x, im_y = w, self._height - (h + 1)\n if inside(x, y, primitive):\n # apply affine xfrom if needed\n if \"xform\" in primitive.keys():\n result = np.matmul(primitive[\"xform\"],\n [[im_x], [im_y], [1]])\n im_x, im_y = int(result[0][0]), int(result[1][0])\n\n self._image[im_y, im_x] = primitive[\"color\"]\n # break\n # break\n # break", "def localize(image):\n\n # Call the vision function in order to have the grid with the obstacle and the goal coordinate\n object_grid, occupancy_grid, world = vision(image)\n\n # Correction of the goal coordinate in order to fit the A* coordinate\n goal_x = object_grid[0][1]\n goal_y = WIDTH_G - object_grid[0][0]\n goal_coor = (goal_x, goal_y)\n\n return occupancy_grid, goal_coor", "def transform(self, image):\n # e) use cv2.warpPerspective() to warp your image to a top-down view\n # Warp the image using OpenCV warpPerspective()\n w, h = image.shape[1], image.shape[0]\n return cv2.warpPerspective(image, self.p_mat, (w, h))", "def getGazeDirection(self,img, facebox):\n facebox_list = facebox.getList()\n \n #extrat face box and downsampling\n face_img = img[facebox_list[1]: facebox_list[3],facebox_list[0]: facebox_list[2]]\n face_img = cv2.resize(face_img, (128, 128))\n face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)\n\n #marks detection\n marks = self.mark_detector.detect_marks([face_img])\n \n #scale and move back marks in original image coordinate\n marks *= (facebox_list[2] - facebox_list[0])\n marks[:, 0] += facebox_list[0]\n marks[:, 1] += facebox_list[1]\n shape = marks.astype(np.uint)\n\n #TODO:consider different points for surgery masks\n image_points = np.array([\n shape[30], # Nose tip\n shape[8], # Chin\n shape[36], # Left eye left corner\n shape[45], # Right eye right corne\n shape[48], # Left Mouth corner\n shape[54] # Right mouth corner\n ], dtype=\"double\")\n \n if self.debug > 1:\n for p in image_points:\n cv2.circle(img, (int(p[0]), int(p[1])), 3, (0,0,255), -1)\n\n #Solving PnP\n dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion\n (success, rotation_vector, translation_vector) = cv2.solvePnP(self.model_points, image_points, self.camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_UPNP)\n \n #Get a ortogal to tha face plane - x1 and x2 are two points definig the line in the projected space\n #TODO: remove and make a line going out from eyes\n \n \n # Calculate euler angle\n rotation_mat, _ = cv2.Rodrigues(rotation_vector)\n pose_mat = cv2.hconcat((rotation_mat, translation_vector))\n _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)\n\n\n x1, x2 = FaceMarksDetector.computeLineOfSigth(img, rotation_vector, translation_vector, self.camera_matrix)\n\n if self.debug > 0:\n #display the line\n cv2.line(img, tuple(x1), tuple(x2), (255, 255, 0), 2)\n\n for (x, y) in shape:\n cv2.circle(img, (x, y), 4, (255, 255, 0), -1)\n\n if self.debug > 0:\n self.mark_detector.draw_marks(img, marks, color=(0, 255, 0))\n \n return marks, (x1, x2) , euler_angles", "def apply_transform_to_image(self,img, transform, center=None):\n \n if center is None:\n center = (np.array(img.shape)[::-1]-1)/2.0\n \n displacement = np.dot(transform, center)\n shift = center - displacement\n \n img_tf = ndimage.interpolation.affine_transform(img, transform, offset=shift, mode=\"constant\", order=3, cval=0.0)\n return img_tf", "def scene_to_ingame(self, x, y):\n (scene_x, scene_y) = self.mainwindow.get_inverted_zoom_transform().map(x, y)\n new_x = 
scene_x//8\n # TODO: this y coord may be slightly off\n new_y = ((self.world.height*8) - scene_y)//8\n return (new_x, new_y)", "def ray(self):\n return self._ray", "def _render_image(self):\n\n self._render_static_image_annotation()\n\n if self._goal_robot_pose is not None:\n # Render the goal pose as the robot is driving to target...\n self._goal_robot_pose.header.stamp = self._image_time # AHHHHH THIS IS NOT \n self._tf_listener.waitForTransform('/map',\n self._image_info.tf_frame, \n self._image_time,\n rospy.Duration(4))\n\n self._goal_robot_pose.pose.position.z = 1.5 # force goal point to be 1.5m\n pose = self._tf_listener.transformPose(self._image_info.tf_frame,\n self._goal_robot_pose)\n u, v = self._image_info.project3dToPixel((pose.pose.position.x,\n pose.pose.position.y,\n pose.pose.position.z))\n self._goal_robot_pose.pose.position.z=1.45 # force goal point to be 1.5m\n pose = self._tf_listener.transformPose(self._image_info.tf_frame,\n self._goal_robot_pose)\n u2, v2 = self._image_info.project3dToPixel((pose.pose.position.x,\n pose.pose.position.y,\n pose.pose.position.z))\n radius = int(math.sqrt((u2-u)**2 + (v2-v)**2))\n if radius < 100:\n cv2.putText(self._image, \"Goal Location\", (int(u+radius+1), int(v+radius+1)),\n cv2.FONT_HERSHEY_SIMPLEX, radius/10.0, 255, radius/200 * 3)\n cv2.circle(self._image, (int(u),int(v)), radius, (0,0,255,127),-1)\n\n\n\n if self._point_clouds is not None:\n # Render the bouding boxes of objects...\n # Project each response cluster into image\n box_locations = []\n print\n for i, (cloud, label) in enumerate(zip(self._point_clouds, self._labels)):\n print \"Object \",i,\"/\",len(self._point_clouds)\n location = self._project_pointcloud(cloud)\n print location\n box_locations.append(location)\n cv2.rectangle(self._image,\n location[0],location[1],\n (255, 0, 0),\n 3)\n cv2.putText(self._image, label,\n (location[0][0], location[0][1]-10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255, 2)", "def _image_to_point_space(pixel_coordinates: np.array, boundary_radius: int, resolution: int) -> np.array:\n pix_origin = np.array([resolution / 2, resolution / 2])\n return np.array((pixel_coordinates - pix_origin) * (2 * boundary_radius) / resolution)", "def transform(self, x, y, z=None, crs=wgs84, nearest=False, maskout=False):\n\n x, y = np.ma.array(x), np.ma.array(y)\n\n # First to local proj\n _crs = check_crs(crs, raise_on_error=True)\n if isinstance(_crs, pyproj.Proj):\n x, y = transform_proj(_crs, self.proj, x, y)\n elif isinstance(_crs, Grid):\n x, y = _crs.ij_to_crs(x, y, crs=self.proj)\n\n # Then to local grid\n x = (x - self.x0) / self.dx\n y = (y - self.y0) / self.dy\n\n # See if we need to round\n if nearest:\n f = np.rint if self.pixel_ref == 'center' else np.floor\n x = f(x).astype(int)\n y = f(y).astype(int)\n\n # Mask?\n if maskout:\n if self.pixel_ref == 'center':\n mask = ~((x >= -0.5) & (x < self.nx-0.5) &\n (y >= -0.5) & (y < self.ny-0.5))\n else:\n mask = ~((x >= 0) & (x < self.nx) &\n (y >= 0) & (y < self.ny))\n x = np.ma.array(x, mask=mask)\n y = np.ma.array(y, mask=mask)\n\n return x, y", "def camera_pixels_to_camera_coords(left_pt, right_pt, nparrays=False):\n assert len(left_pt) == len(right_pt) == 2\n disparity = np.linalg.norm( np.array(left_pt) - np.array(right_pt) )\n (xx,yy,zz) = STEREO_MODEL.projectPixelTo3d( (left_pt[0],left_pt[1]), disparity )\n if nparrays:\n return np.array([xx,yy,zz])\n else:\n return [xx,yy,zz]", "def camera_to_object_transform(self):\n # form the full object to camera transform\n T_stp_camera = 
self.stp_to_camera_transform()\n T_obj_stp = self.object_to_stp_transform()\n T_obj_camera = T_stp_camera.dot(T_obj_stp)\n return T_obj_camera", "def to_world(self, x, y, **kwargs):", "def calculate_stereographic_projection(p):\n # P' = P * (2r / r + z)\n mu = 1 / (1 + p[2])\n x = p[0] * mu\n y = p[1] * mu\n return x, y", "def distort_point(point):\n fx, fy = _camera_tuned_matrix[0][0], _camera_tuned_matrix[1][1]\n cx, cy = _camera_tuned_matrix[0][2], _camera_tuned_matrix[1][2]\n x, y = (point.x - cx) / fx, (point.y - cy) / fy\n\n k1, k2, p1, p2, k3 = _camera_distortion[0]\n r2 = x ** 2 + y ** 2\n r4 = r2 * r2\n r6 = r2 * r4\n x = x * (1 + k1 * r2 + k2 * r4 + k3 * r6) + 2 * p1 * x * y + p2 * (r2 + 2 * x * x)\n y = y * (1 + k1 * r2 + k2 * r4 + k3 * r6) + p1 * (r2 + 2 * y * y) + 2 * p2 * x * y\n\n fx2, fy2 = _camera_matrix[0][0], _camera_matrix[1][1]\n cx2, cy2 = _camera_matrix[0][2], _camera_matrix[1][2]\n x2 = x * fx2 + cx2\n y2 = y * fy2 + cy2\n return ge.Point(x2, y2)", "def camera_coords_to_world_coords(point, cam_height, cam_angle):\n\n # adjust the axis order\n point = np.array([point[2], point[0], point[1]])\n\n # calculate the vectors of the camera axis in the desired coordinate system\n cam_direction = np.array([np.cos(cam_angle), 0, -np.sin(cam_angle)])\n z = cam_direction\n x = np.cross(np.array([0, 0, 1]), cam_direction)\n y = np.cross(z, x)\n\n # transposed rotation matrix\n rotation = np.vstack([x, y, z])\n\n # translation vector\n translation = np.array([0, 0, cam_height])\n\n return rotation @ (point - translation)", "def homograph_warp(img,pose,plane,intrinsics_a,intrinsics_b,rotation_mode='so3',padding_mode='zeros'):\n\n check_sizes(img, 'img', 'B3HW')\n check_sizes(plane, 'depth', 'B4')\n check_sizes(pose, 'pose', 'B6')\n check_sizes(intrinsics_a, 'intrinsics_a', 'B33')\n check_sizes(intrinsics_b, 'intrinsics_b', 'B33')\n\n batch_size, _, img_height, img_width = img.size()\n\n b, h, w = batch_size,img_height,img_width\n if (pixel_coords is None) or pixel_coords.size(2) < h:\n set_id_grid(img)\n\n homo_mat = homo_vec2mat(pose,plane,intrinsics_a,intrinsics_b,rotation_mode)\n print(homo_mat.squeeze())\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w) # [B, 3, H,W]\n src_pixel_coords = homo_project(current_pixel_coords,homo_mat) # [B,H,W,2]\n projected_img = None\n if torch.__version__ !='1.1.0.post2':\n projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode,align_corners=False)\n\n else:\n projected_img = F.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)\n valid_points = src_pixel_coords.abs().max(dim=-1)[0] <= 1\n\n return projected_img, valid_points", "def testCalculateTransform(self):\n # Create some points in the first frame.\n z = 4.0\n self.evaluator.camera_height = z\n first_points = numpy.array(\n [[0, 0, z], [2, 0, z], [2, 5, z], [0, 5, z]], dtype=numpy.float32)\n # Create a transformation that will move the camera\n R = numpy.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n t = numpy.array([[3.0], [-5.0], [0.0]])\n expected_result = numpy.eye(4)\n expected_result[0:3, 0:3] = R\n expected_result[0:3, 3:] = t\n # Determine where the second points would be given that.\n second_points = (numpy.matmul(\n R, first_points.transpose()) + t).transpose()\n # Create a simple intrinsic matrix to project onto a fictional camera\n intrinsic = numpy.array(\n [[1.0, 0.0, 20.0], [0.0, 1.0, 20.0], [0.0, 0.0, 1.0]])\n # Use no distortion or transformations\n rvec = numpy.zeros((3, 1))\n tvec = rvec\n distortion = 
numpy.zeros((5, 1))\n # Project the points into the camera\n (camera_first_points, _) = cv2.projectPoints(\n first_points, rvec, tvec, intrinsic, distortion)\n camera_first_points = camera_first_points.squeeze()\n (camera_second_points, _) = cv2.projectPoints(\n second_points, rvec, tvec, intrinsic, distortion)\n camera_second_points = camera_second_points.squeeze()\n # Using these projected points, can the object recover the correct initial transform\n result = self.evaluator._calculateTransform(\n camera_first_points, camera_second_points, intrinsic)\n # The matrix comparisions aren't reliable near zero, so check elements manually.\n for i in range(expected_result.shape[0]):\n for j in range(expected_result.shape[1]):\n result_element = result[i, j]\n expected_element = expected_result[i, j]\n self.assertAlmostEqual(result_element, expected_element, 6,\n 'Matrix element ({0:d}, {1:d}) is incorrect.'.format(i, j))", "def apply_projection_transform(Xb, batch_size, image_size):\n d = image_size * 0.3 * intensity\n for i in np.random.choice(batch_size, int(batch_size * p), replace = False): \n tl_top = random.uniform(-d, d) # Top left corner, top margin\n tl_left = random.uniform(-d, d) # Top left corner, left margin\n bl_bottom = random.uniform(-d, d) # Bottom left corner, bottom margin\n bl_left = random.uniform(-d, d) # Bottom left corner, left margin\n tr_top = random.uniform(-d, d) # Top right corner, top margin\n tr_right = random.uniform(-d, d) # Top right corner, right margin\n br_bottom = random.uniform(-d, d) # Bottom right corner, bottom margin\n br_right = random.uniform(-d, d) # Bottom right corner, right margin\n\n transform = ProjectiveTransform()\n transform.estimate(np.array((\n (tl_left, tl_top),\n (bl_left, image_size - bl_bottom),\n (image_size - br_right, image_size - br_bottom),\n (image_size - tr_right, tr_top)\n )), np.array((\n (0, 0),\n (0, image_size),\n (image_size, image_size),\n (image_size, 0)\n )))\n Xb[i] = warp(Xb[i], transform, output_shape=(image_size, image_size), order = 1, mode = 'edge')\n\n return Xb", "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def Pixel2World(geoMatrix, x, y):\r\n ulX = geoMatrix[0]\r\n ulY = geoMatrix[3]\r\n xdist = geoMatrix[1]\r\n ydist = geoMatrix[5]\r\n coorX = (ulX + (x * xdist))\r\n coorY = (ulY + (y * ydist))\r\n return (coorX, coorY)", "def get_landmarks(self, image): # from https://www.paulvangent.com/2016/08/05/emotion-recognition-using-facial-landmarks/\n # Ask the detector to find the bounding boxes of each face. The 1 in the\n # second argument indicates that we should upsample the image 1 time. 
This\n # will make everything bigger and allow us to detect more faces.\n detections = self.detector(image, 1)\n if len(detections) < 1: # Number of faces detected = 0\n # print(\"Number of faces detected: {}\".format(len(detections)))\n return None\n # Draw Facial Landmarks with the predictor class\n shape = self.predictor(image, detections[0])\n xlist = []\n ylist = []\n for i in range(68): # Store X and Y coordinates in two lists\n xlist.append(float(shape.part(i).x))\n ylist.append(float(shape.part(i).y))\n\n landmarks_vectorised = []\n landmarks_vectorised = self.our_ft_landmark(xlist, ylist)# Extaraction des features\n\n xmean = np.mean(xlist)\n ymean = np.mean(ylist)\n xcentral = [(x-xmean) for x in xlist]\n ycentral = [(y-ymean) for y in ylist]\n \n for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):\n landmarks_vectorised.append(w)\n landmarks_vectorised.append(z)\n # landmarks_vectorised.append(x)\n # landmarks_vectorised.append(y)\n meannp = np.asarray((ymean, xmean))\n coornp = np.asarray((z, w))\n dist = np.linalg.norm(coornp-meannp)# Distance euclidienne\n landmarks_vectorised.append(dist)\n landmarks_vectorised.append((math.atan2(y, x)*360)/(2*math.pi))# Calcule de l'ongle entre le moyenne et un point\n\n return landmarks_vectorised", "def project(self, win_width, win_height, fov, viewer_distance):\n factor = fov / (viewer_distance + self.z)\n x = self.x * factor + win_width / 2\n y = -self.y * factor + win_height / 2\n return Point3D(x, y, 1)", "def compute_perspective_transform(self, binary_image):\r\n transform_src = np.float32([[300, 309], [500, 315], [120, 381], [685, 392]])\r\n transform_dst = np.float32([ [0,0], [800, 0], [0,600], [800,600]])\r\n perspective_transform = cv2.getPerspectiveTransform(transform_src, transform_dst)\r\n inverse_perspective_transform = cv2.getPerspectiveTransform(transform_dst, transform_src)\r\n warped_image = cv2.warpPerspective(binary_image, perspective_transform, \r\n (binary_image.shape[1], binary_image.shape[0]), \r\n flags=cv2.INTER_NEAREST)\r\n\r\n return warped_image, inverse_perspective_transform", "def compute_right_camera_pose(left_camera_to_world, left_to_right):\n left_world_to_camera = np.linalg.inv(left_camera_to_world)\n right_world_to_camera = np.matmul(left_to_right, left_world_to_camera)\n right_camera_to_world = np.linalg.inv(right_world_to_camera)\n return right_camera_to_world", "def project_point(self, point: array_like) -> Point:\n # Vector from the point in space to the point on the plane.\n vector_to_plane = Vector.from_points(point, self.point)\n\n # Perpendicular vector from the point in space to the plane.\n vector_projected = self.normal.project_vector(vector_to_plane)\n\n return Point(point) + vector_projected" ]
[ "0.69458926", "0.6836335", "0.633762", "0.62048507", "0.6087586", "0.6003151", "0.5976462", "0.5972177", "0.5914467", "0.58434325", "0.58393484", "0.58110195", "0.57184476", "0.5707809", "0.570757", "0.5687363", "0.5659299", "0.56507516", "0.5646074", "0.5632133", "0.5627521", "0.55827916", "0.55540264", "0.55407435", "0.5528845", "0.5519932", "0.5513553", "0.55134356", "0.55119985", "0.5485858", "0.5475215", "0.5467099", "0.54670936", "0.5461926", "0.54593337", "0.5444985", "0.5433365", "0.5420512", "0.5416187", "0.53870386", "0.53789216", "0.5350785", "0.5348319", "0.5344144", "0.53421396", "0.5325166", "0.53172404", "0.5313135", "0.53057545", "0.528235", "0.5280021", "0.5274016", "0.5272212", "0.5260198", "0.52398884", "0.5237741", "0.52354866", "0.5230936", "0.5230899", "0.52304184", "0.5226605", "0.5222711", "0.52208555", "0.51970506", "0.51931155", "0.51879257", "0.51879257", "0.5184806", "0.5183722", "0.5180371", "0.51756227", "0.5162947", "0.5156416", "0.51522046", "0.5148344", "0.5147035", "0.5145813", "0.51457435", "0.51449305", "0.5122892", "0.5114144", "0.5105978", "0.51047045", "0.50949556", "0.5092202", "0.5090929", "0.5086944", "0.5081762", "0.5080508", "0.50801504", "0.5062368", "0.50616735", "0.5060612", "0.50604856", "0.5059149", "0.5055042", "0.5054711", "0.50542635", "0.5054164", "0.5052485" ]
0.7174655
0
Compute the corresponding ground point given the height in the world system
def ImageToGround_GivenZ(self, imagePoints, Z_values): cameraPoints = self.ImageToCamera(imagePoints) cameraPoints = cameraPoints.T pars = self.exteriorOrientationParameters X0 = pars[0] Y0 = pars[1] Z0 = pars[2] T = np.array([[X0], [Y0], [Z0]]) omega = pars[3] phi = pars[4] kappa = pars[5] R = Compute3DRotationMatrix(omega, phi, kappa) f = self.camera.focalLength # allocating memory for return array groundPoints = [] for i in range(len(cameraPoints[1])): camVec = np.insert(cameraPoints[:, i], np.size(cameraPoints), -f) lam = (Z_values - Z0) / (np.dot(R[2, :], camVec)) X = X0 + lam * np.dot(R[0, :], camVec) Y = Y0 + lam * np.dot(R[1, :], camVec) xy = [X, Y, Z_values] groundPoints.append(xy) groundPoints = np.array(groundPoints) return groundPoints
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def template_height(self, x, y):\n\n tx = float(x) / self.world_size\n tx = tx * self.global_template.size\n\n ty = float(y) / self.world_size\n ty = ty * self.global_template.size\n\n tx1 = int(tx)\n dx = tx - tx1\n tx2 = tx1 + 1\n\n ty1 = int(ty)\n dy = ty - ty1\n ty2 = ty1 + 1\n\n if tx2 > self.global_template.size-1:\n tx2 = tx1\n if ty2 > self.global_template.size-1:\n ty2 = ty1\n\n A = self.global_template[tx1, ty1]\n B = self.global_template[tx2, ty1]\n C = self.global_template[tx1, ty2]\n D = self.global_template[tx2, ty2]\n\n E = self.cosine_interpolate(A, B, dx)\n F = self.cosine_interpolate(C, D, dx)\n\n return self.cosine_interpolate(E, F, dy)\n #if G == 0:\n #return 0\n #if G > 0:\n #G = G ** (1/2.)\n #else:\n #G = -(abs(G) ** (1/2.))\n\n #G = int(round(G\n\n #return G", "def elevation(self):\n return self.altitude - self.heightAboveGround", "def llh(self):\n return Station._ellipsoid.geodetic(self.xyz())", "def ground_amp(self) -> torch.Tensor:\n return torch.abs(self.ground_vis)", "def get_ground_vector(self, label):\n raise NotImplementedError", "def ObserverGravity(latitude, height):\n s2 = math.sin(math.radians(latitude)) ** 2\n g0 = 9.7803253359 * (1.0 + 0.00193185265241*s2) / math.sqrt(1.0 - 0.00669437999013*s2)\n return g0 * (1.0 - (3.15704e-07 - 2.10269e-09*s2)*height + 7.37452e-14*height*height)", "def is_ground(f):\n return dmp_ground_p(f.rep, f.lev)", "def lower_bound(height):\n tan_108 = math.tan(math.radians(108))\n lower_boundary = 250 + height / tan_108\n return lower_boundary", "def random_location(self):\r\n\r\n while True:\r\n pt = (random.uniform(self.worldbox.tl[0], self.worldbox.br[0]),\r\n random.uniform(self.worldbox.tl[1], self.worldbox.br[1]))\r\n if not self.is_wall(pt) and not self.is_target(pt):\r\n return pt", "def ground_phase(self) -> torch.Tensor:\n return torch.angle(self.ground_vis)", "def getPropOfGround(x):\n return K.sum(K.sum(x,axis = 1),axis = 1)/65536", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def getNearestGroundSurfaceZ(rootNode, height):\n \n if isinstance(rootNode, NodePath) and not rootNode.isEmpty():\n pFrom = Point3(rootNode.getPos(render))\n pDown = Point3(pFrom - Point3(0, 0, height))\n downTest = base.physicsWorld.rayTestClosest(pFrom, pDown, CIGlobals.FloorGroup | CIGlobals.StreetVisGroup)\n if downTest.hasHit():\n return downTest.getHitPos().z\n else:\n return -1\n else:\n raise Exception(\"#getNearestGroundSurfaceZ(): Requires a non-empty NodePath to ray test on!\")\n \n return -1", "def find_graveyard_inner_box():\n volumes = get_volume_list()\n graveyard = 0\n for v in volumes:\n if volume_is_graveyard( v ): \n graveyard = v\n break\n if graveyard == 0:\n raise DagmcError( 'Could not find a graveyard volume' )\n\n xyz_lo, xyz_hi = volume_boundary( graveyard )\n xyz_mid = numpy.array( [ (hi+lo)/2.0 for (hi,lo) in zip( xyz_hi, xyz_lo) ], dtype=numpy.float64 )\n\n result_lo = numpy.array( [0]*3, dtype=numpy.float64 )\n result_hi = numpy.array( [0]*3, dtype=numpy.float64 )\n\n for i in range(0,3):\n uvw = [0,0,0]\n uvw[i] = 1\n lo_mid = xyz_mid.copy()\n lo_mid[i] = xyz_lo[i]\n _, dist = fire_one_ray( graveyard, lo_mid, uvw )\n result_lo[i] = lo_mid[i] + dist\n uvw[i] = -1\n hi_mid = xyz_mid.copy()\n hi_mid[i] = xyz_hi[i]\n _, dist = fire_one_ray( graveyard, hi_mid, uvw )\n result_hi[i] = hi_mid[i] - dist\n \n return result_lo, result_hi", "def upper_bound(height):\n tan_72 = math.tan(math.radians(72))\n upper_boundary = 250 + height / tan_72\n return upper_boundary", 
"def get_h(self,cell,target = Cell(10,10,True)):\n \n return ( abs(cell.x-target.x) + abs(cell.y-target.y) )", "def ground_range(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return weapon.range\n return 0", "def area2(hedge, point) -> float:\r\n\r\n pa = hedge.twin.origin\r\n pb = hedge.origin\r\n pc = point\r\n return (pb.x - pa.x) * (pc[1] - pa.y) - (pc[0] - pa.x) * (pb.y - pa.y)", "def get_ly(self):\r\n return self.dy * self.ny - self.oy", "def get_haversine_displacement(x, y, home_center):\n sign_x = 1 if x > home_center.x else -1\n displacement_x = ti.geogr.point_distances.haversine_dist(\n x, home_center.y, home_center.x, home_center.y\n )[0]\n sign_y = 1 if y > home_center.y else -1\n displacement_y = ti.geogr.point_distances.haversine_dist(\n home_center.x, y, home_center.x, home_center.y\n )[0]\n return displacement_x * sign_x, displacement_y * sign_y", "def ground_dps(self) -> Union[int, float]:\n if hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in [TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value]), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def getBottom( self, X, Y, Z):\n xb,yb,zb = self.transform( X,Y,Z)\n \n gauss = beam( xb,yb,zb, self.w[0], self.w[1], self.l)\n intensity = (2/np.pi)* self.mW/1000. /self.w[0]/self.w[1] *gauss # W um^-2\n \n latticeBot = 4*np.sqrt(self.retro*self.alpha) \\\n + 1 + self.retro - 2*np.sqrt(self.retro*self.alpha)\n\n return uL(self.l)*intensity * latticeBot", "def distance_along_ground(wp1,wp2):\n\n #**************************************************************************\n # Convert UTM coordinates to global coordinates\n #**************************************************************************\n if isinstance(wp1, UTMWaypoint):\n wp1 = wp1.to_global_waypoint()\n\n if isinstance(wp2, UTMWaypoint):\n wp2 = wp2.to_global_waypoint()\n\n #**************************************************************************\n # Convert parameters if they are waypoint messages\n # Return None if they are not in GLOBAL frame\n #**************************************************************************\n if isinstance(wp1, msg.Waypoint):\n wp1 = GlobalWaypoint.from_waypoint_message(wp1)\n if wp1 is None:\n return None\n \n if isinstance(wp2, msg.Waypoint):\n wp2 = GlobalWaypoint.from_waypoint_message(wp2)\n if wp2 is None:\n return None\n\n #**************************************************************************\n # python math functions work in radians, so we need to convert\n # degrees to radians first\n #**************************************************************************\n lat1 = math.radians(wp1.latitude)\n lon1 = math.radians(wp1.longitude)\n lat2 = math.radians(wp2.latitude)\n lon2 = math.radians(wp2.longitude)\n\n #**************************************************************************\n # Estimate the distance along the ground using the Haversine formula\n #**************************************************************************\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n\n horz_dist = RADIUS_OF_EARTH_IN_METRES * c\n\n return horz_dist", "def ground_dps(self) -> Union[int, float]:\n if 
hasattr(self.type_data.proto, \"weapons\"):\n weapons = self.type_data.proto.weapons\n weapon = next(\n (weapon for weapon in weapons if weapon.type in {TARGET_TYPE.Ground.value, TARGET_TYPE.Any.value}), None\n )\n if weapon:\n return (weapon.damage * weapon.attacks) / weapon.speed\n return 0", "def get_footprint_area(self):\n x = self.dimension_along(0)\n y = self.dimension_along(1)\n return x * y", "def point2wgs84_9603(self, datum):\n \"\"\"\n h is the height above the ellipsoid. This is the height value that is \n delivered by GPS satellite observations but is not the gravity-related height \n value which is normally used for national mapping and levelling operations. The\n gravity-related height (H) is usually the height above mean sea level or an \n alternative level reference for the country. If one starts with a gravity-related \n height H, it will be necessary to convert it to an ellipsoid height (h) before \n using the above transformation formulas. See section 4.11.1. For the WGS 84 \n ellipsoid the difference between ellipsoid and mean sea level can vary between \n values of -100m in the Sri Lanka area to +80m in the North Atlantic.)\n \"\"\"\n h=0\n # a is the semi-major axis of the ellipsoid of the given datum.\n a = datum.axis\n\n # f is the flattening of the ellipsoid of the given datum \n # (get_flattening actually returns the inverse flattening).\n f = 1.0/datum.flattening\n \n # dx, dy, dz are the x, y, z offset parameters for the given datum transformation\n # to WGS84\n dx = datum.dx\n dy = datum.dy\n dz = datum.dz\n \n # latr, lngr are the latitude and longitude in radians\n latr = math.radians(self.lat)\n lngr = math.radians(self.lng)\n\n # e is the eccentricity of the ellipsoid\n e_squared = f*(2-f)\n\n # nu is the prime vertical radius of curvature at latr\n nu = a/math.pow((1-e_squared*sqr(math.sin(latr))),0.5)\n\n X = (nu+h)*math.cos(latr)*math.cos(vlambda)\n Y = (nu+h)*math.cos(latr)*math.sin(vlambda)\n Z = ((1 - math.pow(e,2))*nu + h)*math.sin(phi)\n\n Xwgs84 = X+dx\n Ywgs84 = Y+dy\n Zwgs84 = Z+dz\n\n epsilon = e_squared/(1-e_squared)\n b = a*(1-f)\n p = math.pow(sqr(Xwgs84)+sqr(Ywgs84),0.5)\n q = math.atan2((Zwgs84*a),(p*b))\n\n latrwgs84 = math.atan2( (Zwgs84 + epsilon*b*math.pow(math.sin(q)),3)), \\\n (p - e_squared*a*math.pow(math.cos(q),3) )\n lngrwgs84 = math.atan2(Ywgs84, Xwgs84)\n hwgs84 = (p/math.cos(latrwgs84))-nu\n newlng = lng180(math.degrees(lngrwgs84))\n newlat = math.degrees(latrwgs84)\n return Point(float(truncate(newlng,DEGREE_DIGITS)), float(truncate(newlat,DEGREE_DIGITS)))", "def estimate_ground_height(P):\n assert P.shape[1] == 4\n precision = 1024\n\n xrg = np.linspace(P[:, 0].min(), P[:, 0].max(), precision + 1)\n yrg = np.linspace(P[:, 1].min(), P[:, 1].max(), precision + 1)\n\n val_idx = P[:, 3] == 2\n\n npoints, _, _ = np.histogram2d(P[val_idx, 0], P[val_idx, 1], [xrg, yrg])\n total_height, _, _ = np.histogram2d(P[val_idx, 0], P[val_idx, 1],\n [xrg, yrg], weights=P[val_idx, 2])\n mean_height = total_height / npoints\n\n mean_height_large = infill_large_regions(mean_height)\n mean_height_fine = infill_small_regions(mean_height_large)\n results = {'original': mean_height,\n 'coarse': mean_height_large,\n 'fine': mean_height_fine,\n 'xgrid': .5 * (xrg[:-1] + xrg[1:]),\n 'ygrid': .5 * (yrg[:-1] + yrg[1:])}\n return results", "def getZCoord(self, x, y):\n n = self.normal()\n z = (-n.x * (x - self.p0.x) - n.y * (y - self.p0.y) + n.z * self.p0.z) / n.z\n return z", "def altitude(self):\r\n pressure = self.pressure # in Si units for hPascal\r\n 
return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903))", "def trap(height: List[int]) -> int:\n # No heights passed!\n if not height:\n return 0\n # Max from left\n max_L = 0\n L = len(height)\n left = [0] * L\n for i in range(L):\n if height[i] > max_L:\n max_L = height[i]\n left[i] = max_L\n # Max from right\n max_R = 0\n right = [0] * L\n for i in range(L-1, -1, -1):\n if height[i] > max_R:\n max_R = height[i]\n right[i] = max_R\n # Get water height / area at each point on map\n area = 0\n for i in range(1, L-1):\n area += max(0, min(left[i-1], right[i+1]) - height[i])\n return area", "def get_specific_gravity(self) -> float:\n try:\n datalist = self.get_data()\n data = datalist[3]\n if data.endswith('\\x00'):\n data = data.rstrip('\\x00')\n return float(data)\n else:\n return float(data)\n except Exception as err:\n print(f'get_specific_gravity error: {err}')\n return -1", "def exit_velocity(self):\n\n return self.exit_mach() * \\\n (self.specific_heat_ratio * self.gas_constant\n * self.exit_temperature()) ** (1 / 2)", "def _get_longitude(self, longitude, hemisphere):\n if not isinstance(longitude, float):\n longitude = float(longitude)\n if hemisphere.lower() == \"e\":\n return longitude\n if hemisphere.lower() == \"w\":\n return -1 * longitude", "def _world_point(self, point_3d):\n return self.obj.matrix_world @ point_3d", "def northing(self):\r\n x, y = self.lonlat2xy(self.longitude, self.latitude)\r\n return y", "def topography(x,y):\n \n z = -x/10\n \n N = len(x)\n for i in range(N):\n # Step\n if 10 < x[i] < 12:\n z[i] += 0.4 - 0.05*y[i]\n \n # Constriction\n if 27 < x[i] < 29 and y[i] > 3:\n z[i] += 2\n \n # Pole\n if (x[i] - 34)**2 + (y[i] - 2)**2 < 0.4**2:\n z[i] += 2\n \n return z", "def altitude(p):\r\n \r\n R = 290 #specific gas constant \r\n T = 93.65 #surface temperature K from A.Coustenis book\r\n g = 1.354 #surface gravity from A.Coustenis book\r\n p0 = 1467 #surface pressure in hPa 6.1 for mars\r\n \r\n z = np.empty_like(p)\r\n \r\n for i in range(p.shape[0]):\r\n z[i] = (-1)*(R*T/g)*np.log((p[i])/p0)/(10**3)\r\n \r\n # Make into an xarray DataArray\r\n z_xr = xr.DataArray(z, coords=[z], dims=['pfull'])\r\n z_xr.attrs['units'] = 'km'\r\n \r\n #below is the inverse of the calculation\r\n #p[i] = p0*np.exp((-1)*z[i]*(10**3)/((R*T/g)))\r\n \r\n return z_xr", "def calculate(self):\n\n gt = self.ground_truth.flatten().astype(np.int8)\n seg = self.segmentation.flatten().astype(np.int8)\n\n probability_difference = np.absolute(gt - seg).sum()\n probability_joint = (gt * seg).sum()\n\n if probability_joint != 0:\n return probability_difference / (2. 
* probability_joint)\n else:\n return -1", "def height(self, x):\n\t\treturn np.interp(x, self.x, self.z)", "def filter_ground(jparams):\n\n # load las file and relevant parameters\n point_cloud = File(jparams['input-las'], mode='r')\n scale = point_cloud.header.scale[0]\n print(point_cloud.header.min)\n print('- Flattening point cloud')\n gridded_pc = point_cloud_to_grid(point_cloud=point_cloud, tf=jparams['thinning-factor'],\n cell_size=int(jparams['gf-cellsize'] / scale))\n\n ground_points, unprocessed_points, ll_origin = gridded_pc[0], gridded_pc[1], gridded_pc[2]\n\n print('- Growing terrain')\n dt = startin.DT()\n dt.insert(list(ground_points))\n dt = grow_terrain(tin=dt, p=unprocessed_points, gp=ground_points,\n max_distance=int(jparams['gf-distance'] / scale),\n max_angle=jparams['gf-angle'])\n\n print('- Writing point cloud')\n with File(jparams['output-las'], mode='w', header=point_cloud.header) as out_file:\n gp = dt.all_vertices()[1:]\n out_file.X = [p[0] for p in gp]\n out_file.Y = [p[1] for p in gp]\n out_file.Z = [p[2] for p in gp]\n\n print('- Creating raster (TIN)\\n\\t- Interpolating (TIN)')\n dg = tin_interp(tin=dt, cell_size=int(jparams['grid-cellsize'] / scale))\n\n print('\\t- Writing Esri Ascii (TIN)')\n write_asc(grid=np.rot90(dg[0]) * scale + point_cloud.header.min[2],\n cell_size=jparams['grid-cellsize'],\n fn=jparams['output-grid-tin'],\n origin=(point_cloud.header.min[0]+dg[1][0]*scale, point_cloud.header.min[1] + dg[1][1]*scale),\n depth=2)\n\n print('- Creating raster (IDW)\\n\\t- Interpolating (IDW)')\n ig = idw_interp(tin=dt, cell_size=int(jparams['grid-cellsize'] / scale),\n radius=jparams['idw-radius'] / scale, \n power=jparams['idw-power'])\n\n print('\\t- Writing Esri Ascii (IDW)')\n write_asc(grid=np.rot90(ig[0]) * scale + point_cloud.header.min[2],\n cell_size=jparams['grid-cellsize'],\n fn=jparams['output-grid-idw'],\n origin=(point_cloud.header.min[0]+ig[1][0]*scale, point_cloud.header.min[1]+ig[1][1]*scale),\n depth=2)\n\n return", "def z(self):\n return self.coords[2]", "def value_at_location(self, xyz, interpolate=INTERP_LINEAR):\n x, y, z = xyz\n if self.is_depth:\n z = -z\n v = self.gxvoxe.value(x, y, z, interpolate)\n if v == gxapi.rDUMMY:\n return None\n return v", "def closest_cruising_altitude(altitude):\n return 1000 * ((altitude + 500) // 1000)", "def get_water_level(df):\n\n water,lat = [],[]\n #gets just ocean photons\n df = df.loc[df.Conf_ocean == 4]\n if len(df) == 0:\n return None\n #getting photons +- 2 of the median height of photons\n df = df.loc[(df.Height > df.Height.median() - 2) & (df.Height < df.Height.median() + 2)]\n\n #creating a df with just the latitude and height\n sea_level = pd.DataFrame([df.Height,df.Latitude]).T.dropna()\n sea_level.columns = ['water','latitude']\n\n #getting photons +- 1.25 of the median height of photons\n sea_level = sea_level.loc[(sea_level.water > sea_level.water.median() -1.25) & (sea_level.water < sea_level.water.median() +1.25)]\n\n #fitting linear line to remaining points\n z = np.polyfit(sea_level.latitude, sea_level.water,1)\n f = np.poly1d(z)\n\n #getting points with <2m abs error\n sea_level['abs_diff'] = np.abs(sea_level.water - f(sea_level.latitude))\n sea_level = sea_level.loc[sea_level.abs_diff < 2]\n #fitting a parabolic function to the remaining points\n z2 = np.polyfit(sea_level.latitude, sea_level.water,2)\n f2 = np.poly1d(z2)\n\n return f2", "def topographic_altitude(lat, lon):\n global __model\n type_output = type(lat)\n lat = prepare_input_array(lat)\n lon = 
prepare_input_array(lon)\n lon = np.mod(lon, 360)\n val = __model.topographic_altitude(lat, lon)\n val = np.maximum(val, 1e-7)\n return prepare_output_array(val, type_output) * u.km", "def find_hypocenter(self):\n ps_hypo = min(self.pointsources, key=lambda x: x.time_shift)\n self.hypocenter_longitude = ps_hypo.longitude\n self.hypocenter_latitude = ps_hypo.latitude\n self.hypocenter_depth_in_m = ps_hypo.depth_in_m", "def z(self):\r\n return self.position.z", "def project(self, (lng, lat)):\n x = lng * DEG_TO_RAD\n lat = max(min(MAX_LATITUDE, lat), -MAX_LATITUDE)\n y = lat * DEG_TO_RAD\n y = math.log(math.tan((math.pi / 4) + (y / 2)))\n return (x*EARTH_RADIUS, y*EARTH_RADIUS)", "def How_long_the_projectile_was_in_the_air(i_vy , g , i_h):\n return float((i_vy + math.sqrt((math.pow(-i_vy,2) -4*(.5*g)*(-i_h))))/g)", "def z_halo(self): \n return self.coords_halo[2]", "def _get_midpoint(self, p1: Point3D, p2: Point3D, displace: bool) -> Point3D:\n key = (p1, p2) if p1 < p2 else (p2, p1)\n if key not in self._height_cache:\n x1, y1, z1 = p1\n x2, y2, z2 = p2\n if displace:\n displacement = random.gauss(0, math.hypot(x1-x2, z1-z2) * self._scale)\n else:\n displacement = 0\n self._height_cache[key] = (x1+x2)/2, (y1+y2)/2 + displacement, (z1+z2)/2\n return self._height_cache[key]", "def get_dimensions(self):\n x = max(self.bodies, key=lambda p: p.position[0]).position[0]\n y = max(self.bodies, key=lambda p: p.position[1]).position[1]\n return max(x, y) * 1.2", "def get_pos(self, off_w=0, off_l=0, off_h=0):\n try:\n return self.world_grid[self.w + off_w][self.l + off_l][self.h + off_h]\n except IndexError:\n return blocks['wall']", "def __getxyB(x, y):\n\t\treturn x*3+y", "def get_height(self, p1: Point3D, p2: Point3D, p3: Point3D) -> float:\n return self._heightmap[self._get_heightmap_key(p1,p2,p3)]", "def pressure(altitude):\n t = temperature(altitude) # R\n if altitude <= 36152:\n p = 2116*(t/518.6)**5.256 # psf\n else:\n p = 473.1*exp(1.73-0.000048*altitude) # psf\n return p", "def sea_still_water_pressure(z, t1, rho=1.025, g=9.81):\r\n\r\n if z <= t1:\r\n return rho * g * (t1 - z)\r\n else:\r\n return 0", "def get_specific_heat() -> float:\n return 1006.0", "def altitude(self):\n if self.__altitude:\n return sum(self.__altitude) / len(self.__altitude)\n else:\n return -9999", "def getSlantRangeElevationHeight(self, groundRange, z):\r\n \r\n lat = self.ctrLat * pi / 180.0\r\n \r\n #figure out earth's radius at radar's lat ... non-spherical earth model\r\n e2 = self.eccen # First eccentricity squared - WGS-84 value = 0.00669437999013\r\n a = self.Requator # Equatorial radius - WGS-84 value = 6378137.0\r\n Rearth = a/sqrt(1-e2*(sin(lat))**2) # radius of curvature\r\n \r\n Rprime = self.effectiveRadiusMultiplier * self.Requator\r\n \r\n h = array(z - self.ctrAlt, dtype='float64')\r\n s = array(groundRange, dtype='float64')\r\n \r\n # Use law of cosines (Side-Angle-Side triangle theorem) with \r\n # R', R'+h as sides and s/R' as the angle to get slant range\r\n r = sqrt(Rprime**2.0 + (Rprime+h)**2.0 - 2*(Rprime+h)*Rprime*cos(s/Rprime))\r\n # Inverse of eq. 
2.28c in Doviak and Zrnic 1993\r\n # Will return NaN for r=0\r\n el = arccos((Rprime+h) * sin(s/Rprime) / r) \r\n el *= 180.0 / pi\r\n \r\n return r, el", "def bottom(self) -> float:\n points = self.get_adjusted_points()\n y_points = [point[1] for point in points]\n return min(y_points)", "def to_ground(f):\n return dmp_to_ground(f.rep, f.lev, f.dom)", "def impact(self, ground):\n return self.position[1] > ground", "def ground(array: np.ndarray, value: float = 0) -> np.ndarray:\n return array - array.min() + value", "def get_height_of_surface_gate(data, setup={}):\n idx = get_index_of_surface_gate(data, setup)\n nt = range(len(idx))\n return data['alt'][nt, idx]", "def g(lat, z) :\n return (g0(lat) - (3.085462 * 1.e-4 + 2.27 * 1.e-7 * np.cos(2*lat*np.pi/180.)) * z\n + (7.254 * 1e-11 + 1e-13 * np.cos(2*lat*np.pi/180.)) * z**2\n - (1.517 * 1e-17 + 6 * 1e-20 * np.cos(2*lat*np.pi/180.)) * z**3)", "def get_cell(self, business):\n x = self.longitudes.searchsorted(business.longitude) - 1\n y = self.latitudes.searchsorted(business.latitude) - 1\n return x, y", "def __getitem__(self,point):\n point=point.normalize(self.size)\n return self.terrain[point.y][point.x]", "def spatial_var(map_):\n expx, expy = spatial_expval(map_)\n map_ = map_ / np.sum(map_)\n x, y = np.meshgrid(np.arange(map_.shape[1]), np.arange(map_.shape[0]))\n\n return np.sum(map_ * ((x - expx) ** 2 + (y - expy) ** 2))", "def _raw_phys_to_eng(self, physics_value):\n y = [val - physics_value for val in self.y]\n new_pp = PchipInterpolator(self.x, y)\n roots = new_pp.roots()\n if len(roots) == 1:\n x = roots[0]\n return x\n else:\n raise UniqueSolutionException(\"The function does not have any solution.\")", "def predict_qth(self):\n return (self.latitude, -self.longitude, self.altitude)", "def _get_grounding_from_name(self):\n grounding_name = remove_article(self.grounding)\n\n for area_name, area in self.map.areas.iteritems():\n if grounding_name == area_name:\n grounding = area\n\n for object_name, object_ in self.map.objects.iteritems():\n if grounding_name == object_name:\n grounding = object_\n\n for cop_name, cop in self.map.cops.iteritems():\n if grounding_name == cop_name:\n grounding = cop\n break\n else:\n if grounding_name == 'Deckard':\n logging.debug(\"No grounding available for Deckard yet.\")\n return None\n\n try:\n grounding\n except NameError:\n logging.error(\"No grounding available for {}\".format(grounding_name))\n return None\n\n return grounding", "def get_wind(self, time, alt, lat, lng, pressure_heights=None):\n t_val = time / 3.0\n t_idx = int(t_val)\n t_lerp = t_val - t_idx\n t_lerp_m = 1.0 - t_lerp\n \n lat_val = (lat + 90.0) * 2.0\n lat_idx = int(lat_val)\n lat_lerp = lat_val - lat_idx\n lat_lerp_m = 1.0 - lat_lerp\n\n lng_val = lng * 2.0\n lng_idx = int(lng_val)\n lng_lerp = lng_val - lng_idx\n lng_lerp_m = 1.0 - lng_lerp\n \n if pressure_heights is None:\n pressure_heights = self.get_pressure_heights(time, lat, lng)\n\n p_idx = bisect.bisect(pressure_heights, alt) - 1\n\n if p_idx < 0:\n p_idx = 0\n elif p_idx > self.shape[1] - 1:\n p_idx = self.shape[1] - 2\n\n a_llll = self._read_var(t_idx, p_idx, 0, lat_idx, lng_idx)\n a_lllr = self._read_var(t_idx, p_idx, 0, lat_idx, lng_idx + 1)\n a_llrl = self._read_var(t_idx, p_idx, 0, lat_idx + 1, lng_idx)\n a_llrr = self._read_var(t_idx, p_idx, 0, lat_idx + 1, lng_idx + 1)\n a_lrll = self._read_var(t_idx, p_idx + 1, 0, lat_idx, lng_idx)\n a_lrlr = self._read_var(t_idx, p_idx + 1, 0, lat_idx, lng_idx + 1)\n a_lrrl = self._read_var(t_idx, p_idx + 1, 0, lat_idx + 
1, lng_idx)\n a_lrrr = self._read_var(t_idx, p_idx + 1, 0, lat_idx + 1, lng_idx + 1)\n a_rlll = self._read_var(t_idx + 1, p_idx, 0, lat_idx, lng_idx)\n a_rllr = self._read_var(t_idx + 1, p_idx, 0, lat_idx, lng_idx + 1)\n a_rlrl = self._read_var(t_idx + 1, p_idx, 0, lat_idx + 1, lng_idx)\n a_rlrr = self._read_var(t_idx + 1, p_idx, 0, lat_idx + 1, lng_idx + 1)\n a_rrll = self._read_var(t_idx + 1, p_idx + 1, 0, lat_idx, lng_idx)\n a_rrlr = self._read_var(t_idx + 1, p_idx + 1, 0, lat_idx, lng_idx + 1)\n a_rrrl = self._read_var(t_idx + 1, p_idx + 1, 0, lat_idx + 1, lng_idx)\n a_rrrr = self._read_var(t_idx + 1, p_idx + 1, 0, lat_idx + 1,\n lng_idx + 1)\n\n u_llll = self._read_var(t_idx, p_idx, 1, lat_idx, lng_idx)\n u_lllr = self._read_var(t_idx, p_idx, 1, lat_idx, lng_idx + 1)\n u_llrl = self._read_var(t_idx, p_idx, 1, lat_idx + 1, lng_idx)\n u_llrr = self._read_var(t_idx, p_idx, 1, lat_idx + 1, lng_idx + 1)\n u_lrll = self._read_var(t_idx, p_idx + 1, 1, lat_idx, lng_idx)\n u_lrlr = self._read_var(t_idx, p_idx + 1, 1, lat_idx, lng_idx + 1)\n u_lrrl = self._read_var(t_idx, p_idx + 1, 1, lat_idx + 1, lng_idx)\n u_lrrr = self._read_var(t_idx, p_idx + 1, 1, lat_idx + 1, lng_idx + 1)\n u_rlll = self._read_var(t_idx + 1, p_idx, 1, lat_idx, lng_idx)\n u_rllr = self._read_var(t_idx + 1, p_idx, 1, lat_idx, lng_idx + 1)\n u_rlrl = self._read_var(t_idx + 1, p_idx, 1, lat_idx + 1, lng_idx)\n u_rlrr = self._read_var(t_idx + 1, p_idx, 1, lat_idx + 1, lng_idx + 1)\n u_rrll = self._read_var(t_idx + 1, p_idx + 1, 1, lat_idx, lng_idx)\n u_rrlr = self._read_var(t_idx + 1, p_idx + 1, 1, lat_idx, lng_idx + 1)\n u_rrrl = self._read_var(t_idx + 1, p_idx + 1, 1, lat_idx + 1, lng_idx)\n u_rrrr = self._read_var(t_idx + 1, p_idx + 1, 1, lat_idx + 1,\n lng_idx + 1)\n\n v_llll = self._read_var(t_idx, p_idx, 2, lat_idx, lng_idx)\n v_lllr = self._read_var(t_idx, p_idx, 2, lat_idx, lng_idx + 1)\n v_llrl = self._read_var(t_idx, p_idx, 2, lat_idx + 1, lng_idx)\n v_llrr = self._read_var(t_idx, p_idx, 2, lat_idx + 1, lng_idx + 1)\n v_lrll = self._read_var(t_idx, p_idx + 1, 2, lat_idx, lng_idx)\n v_lrlr = self._read_var(t_idx, p_idx + 1, 2, lat_idx, lng_idx + 1)\n v_lrrl = self._read_var(t_idx, p_idx + 1, 2, lat_idx + 1, lng_idx)\n v_lrrr = self._read_var(t_idx, p_idx + 1, 2, lat_idx + 1, lng_idx + 1)\n v_rlll = self._read_var(t_idx + 1, p_idx, 2, lat_idx, lng_idx)\n v_rllr = self._read_var(t_idx + 1, p_idx, 2, lat_idx, lng_idx + 1)\n v_rlrl = self._read_var(t_idx + 1, p_idx, 2, lat_idx + 1, lng_idx)\n v_rlrr = self._read_var(t_idx + 1, p_idx, 2, lat_idx + 1, lng_idx + 1)\n v_rrll = self._read_var(t_idx + 1, p_idx + 1, 2, lat_idx, lng_idx)\n v_rrlr = self._read_var(t_idx + 1, p_idx + 1, 2, lat_idx, lng_idx + 1)\n v_rrrl = self._read_var(t_idx + 1, p_idx + 1, 2, lat_idx + 1, lng_idx)\n v_rrrr = self._read_var(t_idx + 1, p_idx + 1, 2, lat_idx + 1,\n lng_idx + 1)\n\n a_lll = a_llll * t_lerp_m + a_rlll * t_lerp\n a_llr = a_lllr * t_lerp_m + a_rllr * t_lerp\n a_lrl = a_llrl * t_lerp_m + a_rlrl * t_lerp\n a_lrr = a_llrr * t_lerp_m + a_rlrr * t_lerp\n a_rll = a_lrll * t_lerp_m + a_rrll * t_lerp\n a_rlr = a_lrlr * t_lerp_m + a_rrlr * t_lerp\n a_rrl = a_lrrl * t_lerp_m + a_rrrl * t_lerp\n a_rrr = a_lrrr * t_lerp_m + a_rrrr * t_lerp\n\n u_lll = u_llll * t_lerp_m + u_rlll * t_lerp\n u_llr = u_lllr * t_lerp_m + u_rllr * t_lerp\n u_lrl = u_llrl * t_lerp_m + u_rlrl * t_lerp\n u_lrr = u_llrr * t_lerp_m + u_rlrr * t_lerp\n u_rll = u_lrll * t_lerp_m + u_rrll * t_lerp\n u_rlr = u_lrlr * t_lerp_m + u_rrlr * t_lerp\n u_rrl = u_lrrl * t_lerp_m + 
u_rrrl * t_lerp\n u_rrr = u_lrrr * t_lerp_m + u_rrrr * t_lerp\n\n v_lll = v_llll * t_lerp_m + v_rlll * t_lerp\n v_llr = v_lllr * t_lerp_m + v_rllr * t_lerp\n v_lrl = v_llrl * t_lerp_m + v_rlrl * t_lerp\n v_lrr = v_llrr * t_lerp_m + v_rlrr * t_lerp\n v_rll = v_lrll * t_lerp_m + v_rrll * t_lerp\n v_rlr = v_lrlr * t_lerp_m + v_rrlr * t_lerp\n v_rrl = v_lrrl * t_lerp_m + v_rrrl * t_lerp\n v_rrr = v_lrrr * t_lerp_m + v_rrrr * t_lerp\n\n a_ll = a_lll * lat_lerp_m + a_lrl * lat_lerp\n a_lr = a_llr * lat_lerp_m + a_lrr * lat_lerp\n a_rl = a_rll * lat_lerp_m + a_rrl * lat_lerp\n a_rr = a_rlr * lat_lerp_m + a_rrr * lat_lerp\n\n u_ll = u_lll * lat_lerp_m + u_lrl * lat_lerp\n u_lr = u_llr * lat_lerp_m + u_lrr * lat_lerp\n u_rl = u_rll * lat_lerp_m + u_rrl * lat_lerp\n u_rr = u_rlr * lat_lerp_m + u_rrr * lat_lerp\n\n v_ll = v_lll * lat_lerp_m + v_lrl * lat_lerp\n v_lr = v_llr * lat_lerp_m + v_lrr * lat_lerp\n v_rl = v_rll * lat_lerp_m + v_rrl * lat_lerp\n v_rr = v_rlr * lat_lerp_m + v_rrr * lat_lerp\n\n a_l = a_ll * lng_lerp_m + a_lr * lng_lerp\n a_r = a_rl * lng_lerp_m + a_rr * lng_lerp\n\n u_l = u_ll * lng_lerp_m + u_lr * lng_lerp\n u_r = u_rl * lng_lerp_m + u_rr * lng_lerp\n\n v_l = v_ll * lng_lerp_m + v_lr * lng_lerp\n v_r = v_rl * lng_lerp_m + v_rr * lng_lerp\n\n p_lerp = ((alt - a_l) / (a_r - a_l))\n p_lerp_m = 1.0 - p_lerp\n\n u = u_l * p_lerp_m + u_r * p_lerp\n v = v_l * p_lerp_m + v_r * p_lerp\n\n return u, v", "def bottom(self) -> float:\n bottom = 0\n for part in self.line_parts:\n if part.state.rise < 0 and -part.state.rise > bottom:\n bottom = -part.state.rise\n return bottom", "def get_obj_z(self, goal):\n # half of midpoint\n if self.object_width < 0:\n rospy.loginfo('no image logged')\n return self.pwm_center\n\n # object y is up-down\n odiff = self.framecenter_y - self.object_y\n zout = odiff * self.obj_p\n # limit output if necassary\n if abs(zout) > self.obj_pmax:\n if zout < 0:\n zout = -self.obj_pmax\n else:\n zout = self.obj_pmax\n zout += self.pwm_center\n return zout", "def calculateWorldValues(self):\n # 等值面\n for x in range(self.worldSize):\n for y in range(self.worldSize):\n for z in range(self.worldSize):\n if self.t=='b':\n self.world[x][y][z] = math.cos(x) + math.cos(y) + math.cos(z)\n elif self.t=='n':\n self.world[x][y][z] = math.cos(x)*math.cos(y)*math.cos(z) - math.sin(x)*math.sin(y)*math.sin(z)\n elif self.t=='m':\n self.world[x][y][z] = math.sin(x)*math.cos(y) +math.sin(z)*math.cos(x)+math.sin(y)*math.cos(z)", "def __ComputeObservationVector(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def impact_prediction(\n radius: float,\n target_altitude: float,\n vertical_speed: float,\n horizontal_speed: float,\n surface_gravity: float,\n ut: float,\n):\n fall_speed = -vertical_speed\n downward_acceleration = (\n surface_gravity - horizontal_speed * horizontal_speed / radius\n )\n\n # do we already landed?\n if target_altitude < 0:\n return None, 
None\n\n # do we land?\n if downward_acceleration < 0:\n max_fall_distance = -(fall_speed * fall_speed) / (\n 2.0 * downward_acceleration\n )\n if max_fall_distance < (target_altitude + 1.0):\n return None, None\n\n sec_until_impact = (\n -fall_speed\n + math.sqrt(\n fall_speed * fall_speed\n + 2.0 * downward_acceleration * target_altitude\n )\n ) / downward_acceleration\n\n vertical_speed_at_impact = (\n fall_speed + sec_until_impact * downward_acceleration\n )\n impact_speed = math.sqrt(\n vertical_speed_at_impact * vertical_speed_at_impact\n + horizontal_speed * horizontal_speed\n )\n impact_ut = sec_until_impact + ut\n return impact_ut, impact_speed", "def area(self):\n\n return (self.x1 - self.x0) * (self.y1 - self.y0)", "def return_zeropoint():\n return 22.5", "def distanceFromGhost(self, state):\n pacPos = state.getPacmanPosition()\n ghoPos = state.getGhostPositions()\n d = abs(pacPos[1]-ghoPos[0][1]) + abs(pacPos[0]-ghoPos[0][0])\n return d", "def geten(self):\n lat = self.getlatlon()[0]\n return (0.5*self._sm*(self._vr**2 + self._vt**2 + self._vp**2) +\n forces.wgs84_pot(self._r, lat)*self._sm)\n # G*self._mm*self._sm/self._r)", "def ellipsoidHeight(X,Y,Z):\n a = 6378137.\n f = 1./298.2572236\n e2 = 2*f-f*f\n epsilon = e2/(1.-e2)\n b = a*(1.-f)\n p = sqrt(X*X+Y*Y)\n q = atan((Z*a)/(p*b))\n phi = atan( (Z+epsilon*b*sin(q)**3)/(p-e2*a*cos(q)**3) )\n nu = a / sqrt( 1-e2*sin(phi)**2)\n h = p/cos(phi) - nu\n return h", "def accelerationCalcY(x,y):\r\n global G #glabel variables are initialised\r\n global massEarth\r\n return (-G*massEarth*y)/((x**2+y**2)**(3/2))# here from Newton's law of gravitation we return the acceleration on a projectile in the y direction\r", "def _define_height_coord(height) -> AuxCoord:\n return AuxCoord(np.array(height, dtype=np.float32), \"height\", units=\"m\",)", "def __getitem__(self, item):\n # TODO: add cache to hard\n if item in self:\n return dict.__getitem__(self, item)\n else:\n # generate perlin height\n x, y = item\n if x < 0:\n x = x + self.world_size\n if y < 0:\n y = y + self.world_size\n\n if x >= self.world_size:\n x = x - self.world_size\n if y >= self.world_size:\n y = y - self.world_size\n\n height = self.template_height(x, y)\n p = self.perlin[0](x, y) ** 2\n height += height * p\n p = self.perlin[1](x, y)\n height += height * p * 0.1\n p = self.perlin[2](x, y)\n height += height * p * 0.05\n p = self.perlin[3](x, y)\n height += height * p * 0.02\n p = self.perlin[4](x, y)\n h = height + (10 * p)\n if height >= 1:\n if h < 1:\n h = 1\n elif height < 1:\n if h >= 1:\n h = 0\n height = h\n\n # rivers\n #if height > -4.:\n #r = self.river_perlin(x, y)\n #if r >= 0.1 and r <= 0.101:\n #height = -10. 
+ (self.river_perlin_height(x, y) * 10.)\n\n self[item] = int(height)\n return int(height)", "def project_gravity_core(xyz):\n ver = []\n hor = []\n \n # mean for each axis\n G = [np.mean(xyz[:, 0]), np.mean(xyz[:, 1]), np.mean(xyz[:, 2])]\n G_norm = G/np.sqrt(sum(np.power(G, 2)) + 0.0000001)\n \n # The projection is here\n for i in range(len(xyz[:, 0])):\n ver.append(float(np.dot([xyz[i, :]], G)))\n hor.append(float(np.sqrt(np.dot(xyz[i, :]-ver[i]*G_norm, xyz[i, :]-ver[i]*G_norm))))\n \n ver = np.reshape(np.asarray(ver), len(ver))\n return Vectors.dense(ver), Vectors.dense(hor)", "def z(self):\n return self._coords[2]", "def get_points(self):\r\n return self.nx*self.ny*self.nz", "def ground_range(self) -> Union[int, float]:\n return self.ground_weapon and self.ground_weapon.range", "def get_latitude0(self):\n return self.B", "def dz(self):\n if self._uniform_cell_size[2] == gxapi.rDUMMY:\n return None\n return self._uniform_cell_size[2]", "def ground_vis(self) -> torch.Tensor:\n\n return utils.packed_cube_to_ground_cube(self.vis)", "def _calc_grav(self):\n if self.change_y == 0:\n self.change_y = 1\n else:\n self.change_y += 1.35\n\n # See if we are on the ground.\n # if self.rect.y >= SCREEN_HEIGHT - self.rect.height and self.change_y >= 0:\n # self.change_y = 0\n # self.rect.y = SCREEN_HEIGHT - self.rect.height", "def height_at(self, x, z):\n\n return self.heightmap[x * 16 + z]", "def easting(self):\r\n x, y = self.lonlat2xy(self.longitude, self.latitude)\r\n return x", "def calc_body_pos(component, time):\r\n # Convert to radians\r\n M = radians(eval(component.orbit['M'])(time))\r\n w = radians(eval(component.orbit['w'])(time))\r\n i = radians(eval(component.orbit['i'])(time))\r\n N = radians(eval(component.orbit['N'])(time))\r\n a = component.orbit['a']\r\n e = eval(component.orbit['e'])(time)\r\n # Compute eccentric anomaly\r\n E = M + e * sin(M) * (1.0 + e * cos(M))\r\n if degrees(E) > 0.05:\r\n E = compute_e(E, M, e)\r\n # http://stjarnhimlen.se/comp/tutorial.html\r\n # Compute distance and true anomaly\r\n xv = a * (cos(E) - e)\r\n yv = a * (sqrt(1.0 - e * e) * sin(E))\r\n v = atan2(yv, xv)\r\n r = sqrt(xv * xv + yv * yv)\r\n xh = r * (cos(N) * cos(v + w) - sin(N) * sin(v + w) * cos(i))\r\n yh = r * (sin(N) * cos(v + w) + cos(N) * sin(v + w) * cos(i))\r\n if is2d:\r\n zh = 0\r\n else:\r\n zh = r * (sin(v + w) * sin(i))\r\n '''The above assumes looking \"down\" on the solar system where x and y are\r\n the circle and z = 0 is on the heliocentric ecliptic. 
Panda3d uses the above.\r\n Opengl is y up and z depth so we switch the y and z coordinates below..'''\r\n #TODO: z+ is out of the screen so check if we need to flip the sign of zh\r\n '''xh = r * (cos(N) * cos(v + w) - sin(N) * sin(v + w) * cos(i))\r\n zh = r * (sin(N) * cos(v + w) + cos(N) * sin(v + w) * cos(i))\r\n if is2d:\r\n yh = 0\r\n else:\r\n yh = r * (sin(v + w) * sin(i))'''\r\n position = LPoint3d(xh, yh, zh)\r\n # If we are not a moon then our orbits are done in au.\r\n # Moons are done in km\r\n # Our units in panda are m, so we convert to m\r\n if component.kind != cel_comp.TYPES['moon']:\r\n position *= 149598000\r\n position *= 1000\r\n return position", "def topographic_altitude(self, lat_d, lon_d):\n return self.altitude(lat_d, lon_d)", "def topographic_altitude(self, lat_d, lon_d):\n return self.altitude(lat_d, lon_d)", "def return_map(self):\n idx1 = self.y[1]>np.pi\n idx2 = self.y[1]<np.pi\n \n if np.sum(idx1) > 0:\n self.y[1][idx1] = self.y[1][idx1]-np.floor((self.y[1][idx1]+np.pi)/(2*np.pi))*2*np.pi\n if np.sum(idx2) > 0:\n self.y[1][idx2] = self.y[1][idx2]-np.ceil((self.y[1][idx2]-np.pi)/(2*np.pi))*2*np.pi", "def gravity():\n\tg = _scene.gravity()\n\treturn Vector3(g[0], g[1], g[2])" ]
[ "0.6243619", "0.5950408", "0.5930064", "0.5907398", "0.5722841", "0.5635606", "0.56119394", "0.55470324", "0.5499355", "0.5497724", "0.5488828", "0.547526", "0.5454269", "0.5429254", "0.5421403", "0.5410944", "0.5403357", "0.53891635", "0.5388756", "0.5382602", "0.5380469", "0.53706384", "0.53680027", "0.5358023", "0.5345807", "0.5344522", "0.5317162", "0.5298363", "0.52778935", "0.5264358", "0.52567035", "0.5255637", "0.52545613", "0.5254192", "0.5253843", "0.52526796", "0.52442575", "0.5235478", "0.5230516", "0.52196383", "0.5213582", "0.52085656", "0.52067757", "0.5205278", "0.51966035", "0.5187817", "0.5160865", "0.5151987", "0.51519656", "0.51489866", "0.5145372", "0.5144773", "0.5137732", "0.5136552", "0.513318", "0.512887", "0.51250577", "0.51070845", "0.5100918", "0.50945324", "0.50941175", "0.50811255", "0.50779915", "0.5067713", "0.5058589", "0.5057174", "0.5054166", "0.50522614", "0.50480974", "0.50467694", "0.5044594", "0.50402766", "0.5028729", "0.5025721", "0.50236183", "0.50186515", "0.5015532", "0.5011256", "0.50092125", "0.5007716", "0.5005566", "0.50039357", "0.50000685", "0.49989408", "0.49941322", "0.49937642", "0.4991815", "0.49916986", "0.49907947", "0.49897262", "0.49855956", "0.4983557", "0.49812827", "0.49776962", "0.49729386", "0.4970364", "0.49699137", "0.496967", "0.496967", "0.49689248", "0.49611512" ]
0.0
-1
calculates area of the footprint on the ground focalLength and sensorsize in mm
def castSize(self, scale): return self.camera.sensorSize * scale
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def surfaceArea(self):\n surfaceArea = self.sideLength**2 * 6\n return surfaceArea", "def area(self):\n return self.length*self.length", "def calculateDetectorArea(self):\n area = 0.0\n r = self.geoParam['CylinderLightGuideRadius']\n while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):\n area -= math.pow(r,2)\n r += self.geoParam['DetectorThickness']\n area += math.pow(r,2)\n r += self.geoParam['DetectorSpacing']\n return math.pi*area", "def calculatearea(self):\r\n return self.width * self.height", "def surface_area(self) -> float:\n return 4 * np.pi * self.radius**2", "def area(self) -> float:\n raise NotImplementedError", "def _calc_area(LMTD, U, Q, ft) -> 'Area':\n return Q/(U*LMTD*ft)", "def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area", "def area(self):\n return self.__size ** 2", "def area(self):\n area = self.__size * self.__size\n return(area)", "def area(self):\n area = self.__size * self.__size\n return area", "def area(self):\n area = self._lengths[0] * self._lengths[1] * math.sin(math.radians(self._angles[0]))\n area += self._lengths[2] * self._lengths[3] * math.sin(math.radians(self._angles[0]))\n return float('{:.2f}'.format(area * 0.5))", "def area(self):\n geometry_properties = GProp_GProps()\n brepgprop_SurfaceProperties(self.topods_shape(), geometry_properties)\n return geometry_properties.Mass()", "def area(self):\n geometry_properties = GProp_GProps()\n brepgprop_SurfaceProperties(self.topods_shape(), geometry_properties)\n return geometry_properties.Mass()", "def area(self):\n return (self.__size ** 2)", "def area(self):\n return (self.__size ** 2)", "def area(self):\n return (self.__size ** 2)", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size ** 2", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def area(self):\n return self.__size * self.__size", "def getSurfaceArea(self) -> float:\n return self.area()", "def area(self):\n\t\treturn self.height * self.height", "def area(self):\n\t\treturn self.width * self.height", "def area(self):\n return (self.__size * self.__size)", "def area(self):\n return (self.__size * self.__size)", "def area(self):\n return (self.__size * self.__size)", "def area(self):\n return int(self.__size) * int(self.__size)", "def area(self):\n area = self.__length * self.__width\n\n return area", "def area(self) -> npt.NDArray[np.float_]:\n return np.sum(self.faces.area)", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def area(self):\r\n return self.width * self.height", "def area(self):\n semi_perimeter = self.perimeter() / 2\n area = semi_perimeter\n for l in self._lengths:\n area *= (semi_perimeter - l)\n return float('{:.2f}'.format(area**0.5))", "def calculate_area(length: int, width: int) -> int:\n\n # process\n area = length * width\n\n # output\n return area", "def calculate_area(building, pixel_size=1):\n return len(building.points) * (pixel_size**2)", "def _calculate_area_overlap(self, wake_velocities, freestream_velocities, turbine):\n count = np.sum(freestream_velocities - wake_velocities <= 0.05)\n return (turbine.grid_point_count - count) / 
turbine.grid_point_count", "def area(self):\n\t\treturn self.width() * self.height()", "def area(self):\n area = 0\n last = self._coordinates[-1]\n for c in self._coordinates:\n area += (last[0] * c[1] - last[1] * c[0])\n last = c\n return float(\"{:.2f}\".format(abs(area) * 0.5))", "def area(self):\n area = 0.25*self._sides*self._length**2 / math.tan(math.radians(180/self._sides))\n return float('{:.2f}'.format(area))", "def area(self):\n return(self.__width * self.__height)", "def area(self):\n return self.width*self.height", "def area(self):\n return math.pi * self._r ** 2", "def get_footprint_area(self):\n x = self.dimension_along(0)\n y = self.dimension_along(1)\n return x * y", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return math.pi * self.radius ** 2", "def area(self):\n return self.__height * self.__width", "def area(self):\n return self.__height * self.__width", "def area(self):\n return self.__height * self.__width", "def area(self):\n return (self.width * self.height)", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def area(self):\n return self.width * self.height", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def area(self) -> float:\n return cross3(self.b.position - self.a.position,\n self.c.position - self.a.position).length() / 2.0", "def getArea(self):\n return math.pi * self.radius ** 2", "def area(self):\n return self.radius*self.radius*math.pi", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n return self.__width * self.__height", "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def area(self):\n return self._width * self._height", "def area(self):\n area = self.__width * self.__height\n return area", "def area(self):\n return (self.__radius ** 2 * math.pi)", "def area(self):\r\n return math.pi*(self.__radius**2)", "def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)", "def area(self):\n return math.pi * math.pow(self.radius, 2)", "def area(self):\n return (self.__width * self.__height)", "def area(self):\n return (self.__width * self.__height)", "def _area(bounds):\n return (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 1] - bounds[1, 0])", "def getArea(self):\n return math.pi * self.__radius 
* self.__radius", "def getArea(self):\n asum = 0.0\n for quad in self._quadrilaterals:\n w = get_quad_width(quad)\n l = get_quad_length(quad)\n asum = asum + w * l\n return asum", "def compute_surface_area(self):\n return np.sum(self._find_triangle_areas())", "def area(self):\n return self.width() * self.height()", "def area(self):\n return math.pi*self._radius*self._radius", "def total_length(self):\n # YOUR CODE HERE\n return abs(self.radius*self.angle)", "def get_binary_rf_area(self):\n\n if self.thr is None:\n raise LookupError('To th area, the receptive field should be thresholded!!')\n\n alt_step = abs(np.mean(np.diff(self.altPos).astype(np.float)))\n azi_step = abs(np.mean(np.diff(self.aziPos).astype(np.float)))\n\n return len(self.weights) * alt_step * azi_step", "def area(self):\n\n return self.__width * self.__height", "def area(self):\n\n return self.__width * self.__height", "def area(self):\n\n return self.__width * self.__height", "def calculate(self):\n\n return self._calculate_area(self.ground_truth, self.slice_number)", "def area(base, height):\n\n return base * height", "def area(width, height):\n return width * height", "def beamarea_pix(self):\n beamsigma1 = self.header['BMAJ'] / self.wcs.wcs.cdelt[0]\n beamsigma2 = self.header['BMIN'] / self.wcs.wcs.cdelt[0]\n return (np.pi * beamsigma1 * beamsigma2) / (4 * np.log(2))", "def areaTriangulo(base,altura):\n\treturn (base*altura)/2", "def get_area(self):\n ### Original\n from pyresample.spherical_geometry import get_polygon_area\n\n return get_polygon_area(self.corners)\n ### End Original\n #from .spherical import SphPolygon\n #shell()\n #log.info('RUNNING SPHERICAL in get_area')\n\n #return SphPolygon(self.corners).area", "def surface_area(self):\n return self._surface_area", "def calc_area_integral(data, area,mask,field,file_type):\n\t#if field!='calving':\n\t#\t#if field=='mass':\n\t#\t#\tTotal = np.sum(data *mask)\n\t#\t#else:\n\t#\t#\tTotal = np.sum(area * data *mask)\n\tTotal = np.sum(area * data *mask)\n\t\n\tif file_type=='ice':\n\t\t#Note that the cell area in the ice_month field is measured in units of sphere.\n\t\tArea_of_earth=510.1*(10**12) #in m^2\n\t\tTotal=Total*Area_of_earth\n\n\t#print data.shape, area.shape\n\tif field=='calving':\n\t\tTotal = np.sum( data *mask)\n\n\n return Total", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def get_coverage_area(self) -> float:\n return math.sqrt(self.norm_hull.volume)" ]
[ "0.68501455", "0.6728362", "0.67006356", "0.66854936", "0.6640243", "0.6634641", "0.66335106", "0.6622647", "0.6602097", "0.6584502", "0.65486264", "0.6531883", "0.65018195", "0.6479644", "0.6479644", "0.6473053", "0.6473053", "0.6473053", "0.646595", "0.646595", "0.646595", "0.646595", "0.64466494", "0.64466494", "0.64466494", "0.64466494", "0.64464056", "0.64427257", "0.6378526", "0.6369124", "0.6369124", "0.6369124", "0.6329408", "0.6327934", "0.62978685", "0.6280676", "0.6277822", "0.62747705", "0.62633944", "0.625372", "0.6245734", "0.62181187", "0.6207775", "0.6205875", "0.620389", "0.61994743", "0.6191048", "0.6174917", "0.61733073", "0.61733073", "0.61464596", "0.61464596", "0.61464596", "0.6144879", "0.6143646", "0.6143646", "0.6143646", "0.6143646", "0.6143646", "0.6143646", "0.6143646", "0.6143646", "0.61112773", "0.61064935", "0.61035824", "0.60972047", "0.608744", "0.608744", "0.608744", "0.608744", "0.608744", "0.608564", "0.606452", "0.6058845", "0.60526884", "0.60512996", "0.6046002", "0.6043144", "0.6033484", "0.6033484", "0.6024524", "0.60195386", "0.60104716", "0.60024923", "0.5992889", "0.59782577", "0.59611034", "0.59479225", "0.59448063", "0.59448063", "0.59448063", "0.5941858", "0.5936883", "0.59341043", "0.5912494", "0.5907119", "0.5896899", "0.5896356", "0.58940804", "0.586931", "0.58543086" ]
0.0
-1
Generating grid of points biased by ppa (principal point delta)
def GeneratePointsImg(self, n, ppa): x = np.linspace(0,self.camera.sensorSize,n)+ppa[0] y = np.linspace(0,self.camera.sensorSize,n)+ppa[1] return np.meshgrid(x, y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modify_pt(pt=None,):\n global dim\n mod_rand_pt = []\n\n for i_ in range(dim):\n for j_ in range(i_, dim):\n mod_rand_pt.append(pt[i_] * pt[j_])\n\n mod_rand_pt.append(1.)\n return mod_rand_pt", "def projection_P(P_prime):\n sorted_prime = -np.sort(-P_prime, axis=1) # Descending order sort\n cumsum_sorted = np.cumsum(sorted_prime, axis=1) # Compute cumulative sum of lines\n rho_availability = sorted_prime > (cumsum_sorted - 1) / np.arange(1, P_prime.shape[\n 1] + 1) # Compute non-zero rho candidates\n rho = np.count_nonzero(rho_availability, axis=1) # Compute number of non-zero values in final line (rho)\n theta = (cumsum_sorted[np.arange(len(rho)), rho - 1] - 1) / (rho) # Compute lagrange multiplier theta\n P = (P_prime.transpose() - theta).transpose().clip(min=0) # subtract multiplier, clip negatives\n\n return P", "def to_grid(point: np.array) -> np.array:\n return np.array((2.5, 2.5)) + point * 5", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]", "def to_points(self, divisions=100):", "def beta_gen_posmnt(p):\n return np.array([0.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))", "def g(arr, n, points):\n P, a, b = arr # extract\n xCoord = P.x # extract x coord\n xCoord = bin(P.x) # get binary representation\n xCoord = \"0\" * 4 + xCoord[2:] # pad front with 0's\n ind = int(xCoord[-4:], 2) # get random point by \"hashing P\"\n Q = points[ind] # extract random point\n return P + Q[0], (a + Q[1]) % n, (b + Q[2]) % n # return the addition", "def gen_points(lo, hi, N):\n\treturn np.linspace(lo, hi, num=N)\n\t\n\t## a = np.array(range(0, N))\n\t## return lo + (a * (hi-lo)/float(N))", "def generate_points(num_points):\n for i in xrange(0, num_points):\n pass", "def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")", "def add_points(grid, num_points):\n \n for i in range(num_points):\n # Coord for crit point\n rand_x = random.randint(0, GRID_WIDTH - 1)\n rand_y = random.randint(0, GRID_HEIGHT - 1)\n \n # Set value of crit point\n elev = (MAX_HEIGHT - MIN_HEIGHT) * random.random() + MIN_HEIGHT\n grid[rand_x][rand_y] = elev * PEAK_HEIGHT\n \n return grid", "def _build_point_grid(n_per_side: int) -> np.ndarray:\n offset = 1 / (2 * n_per_side)\n points_one_side = np.linspace(offset, 1 - offset, n_per_side)\n points_x = np.tile(points_one_side[None, :], (n_per_side, 1))\n points_y = np.tile(points_one_side[:, None], (1, n_per_side))\n points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)\n return points", "def test_PRP(initial):\n return plan_route((initial[0],initial[1]), initial[2],\n # Goals:\n [(2,3),(3,2)],\n # Allowed locations:\n [(0,0),(0,1),(0,2),(0,3),\n (1,0),(1,1),(1,2),(1,3),\n (2,0), (2,3),\n 
(3,0),(3,1),(3,2),(3,3)])", "def random_projection_split(data, indices, rng_state):\n dim = data.shape[1]\n\n # Select two random points, set the hyperplane between them\n left_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index += left_index == right_index\n right_index = right_index % indices.shape[0]\n left = indices[left_index]\n right = indices[right_index]\n\n # Compute the normal vector to the hyperplane (the vector between\n # the two points) and the offset from the origin\n hyperplane_offset = 0.0\n hyperplane_vector = np.empty(dim, dtype=np.float32)\n\n for d in range(dim):\n hyperplane_vector[d] = data[left, d] - data[right, d]\n hyperplane_offset -= hyperplane_vector[d] * (\n data[left, d] + data[right, d]) / 2.0\n\n # For each point compute the margin (project into normal vector, add offset)\n # If we are on lower side of the hyperplane put in one pile, otherwise\n # put it in the other pile (if we hit hyperplane on the nose, flip a coin)\n n_left = 0\n n_right = 0\n side = np.empty(indices.shape[0], np.int8)\n for i in range(indices.shape[0]):\n margin = hyperplane_offset\n for d in range(dim):\n margin += hyperplane_vector[d] * data[indices[i], d]\n\n if margin == 0:\n side[i] = tau_rand_int(rng_state) % 2\n if side[i] == 0:\n n_left += 1\n else:\n n_right += 1\n elif margin > 0:\n side[i] = 0\n n_left += 1\n else:\n side[i] = 1\n n_right += 1\n\n # Now that we have the counts allocate arrays\n indices_left = np.empty(n_left, dtype=np.int64)\n indices_right = np.empty(n_right, dtype=np.int64)\n\n # Populate the arrays with indices according to which side they fell on\n n_left = 0\n n_right = 0\n for i in range(side.shape[0]):\n if side[i] == 0:\n indices_left[n_left] = indices[i]\n n_left += 1\n else:\n indices_right[n_right] = indices[i]\n n_right += 1\n\n return indices_left, indices_right", "def project_perp(A):\n return np.eye(A.shape[1]) - project(A)", "def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point", "def test_random_create_P():\n\n max_step = 100\n n = 50\n low = 1\n tol = 1e-8\n\n P_ι = np.random.dirichlet(np.random.randint(low, max_step, size=n))\n P_δ = np.random.dirichlet(np.random.randint(low, max_step, size=n))\n P_ζ = np.random.dirichlet(np.random.randint(low, high=max_step, size=50),\n size=2)\n\n P = create_P(P_δ, P_ζ, P_ι)\n\n assert abs(P[:, 0, :, :].sum() - 1.) < tol\n assert abs(P[:, 1, :, :].sum() - 1.) 
< tol", "def gausspp(npt):\n if npt <= 0:\n raise ValueError(\"Can't generate grid for <= 0 points\")\n return\n if npt == 1:\n xpt = np.array([0.0])\n wht = np.array([2.0])\n return xpt, wht\n\n # Each mesh is stored as a section of a big array.\n # These store its number and start index is here\n mesh_npts = [2,3,4,5,6,7,8,9,10,11,12,13,14,16,20,24,28,32,40,48,64,96]\n\n # First, look to see if the mesh is stored.\n # If not we take the largest number that is lower than that stored.\n for i in range(len(mesh_npts)):\n mesh_idx = i\n if mesh_npts[i] >= npt:\n break\n npt = mesh_npts[mesh_idx]\n n2 = int((npt+1)/2.0) # Care: Integer division!\n iof = npt\n\n # The stored grid parameters are accessed as a dict of arrays.\n x = {\n 2 : [0.577350269189626e0],\n 3 : [0.774596669241483e0, 0.0e0],\n 4 : [0.861136311594053e0, 0.339981043584856e0],\n 5 : [0.906179845938664e0, 0.538469310105683e0, 0.0e0],\n 6 : [0.932469514203152e0, 0.661209386466265e0, 0.238619186083197e0],\n 7 : [0.949107912342759e0, 0.741531185599394e0, 0.405845151377397e0, 0.0e0],\n 8 : [0.960289856497536e0, 0.796666477413627e0, 0.525532409916329e0, 0.183434642495650e0],\n 9 : [0.968160239507626e0, 0.836031107326636e0, 0.613371432700590e0, 0.324253423403809e0,\n 0.0e0],\n 10 : [0.973906528517172e0, 0.865063366688985e0, 0.679409568299024e0, 0.433395394129247e0,\n 0.148874338981631e0],\n 11 : [0.978228658146057e0, 0.887062599768095e0, 0.730152005574049e0, 0.519096129206812e0,\n 0.269543155952345e0, 0.0e0],\n 12 : [0.981560634246719e0, 0.904117256370475e0, 0.769902674194305e0, 0.587317954286617e0,\n 0.367831498998180e0, 0.125233408511469e0],\n 13 : [0.984183054718588e0, 0.917598399222978e0, 0.801578090733310e0, 0.642349339440340e0,\n 0.448492751036447e0, 0.230458315955135e0, 0.0e0],\n 14 : [0.986283808696812e0, 0.928434883663574e0, 0.827201315069765e0, 0.687292904811685e0,\n 0.515248636358154e0, 0.319112368927890e0, 0.108054948707344e0],\n 16 : [0.989400934991650e0, 0.944575023073232e0, 0.865631202387832e0, 0.755404408355003e0,\n 0.617876244402644e0, 0.458016777657227e0, 0.281603550779259e0, 0.950125098376369e-1],\n 20 : [0.993128599185095e0, 0.963971927277914e0, 0.912234428251326e0, 0.839116971822219e0,\n 0.746331906460151e0, 0.636053680726515e0, 0.510867001950827e0, 0.373706088715419e0,\n 0.227785851141645e0, 0.765265211334969e-1],\n 24 : [0.995187219997021e0, 0.974728555971309e0, 0.938274552002733e0, 0.886415527004401e0,\n 0.820001985973903e0, 0.740124191578554e0, 0.648093651936975e0, 0.545421471388839e0,\n 0.433793507626045e0, 0.315042679696163e0, 0.191118867473616e0, 0.640568928626059e-1],\n 28 : [0.996442497573954e0, 0.981303165370873e0, 0.954259280628938e0, 0.915633026392132e0,\n 0.865892522574395e0, 0.805641370917179e0, 0.735610878013632e0, 0.656651094038865e0,\n 0.569720471811402e0, 0.475874224955118e0, 0.376251516089079e0, 0.272061627635178e0,\n 0.164569282133381e0, 0.550792898840340e-1],\n 32 : [0.997263861849481e0, 0.985611511545268e0, 0.964762255587506e0, 0.934906075937740e0,\n 0.896321155766052e0, 0.849367613732570e0, 0.794483795967942e0, 0.732182118740290e0,\n 0.663044266930215e0, 0.587715757240762e0, 0.506899908932229e0, 0.421351276130635e0,\n 0.331868602282128e0, 0.239287362252137e0, 0.144471961582796e0, 0.483076656877380e-1],\n 40 : [0.998237709710559e0, 0.990726238699457e0, 0.977259949983774e0, 0.957916819213792e0,\n 0.932812808278676e0, 0.902098806968874e0, 0.865959503212259e0, 0.824612230833312e0,\n 0.778305651426519e0, 0.727318255189927e0, 0.671956684614179e0, 0.612553889667980e0,\n 0.549467125095128e0, 
0.483075801686179e0, 0.413779204371605e0, 0.341994090825758e0,\n 0.268152185007254e0, 0.192697580701371e0, 0.116084070675255e0, 0.387724175060510e-1],\n 48 : [0.998771007252426e0, 0.993530172266351e0, 0.984124583722827e0, 0.970591592546247e0,\n 0.952987703160431e0, 0.931386690706554e0, 0.905879136715570e0, 0.876572020274248e0,\n 0.843588261624393e0, 0.807066204029443e0, 0.767159032515740e0, 0.724034130923815e0,\n 0.677872379632664e0, 0.628867396776514e0, 0.577224726083973e0, 0.523160974722233e0,\n 0.466902904750958e0, 0.408686481990717e0, 0.348755886292161e0, 0.287362487355455e0,\n 0.224763790394689e0, 0.161222356068892e0, 0.970046992094629e-1, 0.323801709628690e-1],\n 64 : [0.999305041735772e0, 0.996340116771955e0, 0.991013371476744e0, 0.983336253884626e0,\n 0.973326827789911e0, 0.961008799652054e0, 0.946411374858403e0, 0.929569172131939e0,\n 0.910522137078503e0, 0.889315445995114e0, 0.865999398154093e0, 0.840629296252580e0,\n 0.813265315122797e0, 0.783972358943341e0, 0.752819907260532e0, 0.719881850171611e0,\n 0.685236313054233e0, 0.648965471254657e0, 0.611155355172393e0, 0.571895646202634e0,\n 0.531279464019894e0, 0.489403145707053e0, 0.446366017253464e0, 0.402270157963992e0,\n 0.357220158337668e0, 0.311322871990211e0, 0.264687162208767e0, 0.217423643740007e0,\n 0.169644420423993e0, 0.121462819296120e0, 0.729931217877989e-1, 0.243502926634240e-1],\n 96 : [0.999689503883230e0, 0.998364375863181e0, 0.995981842987209e0, 0.992543900323762e0,\n 0.988054126329623e0, 0.982517263563014e0, 0.975939174585136e0, 0.968326828463264e0,\n 0.959688291448742e0, 0.950032717784437e0, 0.939370339752755e0, 0.927712456722308e0,\n 0.915071423120898e0, 0.901460635315852e0, 0.886894517402420e0, 0.871388505909296e0,\n 0.854959033434601e0, 0.837623511228187e0, 0.819400310737931e0, 0.800308744139140e0,\n 0.780369043867433e0, 0.759602341176647e0, 0.738030643744400e0, 0.715676812348967e0,\n 0.692564536642171e0, 0.668718310043916e0, 0.644163403784967e0, 0.618925840125468e0,\n 0.593032364777572e0, 0.566510418561397e0, 0.539388108324357e0, 0.511694177154667e0,\n 0.483457973920596e0, 0.454709422167743e0, 0.425478988407300e0, 0.395797649828908e0,\n 0.365696861472313e0, 0.335208522892625e0, 0.304364944354496e0, 0.273198812591049e0,\n 0.241743156163840e0, 0.210031310460567e0, 0.178096882367618e0, 0.145973714654896e0,\n 0.113695850110665e0, 0.812974954644249e-1, 0.488129851360490e-1, 0.162767448496020e-1]\n }\n wt = {\n 2 : [0.999999999999999e0],\n 3 : [0.555555555555556e0, 0.888888888888889e0],\n 4 : [0.347854845137454e0, 0.652145154862546e0],\n 5 : [0.236926885056189e0, 0.478628670499366e0, 0.568888888888889e0],\n 6 : [0.171324492379170e0, 0.360761573048139e0, 0.467913934572691e0],\n 7 : [0.129484966168870e0, 0.279705391489277e0, 0.381830050505119e0, 0.417959183673469e0],\n 8 : [0.101228536290376e0, 0.222381034453374e0, 0.313706645877887e0, 0.362683783378362e0],\n 9 : [0.812743883615739e-1, 0.180648160694857e0, 0.260610696402935e0, 0.312347077040003e0,\n 0.330239355001260e0],\n 10 : [0.666713443086879e-1, 0.149451349150581e0, 0.219086362515982e0, 0.269266719309996e0,\n 0.295524224714753e0],\n 11 : [0.556685671161740e-1, 0.125580369464905e0, 0.186290210927734e0, 0.233193764591990e0,\n 0.262804544510247e0, 0.272925086777901e0],\n 12 : [0.471753363865120e-1, 0.106939325995318e0, 0.160078328543346e0, 0.203167426723066e0,\n 0.233492536538355e0, 0.249147045813403e0],\n 13 : [0.404840047653160e-1, 0.921214998377279e-1, 0.138873510219787e0, 0.178145980761946e0,\n 0.207816047536889e0, 0.226283180262897e0, 0.232551553230874e0],\n 
14 : [0.351194603317520e-1, 0.801580871597599e-1, 0.121518570687903e0, 0.157203167158194e0,\n 0.185538397477938e0, 0.205198463721296e0, 0.215263853463158e0],\n 16 : [0.271524594117540e-1, 0.622535239386480e-1, 0.951585116824929e-1, 0.124628971255534e0,\n 0.149595988816577e0, 0.169156519395002e0, 0.182603415044923e0, 0.189450610455068e0],\n 20 : [0.176140071391520e-1, 0.406014298003870e-1, 0.626720483341089e-1, 0.832767415767049e-1,\n 0.101930119817240e0, 0.118194531961518e0, 0.131688638449177e0, 0.142096109318382e0,\n 0.149172986472604e0, 0.152753387130726e0],\n 24 : [0.123412297999870e-1, 0.285313886289340e-1, 0.442774388174200e-1, 0.592985849154370e-1,\n 0.733464814110799e-1, 0.861901615319529e-1, 0.976186521041139e-1, 0.107444270115966e0,\n 0.115505668053726e0, 0.121670472927803e0, 0.125837456346828e0, 0.127938195346752e0],\n 28 : [0.912428259309400e-2, 0.211321125927710e-1, 0.329014277823040e-1, 0.442729347590040e-1,\n 0.551073456757170e-1, 0.652729239669989e-1, 0.746462142345689e-1, 0.831134172289009e-1,\n 0.905717443930329e-1, 0.969306579979299e-1, 0.102112967578061e0, 0.106055765922846e0,\n 0.108711192258294e0, 0.110047013016475e0],\n 32 : [0.701861000947000e-2, 0.162743947309060e-1, 0.253920653092620e-1, 0.342738629130210e-1,\n 0.428358980222270e-1, 0.509980592623760e-1, 0.586840934785350e-1, 0.658222227763619e-1,\n 0.723457941088479e-1, 0.781938957870699e-1, 0.833119242269469e-1, 0.876520930044039e-1,\n 0.911738786957639e-1, 0.938443990808039e-1, 0.956387200792749e-1, 0.965400885147279e-1],\n 40 : [0.452127709853300e-2, 0.104982845311530e-1, 0.164210583819080e-1, 0.222458491941670e-1,\n 0.279370069800230e-1, 0.334601952825480e-1, 0.387821679744720e-1, 0.438709081856730e-1,\n 0.486958076350720e-1, 0.532278469839370e-1, 0.574397690993910e-1, 0.613062424929290e-1,\n 0.648040134566009e-1, 0.679120458152339e-1, 0.706116473912869e-1, 0.728865823958039e-1,\n 0.747231690579679e-1, 0.761103619006259e-1, 0.770398181642479e-1, 0.775059479784249e-1],\n 48 : [0.315334605230600e-2, 0.732755390127600e-2, 0.114772345792340e-1, 0.155793157229440e-1,\n 0.196161604573550e-1, 0.235707608393240e-1, 0.274265097083570e-1, 0.311672278327980e-1,\n 0.347772225647700e-1, 0.382413510658310e-1, 0.415450829434650e-1, 0.446745608566940e-1,\n 0.476166584924900e-1, 0.503590355538540e-1, 0.528901894851940e-1, 0.551995036999840e-1,\n 0.572772921004030e-1, 0.591148396983960e-1, 0.607044391658940e-1, 0.620394231598930e-1,\n 0.631141922862539e-1, 0.639242385846479e-1, 0.644661644359499e-1, 0.647376968126839e-1],\n 64 : [0.178328072169600e-2, 0.414703326056200e-2, 0.650445796897800e-2, 0.884675982636400e-2,\n 0.111681394601310e-1, 0.134630478967190e-1, 0.157260304760250e-1, 0.179517157756970e-1,\n 0.201348231535300e-1, 0.222701738083830e-1, 0.243527025687110e-1, 0.263774697150550e-1,\n 0.283396726142590e-1, 0.302346570724020e-1, 0.320579283548510e-1, 0.338051618371420e-1,\n 0.354722132568820e-1, 0.370551285402400e-1, 0.385501531786160e-1, 0.399537411327200e-1,\n 0.412625632426230e-1, 0.424735151236530e-1, 0.435837245293230e-1, 0.445905581637560e-1,\n 0.454916279274180e-1, 0.462847965813140e-1, 0.469681828162100e-1, 0.475401657148300e-1,\n 0.479993885964580e-1, 0.483447622348030e-1, 0.485754674415030e-1, 0.486909570091400e-1],\n 96 : [0.796792065552010e-3, 0.185396078894692e-2, 0.291073181793495e-2, 0.396455433844469e-2,\n 0.501420274292752e-2, 0.605854550423596e-2, 0.709647079115386e-2, 0.812687692569876e-2,\n 0.914867123078339e-2, 0.101607705350080e-1, 0.111621020998380e-1, 0.121516046710880e-1,\n 
0.131282295669610e-1, 0.140909417723140e-1, 0.150387210269940e-1, 0.159705629025620e-1,\n 0.168854798642450e-1, 0.177825023160450e-1, 0.186606796274110e-1, 0.195190811401450e-1,\n 0.203567971543330e-1, 0.211729398921910e-1, 0.219666444387440e-1, 0.227370696583290e-1,\n 0.234833990859260e-1, 0.242048417923640e-1, 0.249006332224830e-1, 0.255700360053490e-1,\n 0.262123407356720e-1, 0.268268667255910e-1, 0.274129627260290e-1, 0.279700076168480e-1,\n 0.284974110650850e-1, 0.289946141505550e-1, 0.294610899581670e-1, 0.298963441363280e-1,\n 0.302999154208270e-1, 0.306713761236690e-1, 0.310103325863130e-1, 0.313164255968610e-1,\n 0.315893307707270e-1, 0.318287588944110e-1, 0.320344562319920e-1, 0.322062047940300e-1,\n 0.323438225685750e-1, 0.324471637140640e-1, 0.325161187138680e-1, 0.325506144923630e-1]\n }\n\n # Now calculate the grid and weighting from these data chosen by npt\n\n mesh_r = x[npt]\n mesh_wt = wt[npt]\n\n r = np.zeros((2*n2))\n weight = np.zeros((2*n2))\n\n for i in range(n2):\n r[i] = -mesh_r[i]\n r[iof - (i + 1)] = mesh_r[i]\n weight[i] = mesh_wt[i]\n weight[iof - (i + 1)] = mesh_wt[i]\n\n return npt, r, weight", "def point_to_ppm(point, procs, proc2s):\n \n # It seems that F1 is related to the Y axis, while F2 is related to the X axis\n \n begin = (float(proc2s[\"OFFSET\"]), float(procs[\"OFFSET\"]))\n # End is begin-sw_p/sf, so step is (end-begin)/si, which simplifies to\n # (-sw_p/sf+1)/si\n step = [(-float(p[\"SW_p\"])/float(p[\"SF\"]))/float(p[\"SI\"]) \n for p in [proc2s, procs] ]\n \n return [begin[i]+step[i]*point[i] for i in (0,1)]", "def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points", "def PCA_gen(pos, k = 6, self_loop = False):\n\n # Use PCA to find principle component projection\n p_components = PCA(n_components = 1).fit_transform(pos)\n\n a_idxs = neighbors(p_components, self_loop, k)\n ones = np.ones(size = a_idxs.shape[0])\n\n a = csr_matrix(ones, (a_idxs[:,0], a_idxs[:, 1]))\n\n return a", "def projective_point(p):\n from sage.rings.integer import GCD_list, LCM_list\n try:\n p_gcd = GCD_list([x.numerator() for x in p])\n p_lcm = LCM_list([x.denominator() for x in p])\n except AttributeError:\n return p\n scale = p_lcm / p_gcd\n return [scale * x for x in p]", "def guassian_point_process(x0, y0, xSigma, ySigma, nPoints):\n x = np.random.normal(loc=x0, scale=xSigma, size=(nPoints,))\n y = np.random.normal(loc=y0, scale=ySigma, size=(nPoints,))\n return x, y", "def get_interpolation_points(n_interpolation_points, grid, seed):\n np.random.seed(seed)\n\n grid_min = np.array(object=[min(v) for _, v in grid.items()])\n grid_max = np.array(object=[max(v) for _, v in grid.items()])\n\n points = []\n\n 
for _ in range(n_interpolation_points):\n tmp = np.random.uniform(0.0, 1.0, len(grid_min))\n points.append(tmp)\n\n interpolation_points = np.array(\n object=(\n points * grid_min\n + (np.ones((n_interpolation_points, len(grid_min))) - points) * grid_max\n ),\n dtype=float,\n )\n\n return interpolation_points", "def generar_polinomio(self):\n\t\tself.poli = 0\n\t\tfor i in range(len(self.v)):\n\t\t\tpoli2 = n(self.diferencias_divididas(self.v[0:i+1]))\n\t\t\tfor j in range(i):\n\t\t\t\tpoli2 *= self.x-self.v[j][0]\n\t\t\tself.poli = self.poli + poli2", "def sample_all_planck_points(all_ids, adaptivep0 = True, planck_tqu_cursor = None, planck_cov_cursor = None, region = \"SC_241\", verbose = False, tol=1E-5, sampletype = \"mean_bayes\", testproj=False):\n if testproj:\n all_naive_p = np.zeros(len(all_ids))\n all_naive_psi = np.zeros(len(all_ids))\n else:\n all_pMB = np.zeros(len(all_ids))\n all_psiMB = np.zeros(len(all_ids))\n\n if planck_tqu_cursor is None:\n print(\"Loading default planck_tqu_cursor because it was not provided\")\n planck_tqu_db = sqlite3.connect(\"planck_TQU_gal_2048_db.sqlite\")\n planck_tqu_cursor = planck_tqu_db.cursor()\n \n if planck_cov_cursor is None:\n print(\"Loading default planck_cov_cursor because it was not provided\")\n planck_cov_db = sqlite3.connect(\"planck_cov_gal_2048_db.sqlite\")\n planck_cov_cursor = planck_cov_db.cursor()\n\n # Get p0 and psi0 sampling grids\n p0_all = np.linspace(0, 1, 165)\n psi0_all = np.linspace(0, np.pi, 165, endpoint=False) # don't count both 0 and pi\n\n update_progress(0.0)\n for i, _id in enumerate(all_ids):\n #if _id[0] in [3400757, 793551, 2447655]:\n posterior_obj = PlanckPosterior(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all, psi0_all, adaptivep0 = adaptivep0)\n #print(\"for id {}, p0 grid is {}\".format(_id, posterior_obj.sample_p0))\n #print(\"for id {}, pmeas is {}, psimeas is {}, psi naive is {}\".format(_id, posterior_obj.pmeas, posterior_obj.psimeas, posterior_obj.naive_psi))\n #print(\"for id {}, likelihood[0, 1] = {}\".format(_id, posterior_obj.posterior[0, 1]))\n #print(p0_all[0], psi0_all[1]) \n #lnlikeout = lnlikelihood(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all[0], psi0_all[1])\n #print(\"for id {}, lnlikelihood[0, 1] = {}\".format(_id, lnlikeout[0]))\n #print(np.exp(lnlikeout[0]))\n \n if testproj:\n all_naive_p[i] = posterior_obj.pmeas\n all_naive_psi[i] = posterior_obj.psimeas \n else:\n if sampletype is \"mean_bayes\":\n all_pMB[i], all_psiMB[i] = mean_bayesian_posterior(posterior_obj, center = \"naive\", verbose = verbose, tol=tol)\n elif sampletype is \"MAP\":\n all_pMB[i], all_psiMB[i] = maximum_a_posteriori(posterior_obj, verbose = verbose)\n if verbose is True:\n print(\"for id {}, num {}, I get pMB {} and psiMB {}\".format(_id, i, all_pMB[i], all_psiMB[i]))\n\n update_progress((i+1.0)/len(all_ids), message='Sampling: ', final_message='Finished Sampling: ')\n \n if testproj:\n return all_naive_p, all_naive_psi\n else:\n return all_pMB, all_psiMB", "def generate_regular_grid_point_coords(R, side_size, device):\n aff = torch.tensor([[[0.5, 0, 0.5], [0, 0.5, 0.5]]], device=device)\n r = F.affine_grid(aff, torch.Size((1, 1, side_size, side_size)), align_corners=False)\n return r.view(1, -1, 2).expand(R, -1, -1)", "def percolation_vs_p(w: int, h: int, nsim=40, n_p=50):\n p_values = np.linspace(0., 1., n_p) # n_p-value array between 0 and 1\n\n def plot_crossing_probability(ax, Percolation) -> np.ndarray:\n \"\"\"\n Plot crossing probabilities of a percolation of type Percolation\n 
\"\"\"\n\n print(f\"Computing crossing probabilities for {Percolation.grid_type} \"\n \"percolation\")\n cross_proba = np.zeros_like(p_values)\n for i in progressbar.progressbar(range(nsim)):\n perco = Percolation(w, h)\n p_cross = perco.find_p_cross()\n cross_proba += np.where(p_values < p_cross, 0, 1)\n\n cross_proba /= nsim\n ax.plot(p_values, cross_proba, '-',\n label=f'{Percolation.grid_type} percolation')\n\n fig, ax = plt.subplots()\n fig.suptitle('Probability of crossing as a function of $p$')\n ax.set_xlabel('$p$')\n ax.set_ylabel('probability')\n ax.grid()\n plot_crossing_probability(ax, PercolationRect)\n plot_crossing_probability(ax, PercolationHex)\n ax.legend()\n ax.set_title(f\"{nsim} simulations on a {w} x {h} grid\", fontsize=10)", "def generate_points(octrees, pyramids, exsum):\n return _C.ops.spc.GeneratePoints(octrees.contiguous(),\n pyramids.contiguous(),\n exsum.contiguous())", "def pareto_distribution(v, p=0.8):\n thr = np.sum(v)*p\n cumsum = 0\n for i, _v in enumerate(v, 1):\n cumsum += _v\n if cumsum >= thr:\n return i * 1.0 / len(v)", "def _P(m):\n P = np.zeros((m**2,m**2), dtype=np.int64)\n for i in range(1, m**2 + 1):\n j = 1 + m*((i - 1) % m) + (i - 1)//m\n P[i-1, j-1] = 1\n return P", "def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6", "def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. 
+ self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)", "def _generate_random_points_in_plane(nvect, dparam, npts, eps=0.0):\n np.random.seed(12345)\n a, b, c = nvect / np.linalg.norm(nvect)\n x, y = np.random.rand(npts), np.random.rand(npts)\n z = (dparam - a * x - b * y) / c\n if eps > 0:\n z += np.random.normal(loc=0., scale=eps, size=npts)\n return np.column_stack((x, y, z))", "def test_PSP(initial = (0,0,3)):\n return plan_shot((initial[0],initial[1]), initial[2],\n # Goals:\n [(2,3),(3,2)],\n # Allowed locations:\n [(0,0),(0,1),(0,2),(0,3),\n (1,0),(1,1),(1,2),(1,3),\n (2,0), (2,3),\n (3,0),(3,1),(3,2),(3,3)])", "def generate_phi(self):\n self.phi = np.empty((100, self.K))\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n self.phi[i][j] = np.exp(-self.gamma * distance.euclidean(point, center) ** 2)\n self.phi = np.concatenate((self.phi, np.ones((100, 1))), axis=1)", "def catmullrom(P0, P1, P2, P3, a, nPoints=100):\n # Convert the points to numpy so that we can do array multiplication\n P0, P1, P2, P3 = map(np.array, [P0, P1, P2, P3])\n\n # Calculate t0 to t4\n alpha = a\n\n def tj(ti, Pi, Pj):\n xi, yi, zi = Pi\n xj, yj, zj = Pj\n\n # ( ( (xj-xi)**2 + (yj-yi)**2 )**0.5 )**alpha + ti\n a = (xj - xi) ** 2 + (yj - yi) ** 2 + (zj - zi) ** 2\n b = a ** 0.5\n c = b ** alpha\n return c + ti\n\n t0 = 0\n t1 = tj(t0, P0, P1)\n t2 = tj(t1, P1, P2)\n t3 = tj(t2, P2, P3)\n\n # Only calculate points between P1 and P2\n t = np.linspace(t1, t2, nPoints)\n\n # Reshape so that we can multiply by the points P0 to P3\n # and get a point for each value of t.\n t = t.reshape(len(t), 1)\n\n A1 = (t1 - t) / (t1 - t0) * P0 + (t - t0) / (t1 - t0) * P1\n A2 = (t2 - t) / (t2 - t1) * P1 + (t - t1) / (t2 - t1) * P2\n A3 = (t3 - t) / (t3 - t2) * P2 + (t - t2) / (t3 - t2) * P3\n\n B1 = (t2 - t) / (t2 - t0) * A1 + (t - t0) / (t2 - t0) * A2\n B2 = (t3 - t) / (t3 - t1) * A2 + (t - t1) / (t3 - t1) * A3\n\n C = (t2 - t) / (t2 - t1) * B1 + (t - t1) / (t2 - t1) * B2\n return C", "def beta_gen_mnt(p):\n return np.array([-1.0]*int(0.7*p) + [1.0]*(p-int(0.7*p)))", "def pareto(x, alpha, x_m=1):\n return 1.0 * alpha * math.pow(x_m, alpha) / np.array(map(lambda y: math.pow(y, alpha + 1), x))", "def crp_gen(N, alpha):\n assert N > 0\n assert alpha > 0.0\n alpha = float(alpha)\n\n partition = np.zeros(N, dtype=int)\n Nk = [1]\n for i in range(1, N):\n K = len(Nk)\n ps = np.zeros(K+1)\n for k in range(K):\n # get the number of people sitting at table k\n ps[k] = float(Nk[k])\n\n ps[K] = alpha\n\n ps /= (float(i)-1+alpha)\n\n assignment = pflip(ps)\n\n if assignment == K:\n Nk.append(1)\n elif assignment < K:\n Nk[assignment] += 1\n else:\n raise ValueError(\"invalid assignment: %i, max=%i\" %\n (assignment, K))\n\n partition[i] = assignment\n\n assert max(partition)+1 == len(Nk)\n assert len(partition) == N\n assert sum(Nk) == N\n\n K = len(Nk)\n\n if K > 1:\n shuffle(partition)\n\n return np.array(partition), Nk, K", "def adj_ptycho_prb_batch(self, data, psi, scan):\n prb = np.zeros([self.ntheta, self.nmodes, self.nprb, self.nprb],\n dtype='complex64')\n for ids in chunk(range(self.ntheta), self.ptheta):\n psi_gpu = cp.array(psi[ids])\n scan_gpu = cp.array(scan[:, ids])\n for m in range(self.nmodes):\n data_gpu = cp.array(data[ids, m])\n prb_gpu = self.adj_ptycho_prb(\n data_gpu, psi_gpu, scan_gpu, 0)\n prb[ids, m] = prb_gpu.get()\n # prb *= (self.ndet*self.ndet) # FFT compensation\n return prb", "def to_pppack_style(tau):\n # saving the list in the given order\n\n 
Ni = [len(t) for t in tau]\n aux = []\n # generating grid points with the required order for pppack\n for x, i in zip(itertools.product(*tau), itertools.product(*[range(nx) for nx in Ni])):\n aux.append([x, getidx(i, Ni)])\n grid = []\n for point in sorted([v for v in aux], key=lambda tup: (tup[1])):\n grid.append(point[0])\n return grid", "def projectPoint(self,p):\n a,b,c = self.a, self.b, self.c\n x,y = p\n return numpy.array( [ b*(x*b-y*a) - c*a, a*(y*a-x*b) - c*b ] )", "def cp2p(np.ndarray[DTYPE_t, ndim=1] cp, DTYPE_t alpha, DTYPE_t rh, DTYPE_t rw):\n#def cp2p( cp, alpha, rh, rw):\n\tdirH=np.array([np.cos(alpha),np.sin(alpha)])\n\tdirW=np.array([np.sin(alpha),-np.cos(alpha)])\n\treturn np.array( [ _r+cp for _r in [rh*dirH+rw*dirW, -rh*dirH+rw*dirW, -rh*dirH-rw*dirW, rh*dirH-rw*dirW] ]).transpose()", "def generate_aima_grid():\n\n # https://stats.stackexchange.com/questions/339592/how-to-get-p-and-r-values-for-a-markov-decision-process-grid-world-problem\n\n actions_map = {0: '^', 1: 'V', 2: '<', 3: '>'}\n\n transitions = np.zeros((4, 12, 12)) # (A, S, S)\n transitions[0] = [[0.9, 0.1, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0.1, 0.8, 0.1, 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.1, 0.8, 0.1, 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0.8, 0., 0., 0., 0.2, 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.8, 0., 0., 0.1, 0., 0.1, 0., 0., 0., 0., 0.],\n [0., 0., 0.8, 0., 0., 0., 0.1, 0.1, 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.8, 0., 0., 0., 0.1, 0.1, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.1, 0.8, 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0.8, 0., 0., 0.1, 0., 0.1],\n [0., 0., 0., 0., 0., 0., 0., 0.8, 0., 0., 0.1, 0.1]]\n\n transitions[1] = [[0.1, 0.1, 0., 0., 0.8, 0., 0., 0., 0., 0., 0., 0.],\n [0.1, 0.8, 0.1, 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.1, 0., 0.1, 0., 0., 0.8, 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.2, 0., 0., 0., 0.8, 0., 0., 0.],\n [0., 0., 0., 0., 0.1, 0., 0.1, 0., 0., 0.8, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.1, 0.1, 0., 0., 0.8, 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.9, 0.1, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.1, 0.8, 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.1, 0.8, 0.1],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.1, 0.9]]\n\n transitions[2] = [[0.9, 0., 0., 0., 0.1, 0., 0., 0., 0., 0., 0., 0.],\n [0.8, 0.2, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.8, 0.1, 0., 0., 0., 0.1, 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0.1, 0., 0., 0., 0.8, 0., 0., 0., 0.1, 0., 0., 0.],\n [0., 0.1, 0., 0., 0.8, 0., 0., 0., 0., 0.1, 0., 0.],\n [0., 0., 0.1, 0., 0., 0., 0.8, 0., 0., 0., 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.1, 0., 0., 0., 0.9, 0., 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.8, 0.2, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0.1, 0., 0., 0.8, 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0.1, 0., 0., 0.8, 0.1]]\n\n transitions[3] = [[0.1, 0.8, 0., 0., 0.1, 0., 0., 0., 0., 0., 0., 0.],\n [0., 0.2, 0.8, 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0.1, 0.8, 0., 0., 0.1, 0., 0., 0., 0., 0.],\n [0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0.1, 0., 0., 0., 0.8, 0., 0., 0., 0.1, 0., 0., 0.],\n [0., 0.1, 0., 0., 0., 0., 0.8, 0., 0., 0.1, 0., 0.],\n [0., 0., 0.1, 0., 0., 0., 0., 0.8, 0., 0., 0.1, 0.],\n [0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],\n [0., 0., 0., 
0., 0.1, 0., 0., 0., 0.1, 0.8, 0., 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.2, 0.8, 0.],\n [0., 0., 0., 0., 0., 0., 0.1, 0., 0., 0., 0.1, 0.8],\n [0., 0., 0., 0., 0., 0., 0., 0.1, 0., 0., 0., 0.9]]\n\n rewards = np.asarray((-0.02, -0.02, -0.02, 1, -0.02, -0.02, -0.02, -1, -0.02, -0.02, -0.02, -0.02))\n\n print('\\n***** aima grid world *****\\n')\n print('Transition matrix:', transitions.shape)\n print('Reward matrix:', rewards.shape)\n\n return transitions, rewards", "def _paa(self):\n self.paa = np.array([self.series[i * self.points_per_symbol : (i + 1) * self.points_per_symbol].mean() for i in range(len(self.series) / self.points_per_symbol)])", "def crossover_arith(pa, ma):\n alpha = nprand.uniform(0, 2.)\n newdata = np.empty(pa.data.shape)\n if alpha < 1.0:\n newdata[:, R:] = pa.data[:, R:].copy()\n else:\n newdata[:, R:] = ma.data[:, R:].copy()\n newdata[:, :R] = alpha * pa.data[:, :R]+ (1 - alpha) * ma.data[:, :R]\n newdata.clip(0., 1., out = newdata)\n newind = Individual(newdata)\n return newind", "def _precompute_xl(self, p: int) -> List[int]:\n res = [1]\n val = 1\n for _ in range(len(self._s)):\n val = (val * self.X) % p\n res.append(val)\n return res", "def project(self):\n def _project(point):\n return (\n point[0]/(point[2]/Window.COP_DISTANCE+1),\n point[1]/(point[2]/Window.COP_DISTANCE+1))\n\n self._points = [list(map(_project, face)) for face in self._points]", "def algorithm_4_9(p, tau, t):\n m = len(t) - (p + 1)\n n = len(tau) - (p + 1)\n\n a = np.zeros(shape=(m, n))\n t = np.array(t, dtype=np.float64)\n tau = np.array(tau, dtype=np.float64)\n\n for i in range(m):\n mu = index(t[i], tau)\n b = 1\n for k in range(1, p + 1):\n tau1 = tau[mu - k + 1:mu + 1]\n tau2 = tau[mu + 1:mu + k + 1]\n omega = (t[i + k] - tau1) / (tau2 - tau1)\n b = np.append((1 - omega) * b, 0) + np.insert((omega * b), 0, 0)\n\n a[i, mu - p:mu + 1] = b\n return a", "def generate(self, objective, nb_anchor_points=10, nb_samples=1000):\n # No checks are made for duplicate points here. 
We could try to include something to ensure that the points\n # are somehow separated from each other.\n points = np.array([self.manifold.rand() for i in range(nb_samples)])\n\n if self.matrix_to_vector_transform is not None:\n # Transform the sampled matrix points in vectors\n points = np.array([self.matrix_to_vector_transform(points[i]) for i in range(nb_samples)])\n\n scores = objective(points)[0][:, 0]\n\n anchor_points = points[np.argsort(scores)[:min(len(scores), nb_anchor_points)], :]\n\n return anchor_points", "def beta_gen_slope(p):\n cardi = 0.005\n return np.array( [0]*int(p-int(cardi*p)) + list(np.arange(1, int(cardi*p)+1, 1)) )", "def prob4():\n\n\n N = 500000\n random_draws = np.random.multivariate_normal(mean = [-1,1], cov =[[1,0],[0,1]], size = N)\n\n h = lambda x: x[0] < -1 and x[1] > 1\n f = lambda x: stats.multivariate_normal(mean = [ 0, 0]).pdf(x)\n g = lambda x: stats.multivariate_normal(mean = [-1, 1]).pdf(x)\n\n probability = [h(random_draws[i]) * f(random_draws[i]) / g(random_draws[i]) for i in range(N)]\n\n return 1./N * np.sum(probability)", "def compute_parallel_points(self, sorted_pts, np, display_opt):\n # find lowest point (get point closest to epi_apex_node)\n numPoints = sorted_pts.shape[0]\n ds = np.zeros((numPoints,), dtype=float)\n\n epi_apex_node = np.asarray(self.epi_apex_node)\n\n for i in range(numPoints):\n ds[i] = np.linalg.norm(epi_apex_node - sorted_pts[i])\n\n lowest_point_id = np.argmin(ds)\n\n # create linspace using lowest point id\n num_pts_left = lowest_point_id\n num_pts_right = numPoints - lowest_point_id\n print('num pts left = ', num_pts_left)\n print('num points right = ', num_pts_right)\n\n scale = 0.3 # we only want 30% of points on one side\n max_ls_idx = int(scale * num_pts_left)\n max_rs_idx = int(scale * num_pts_right)\n idxs = np.arange(lowest_point_id-max_ls_idx, lowest_point_id+max_rs_idx, np, dtype=int)\n\n # get points\n parapts = np.zeros((np, 3), dtype=float)\n for i, idx in enumerate(idxs):\n parapts[i] = sorted_pts[idx]\n\n if display_opt:\n parapts_act = include_points(parapts, parapts.shape[0], 7, (0,1,0))\n ren = vtk.vtkRenderer()\n ren.SetBackground(1.0,1.0,1.0)\n ren.AddActor(self.endoActor)\n ren.AddActor(parapts_act)\n vtk_show(ren)\n\n return parapts", "def dist_albedo(self, p):\r\n\r\n return self.uniform(p, self.prange)", "def calculate_rbpe(p, limits=(np.inf, np.inf)):\n rbpes = []\n\n def evaluate_pdf_at_z(z, dist):\n return dist.pdf(z)[0][0]\n\n for n in range(0, p.npdf):\n\n if p[n].npdf != 1:\n raise ValueError('quick_rbpe only handles Ensembles with a single PDF '\n 'for ensembles with more than one PDF, use the qp.metrics.risk_based_point_estimate function.')\n\n this_dist_pdf_at_z = partial(evaluate_pdf_at_z, dist=p[n])\n integration_bounds = (p[n].ppf(0.01)[0][0], p[n].ppf(0.99)[0][0])\n\n rbpes.append(array_metrics.quick_rbpe(this_dist_pdf_at_z, integration_bounds, limits))\n\n return np.array(rbpes)", "def p_adjust(data, pv_index=0, method='bonf', alpha=0.05):\n\n if isinstance(data, pd.DataFrame):\n if isinstance(pv_index, int):\n pv_index = data.columns.get_values()[pv_index]\n data =data.rename(columns ={pv_index: \"p_value\"})\n ## error for non-numeric data frame column\n if not (np.issubdtype(data['p_value'].dtypes, np.number)):\n raise TypeError(\"Please ensure you have specified the column index of numeric p-values.\")\n else:\n data = pd.DataFrame({\"p_value\": data})\n # set the size of the data\n \n ##added an exception \n if (data[\"p_value\"].max()> 1) or (data[\"p_value\"].max()< 0):\n 
raise ProbabilityError(\"One or more p-values is not between 0 and 1!\")\n\n m = data.shape[0]\n\n # sort p-values\n df = data.sort_values(by=['p_value'])\n df[\"rank\"] = round(df.rank(axis=0, method='min')[\"p_value\"])\n df[\"bh_value\"] = alpha * df[\"rank\"] / m\n\n ### generate final data frame\n df[\"bonf_pvalue\"] = np.where(df['p_value'] * m < 1, df['p_value'] * m, 1)\n df[\"bh_pvalue\"] = df['p_value'] / df[\"rank\"] * m\n\n\n if method == 'bh' or method == 'fdr':\n df[\"adjusted\"] = df['p_value'] / df[\"rank\"] * m\n return (df[['p_value', 'adjusted']])\n if method == 'bonf' or method == 'bonferroni':\n df[\"adjusted\"] = df['p_value'] * m\n return (df[['p_value', 'adjusted']])\n else:\n raise ValueError(\"Method should be set as 'bonf' or 'bh' corrections\")", "def random_projection_cosine_split(data, indices, rng_state):\n dim = data.shape[1]\n\n # Select two random points, set the hyperplane between them\n left_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index = tau_rand_int(rng_state) % indices.shape[0]\n right_index += left_index == right_index\n right_index = right_index % indices.shape[0]\n left = indices[left_index]\n right = indices[right_index]\n\n left_norm = norm(data[left])\n right_norm = norm(data[right])\n \n if left_norm == 0.0:\n left_norm = 1.0\n \n if right_norm == 0.0:\n right_norm = 1.0\n\n # Compute the normal vector to the hyperplane (the vector between\n # the two points)\n hyperplane_vector = np.empty(dim, dtype=np.float32)\n\n for d in range(dim):\n hyperplane_vector[d] = ((data[left, d] / left_norm) -\n (data[right, d] / right_norm))\n\n hyperplane_norm = norm(hyperplane_vector)\n if hyperplane_norm == 0.0:\n hyperplane_norm = 1.0\n \n for d in range(dim):\n hyperplane_vector[d] = hyperplane_vector[d] / hyperplane_norm\n\n # For each point compute the margin (project into normal vector)\n # If we are on lower side of the hyperplane put in one pile, otherwise\n # put it in the other pile (if we hit hyperplane on the nose, flip a coin)\n n_left = 0\n n_right = 0\n side = np.empty(indices.shape[0], np.int8)\n for i in range(indices.shape[0]):\n margin = 0.0\n for d in range(dim):\n margin += hyperplane_vector[d] * data[indices[i], d]\n\n if margin == 0:\n side[i] = tau_rand_int(rng_state) % 2\n if side[i] == 0:\n n_left += 1\n else:\n n_right += 1\n elif margin > 0:\n side[i] = 0\n n_left += 1\n else:\n side[i] = 1\n n_right += 1\n\n # Now that we have the counts allocate arrays\n indices_left = np.empty(n_left, dtype=np.int64)\n indices_right = np.empty(n_right, dtype=np.int64)\n\n # Populate the arrays with indices according to which side they fell on\n n_left = 0\n n_right = 0\n for i in range(side.shape[0]):\n if side[i] == 0:\n indices_left[n_left] = indices[i]\n n_left += 1\n else:\n indices_right[n_right] = indices[i]\n n_right += 1\n\n return indices_left, indices_right", "def uniform_but_one_dataset(n, p):\n elements = []\n for i in range(n):\n elements.append((i, 1))\n elements.append((1, (n**(1.0 / p)) - 1))\n return elements", "def P_order_prime(a,seedE,b,p,Ordercurve,facto):\n x,y,P=random_point_with_a_seed_of_curve(a,seedE,b,p)\n OrderP=OrderPoint(P,Ordercurve,facto)\n pr=Prime(OrderP)\n while(not pr):\n #print(\"Work Point\")\n x,y,P=random_point_with_a_seed_of_curve(a,seedE,b,p)\n OrderP=OrderPoint(P,Ordercurve,facto)\n pr=Prime(OrderP)\n return P,OrderP", "def CreateReleasePoints(points_on_longitude, points_on_latitude, grids):\n \n ReleasePointsLon = []\n ReleasePointsLat = []\n \n GridsCW_array = 
np.asarray(grids[['min_lon', 'min_lat', 'max_lon', 'max_lat']])\n \n for i in range(len(GridsCW_array)):\n \n lon_space = np.linspace(GridsCW_array[i,0], GridsCW_array[i,2], num = points_on_longitude+2 )\n lat_space = np.linspace(GridsCW_array[i,1], GridsCW_array[i,3], num = points_on_latitude+2 )\n \n \n lon_space_cor = lon_space[1:-1]\n lat_space_cor = lat_space[1:-1]\n \n for j in lon_space_cor:\n for k in lat_space_cor:\n \n ReleasePointsLon.append(j)\n ReleasePointsLat.append(k)\n \n return ReleasePointsLon, ReleasePointsLat", "def p_methods(data, pv_index=0, alpha = 0.05):\n\n #### Raise an error for an impossible alpha value\n if (alpha>= 1) or (alpha<= 0):\n raise ProbabilityError(\"alpha needs to be between 0 and 1!\")\n \n ####if it's a pd.dataframe, rename to col header\n if isinstance(data, pd.DataFrame):\n if isinstance(pv_index, int):\n pv_index = data.columns.get_values()[pv_index]\n data =data.rename(columns ={pv_index: \"p_value\"})\n ###or make a vector a pd.dataframe\n else:\n data = pd.DataFrame({\"p_value\": data})\n\n if (data[\"p_value\"].max()> 1) or (data[\"p_value\"].max()< 0):\n raise ProbabilityError(\"One or more p-values is not between 0 and 1!\") \n\n ###set the size of the data\n m = data.shape[0]\n\n ###find the smallest p_value st. p<k*alpha/m (BH method):\n ##set the rank, making ties the minimum\n df =data.sort_values(by=[\"p_value\"])\n df[\"rank\"]=round(df.rank(axis=0, method = 'min')[\"p_value\"])\n df[\"bh_value\"] = alpha*df[\"rank\"]/m\n df_temp = df\n df_temp[\"bh_sig\"]= np.where(df_temp[\"p_value\"] <= df_temp[\"bh_value\"], True, False)\n df_temp =df_temp[df_temp[\"bh_sig\"]==True]\n\n ###the maximum true value\n\n if len(df_temp[\"bh_sig\"]) == 0:\n max_true = 0\n else:\n max_true = max(df_temp[\"rank\"])\n\n ####Back to cool dataframe work!\n df[\"bh_significant\"]=np.where(df[\"rank\"]<=max_true, True, False)\n df[\"bonf_value\"] = alpha/m\n df[\"bonf_significant\"] = np.where(df[\"p_value\"]<=df[\"bonf_value\"], True, False)\n df = df.drop(['rank'], axis=1)\n df = df.drop(['bh_sig'], axis=1)\n\n return(df)", "def pose_in_grid(self, scale):\n xr = int(self.x / scale)\n yr = int(self.y / scale)\n theta = self.theta\n\n return (xr, yr, theta)", "def g_xy(self):\n for x in range(self.size.x):\n for y in range(self.size.y):\n yield self.p[0] + Vect(x, y)", "def project_points_plane(points, plane):\n return [project_point_plane(point, plane) for point in points]", "def pulsatile_flow(r, p0, pn, phi, timestep, grid, ru=1060, mu=.0035, freq=1.5):\n ofst = int(np.round(grid / 2))\n rxl = int(np.round(3 * ofst / 4))\n h = r / rxl\n nw = pn.size\n omega = 2 * np.pi * freq\n u = np.zeros((timestep, grid, grid))\n zt = np.zeros(timestep + 1, np.complex)\n alpha = r * np.sqrt(omega * ru / mu)\n kapa = alpha * 1j ** 1.5 / r\n\n snw = nw * (nw + 1) / 2\n # alpha = alpha * np.sqrt(snw)\n for k in range(timestep):\n t = (k + 1) / timestep / freq\n for l in range(nw):\n zt[k] += pn[l] * np.exp(1j * (omega * t * (l + 1) - phi[l]))\n\n CJA = special.jv(0, kapa * r)\n for m in range(-rxl, rxl):\n for n in range(-rxl, rxl):\n for k in range(timestep):\n ri = np.sqrt(m ** 2 + n ** 2)\n if ri * h < r:\n CBJ0 = special.jv(0, kapa * h * ri)\n u[k, m + ofst, n + ofst] = p0 * ((ri * h) ** 2 - r ** 2) / 4 / mu + np.real(\n 1j / ru / omega / snw * (1 - CBJ0 / CJA) * zt[k])\n\n return u / u.max()", "def G():\n Pz=[40]\n Pp=[1,2,1]\n return Pz, Pp", "def plot_crossing_probability(ax, Percolation) -> np.ndarray:\n\n print(f\"Computing crossing probabilities for 
{Percolation.grid_type} \"\n \"percolation\")\n cross_proba = np.zeros_like(p_values)\n for i in progressbar.progressbar(range(nsim)):\n perco = Percolation(w, h)\n p_cross = perco.find_p_cross()\n cross_proba += np.where(p_values < p_cross, 0, 1)\n\n cross_proba /= nsim\n ax.plot(p_values, cross_proba, '-',\n label=f'{Percolation.grid_type} percolation')", "def random_points_generator(trials_per_node=0):\r\n points_fallen_inside_circle = 0\r\n # draw a pair (x,y) from an uniform distribution\r\n # between 0 and 1 for each trial\r\n # NOTE: each child process has its own seed\r\n # since np.random.seed() hasn't been invoked\r\n # This is good for randomness but terrible for\r\n # reproducibility (which we don't consider in this project)\r\n for _ in range(trials_per_node):\r\n x = np.random.uniform(0,1)\r\n y = np.random.uniform(0,1)\r\n # if (x,y) falls inside the unit circle\r\n # increment the counter by 1\r\n if (x*x+y*y) <= 1.0:\r\n points_fallen_inside_circle += 1\r\n\r\n return points_fallen_inside_circle", "def _personalized_pagerank(seed, W):\n restart_prob = RESTART\n r = restart_prob * seed\n s_ovr = np.copy(r)\n for i in range(MAX_ITER):\n r_new = (1. - restart_prob) * (W.transpose().dot(r))\n s_ovr = s_ovr + r_new\n delta = abs(r_new.sum())\n if delta < 1e-5: break\n r = r_new\n return np.squeeze(s_ovr)", "def axpby(alpha,pepx1,beta,pepx2):\n\n pepx_new = add(mul(alpha,pepx1),mul(beta,pepx))\n return pepx_new", "def rc2p(row, col, N):\n\n # print('row:{} col:{}'.format(row,col))\n return row * (N + 1) + col", "def generatePos(self):\n self.pos = np.zeros((self.num_points, 2), dtype='int32')\n self.pos[:, 1] = np.repeat(list(reversed(np.arange(1, self.x*2, 2))), self.y)\n self.pos[:, 0] = np.tile(np.arange(1, self.x*2, 2), self.y)", "def generate_continuum_realizations_periodic(grid_path, save_path, perm_path, dp_x, dp_y, n_images, print_every=50):\n # loading the grid\n with open(grid_path, 'rb') as input:\n grid = pickle.load(input)\n n_cell = grid.m\n n_face = grid.nr_t\n # initialize perm matrix and pressure solution (n_cell x n_cell x n_perm_fields)\n X, Y = [np.zeros((n_images, n_cell, n_cell, 1)) for i in range(2)]\n # initialize arrays for saving the faces velocities\n U_face = np.zeros((n_images, n_face))\n # initialize the array for saving the face operator and bias\n face_operator_list = []\n face_bias_array = np.zeros((n_images, n_face))\n # load the permeability dataframe, each column is one realization\n # this is the file saved by SGEMS (Geostats software)\n perm_frame = pd.read_csv(perm_path, usecols=range(n_images))\n # initialize a linear system for the pressure fluctuations for the grid\n LS = LinearSystemStandard(grid)\n # initialize the perturbation system object\n PI = PeriodicPerturbations(grid, dp_x, dp_y)\n # for the number of specified realizations run particle tracking and save the results\n for i in range(n_images):\n if not i%print_every:\n print('realization number '+str(i))\n logperm = perm_frame.ix[:, i]\n perm = np.exp(logperm)\n grid.set_transmissibility(perm)\n # solve for fluctuations around mean pressure gradient\n # setting the left hand side of the equation\n LS.fill_matrix(grid.transmissibility)\n # for each cell add (dp_x/lx)*(T_down - T_up)_x + (dp_y/ly)*(T_down - T_up)_y\n # to the rhs\n rhs_vec = PI.periodic_rhs_vec(grid.transmissibility)\n LS.rhs.set_neumann_pores_distributed(range(grid.nr_p), rhs_vec)\n # set a dirichlet cell: no fluctuation for cell 0\n LS.set_dirichlet_pores([0], 0.0)\n LS.solve()\n # copy the pressure solution 
and the permeability field to the X and Y\n X[i, :, :, 0] = np.reshape(logperm, (n_cell, n_cell))\n Y[i, :, :, 0] = np.copy(np.reshape(LS.sol, (n_cell, n_cell)))\n grid.pressure = LS.sol\n # get the operators to calculate face velocity\n U_face_operator, U_face_fixed = PI.face_velocity_operator(grid.transmissibility)\n # save face_velocity\n U_face[i,:] = U_face_operator.dot(LS.sol) + U_face_fixed\n # save the face operator\n face_operator_list.append(U_face_operator)\n face_bias_array[i,:] = U_face_fixed\n # save X, Y, U_face, operators\n np.savez(save_path, X=X, Y=Y, U_face=U_face, U_face_operator=face_operator_list, U_face_fixed=face_bias_array)", "def PRGA_custom(tab):\n i = 0\n j = 0\n while True:\n i = (i + 1) % MOD\n j = (j + tab[i]) % MOD\n yield i+j", "def update_curr_acc_points(self, new_points, ef_pose, step):\n new_points = se3_transform_pc(ef_pose, new_points) \n # the number below can be adjusted for efficiency and robustness\n aggr_sample_point_num = min(int(CONFIG.pt_accumulate_ratio**step * CONFIG.uniform_num_pts), new_points.shape[1])\n index = np.random.choice(range(new_points.shape[1]), size=aggr_sample_point_num, replace=False).astype(np.int)\n\n new_points = new_points[:,index]\n print('new points before filtering with table height', new_points.shape)\n index = new_points[2, :] > self.table_height\n new_points = new_points[:, index]\n print('new points {} total point {}'.format(new_points.shape, self.acc_points.shape))\n\n self.acc_points = np.concatenate((new_points, self.acc_points), axis=1) #\n self.acc_points = regularize_pc_point_count(self.acc_points.T, 4096, use_farthest_point=True).T\n # if it still grows too much, can limit points by call regularize pc point count\n # self.planner.expert_plan can also be called with these dense points directly", "def project(self, alpha):\n ax = alpha[0]\n ay = alpha[1]\n az = alpha[2]\n anorm = ax ** 2.0 + ay ** 2.0 + az ** 2.0\n i = anorm > 1.0\n\n anorm_i = anorm[i] ** 0.5 # Square root is taken here. 
Faster.\n ax[i] = np.divide(ax[i], anorm_i)\n ay[i] = np.divide(ay[i], anorm_i)\n az[i] = np.divide(az[i], anorm_i)\n\n return [ax, ay, az]", "def compute_plperac(ds, plant_bool, verbose=False):\n from utils import get_m_per_px, get_raster_extents, dist_btw_pair_latlon\n\n minx, maxx, miny, maxy = get_raster_extents(geo_transform)\n\n # get the extent of the train field in physical units\n yyyrange_m = dist_btw_pair_latlon(minx, miny, minx, maxy)\n xxxrange_m = dist_btw_pair_latlon(minx, miny, maxx, miny)\n\n if verbose:\n print(\"length spanned by map: {:.2f} [m] x {:.2f} [m]\".format(xxxrange_m, yyyrange_m))\n\n ac2sqm = 4046.86\n p = np.sqrt(ac2sqm)\n delx = p\n dely = p\n\n # assuming in meters\n xmin, xmax, ymin, ymax = 0, xxxrange_m, 0, yyyrange_m\n\n ny = int(np.ceil(np.abs(ymax - ymin)/p))\n nx = int(np.ceil(np.abs(xmax - xmin)/p))\n new = np.zeros((ny, nx))\n # print(ny, nx)\n\n # number of elements in original image w/in p [meters]\n cols = plant_bool.shape[1]\n rows = plant_bool.shape[0]\n\n mPerPixX, mPerPixY = xxxrange_m / cols, yyyrange_m / rows\n nnx = int(p / mPerPixX)\n nny = int(p / mPerPixY)\n\n xticklab = []\n yticklab = []\n for k in range(nx):\n for l in range(ny):\n new[l, k] = plant_bool[nny*l: nny*(l+1), nnx*k: nnx*(k+1)].sum()\n\n return new", "def anabpsk(n_points, n_comp=None, f0=0.25):\n if n_comp is None:\n n_comp = round(n_points / 5.0)\n if (f0 < 0) or (f0 > 0.5):\n raise TypeError(\"f0 must be between 0 and 0.5\")\n m = int(np.ceil(n_points / n_comp))\n jumps = 2.0 * np.round(np.random.rand(m)) - 1\n am = np.repeat(jumps, n_comp)[:n_points]\n y = am * fmconst(n_points, f0, 1)[0]\n return y, am", "def PageRankHelper(start, probs, numIterations, alpha=0.5):\n if numIterations <= 0:\n return probs\n else:\n ProbsPropagated = {}\n\n # with probability 1-alpha, we teleport back to the start\n # node\n ProbsPropagated[start] = 1 - alpha\n \n # Propagate the previous probabilities\n for node, prob in probs.iteritems():\n forwards = list(out_edges.get(node, set()))\n backwards = list(in_edges.get(node, set()))\n\n\n # With probability alpha, we move to a follwer\n # And each node distributes its current probability\n # equally to its neighbours.\n\n ProbtoPropagate = alpha * prob / (len(forwards)+len(backwards))\n\n for neighbour in (forwards+backwards):\n if not ProbsPropagated.has_key(neighbour):\n ProbsPropagated[neighbour] = 0\n\n ProbsPropagated[neighbour] += ProbtoPropagate\n\n return PageRankHelper(start, ProbsPropagated, numIterations-1, alpha)", "def _generate_proposals(self, box):\n # Generate proposals\n num_proposals = self.proposal_params['boxes_per_frame']\n proposals = torch.zeros((num_proposals, 4))\n gt_iou = torch.zeros(num_proposals)\n\n for i in range(num_proposals):\n proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n sigma_factor=self.proposal_params['sigma_factor']\n )\n\n # Map to [-1, 1]\n gt_iou = gt_iou * 2 - 1\n return proposals, gt_iou", "def _generate_proposals(self, box):\n # Generate proposals\n num_proposals = self.proposal_params['boxes_per_frame']\n proposals = torch.zeros((num_proposals, 4))\n gt_iou = torch.zeros(num_proposals)\n\n for i in range(num_proposals):\n proposals[i, :], gt_iou[i] = prutils.perturb_box(box, min_iou=self.proposal_params['min_iou'],\n sigma_factor=self.proposal_params['sigma_factor']\n )\n\n # Map to [-1, 1]\n gt_iou = gt_iou * 2 - 1\n return proposals, gt_iou", "def approximate_PageRank_weighted(G,\n ref_nodes,\n iterations: int = 100000,\n alpha: float = 
0.15,\n rho: float = 1.0e-6): \n \n #print(\"Uses the weighted Andersen Chung and Lang (ACL) Algorithm.\")\n n = G.adjacency_matrix.shape[0]\n (length,xids,values) = aclpagerank_weighted_cpp(n,G.ai,G.aj,G.adjacency_matrix.data,alpha,rho,\n ref_nodes,iterations)\n #p = np.zeros(n)\n #p[xids] = values\n\n return xids, values", "def _calc_pp_pwl_points(ppc_pwl_points):\n\n def construct_list_of_list(row):\n arr = pts[row, ::2]\n arr = np.concatenate((arr[:1], np.repeat(arr[1:-1], 2), arr[-1:])).reshape((-1, 2))\n arr = np.c_[arr, c[row, :]]\n arr = arr[~np.isnan(arr[:, 2])]\n return arr.tolist()\n\n pts = ppc_pwl_points\n if not (pts.shape[1] % 2) == 0:\n raise ValueError(\"_calc_pp_pwl_points() expects ppc_pwl_points with shape[1] is \"\n f\"multiple of 2. However, ppc_pwl_points.shape[1]={ppc_pwl_points}.\")\n c = (pts[:, 3::2] - pts[:, 1:-2:2]) / (pts[:, 2::2] - pts[:, :-2:2])\n return [construct_list_of_list(row) for row in range(pts.shape[0])]", "def project(self, a):\n for g in xrange(0, len(a), 3):\n\n ax = a[g + 0]\n ay = a[g + 1]\n az = a[g + 2]\n anorm = ax ** 2.0 + ay ** 2.0 + az ** 2.0\n i = anorm > 1.0\n\n anorm_i = anorm[i] ** 0.5 # Square root is taken here. Faster.\n ax[i] = np.divide(ax[i], anorm_i)\n ay[i] = np.divide(ay[i], anorm_i)\n az[i] = np.divide(az[i], anorm_i)\n\n a[g + 0] = ax\n a[g + 1] = ay\n a[g + 2] = az\n\n return a", "def calc_points_shop(self):\n rem_pop = self.popula - self.popula_used\n points = min(self.cnt_shop, rem_pop // 5) * 11\n rem_shop = self.cnt_shop - rem_pop // 5\n vptab_shop = (0, 1, 2, 4, 7)\n if rem_shop > 0:\n points += vptab_shop[rem_pop % 5]\n penalty_popula = max(rem_pop - self.cnt_shop * 5, 0)\n points -= penalty_popula\n return points", "def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))", "def Compute_Grid(Idx, Coeff, q_max, rules, growth, LevelMax, sc, p, tol ):\n\n seed = 123456789\n #Coeff= Sandia.calculate_coefficients(Idx, q_max)\n new_np = Sandia.max_next_points(Idx, Coeff, rules, growth)\n points = Sandia.weights_and_points(new_np, LevelMax, Idx, Coeff, growth, rules, sc, p)\n N_Unique, sparse_index = Sandia.unique_points(seed, tol, points)\n return Sandia.reduce_points_and_weights(N_Unique, points, Idx, sparse_index, Coeff, growth, rules, sc, p)", "def __generate_point_based_on_prob(self) -> Point:\n possible = False\n while not possible:\n # make the random decision based on a distribution (hot spots / different probabilities)\n prob_list = self.probability_distribution_grid.flatten()\n selected_index = np.random.choice(\n np.arange(0, len(prob_list)), p=prob_list)\n\n # get the indices of the cell (from the one array index)\n # width is the number of cells in x directions (it starts with cell 0/0) and is needed due to row-major order\n cell_x = int(selected_index % self.occupancy_map.info.width)\n cell_y = int(selected_index / self.occupancy_map.info.width)\n\n # get the real world coordinates (which represents the center of the cell)\n x = self.occupancy_map.info.origin.position.x + \\\n (cell_x + 0.5) * self.occupancy_map.info.resolution\n y = self.occupancy_map.info.origin.position.y + \\\n (cell_y + 0.5) * 
self.occupancy_map.info.resolution\n\n # Check if the actual cell is free of STATIC obstacles (not occupied)\n if not self.__cell_is_occupied(cell_x, cell_y):\n # Check for not occupied neighbors (the robot needs some space the reach it)\n if not self.__has_occupied_neighbors(cell_x, cell_y):\n # If actual spawning of dirt is enabled, then it should also be ensured that no other dirt object is already\n # at this position, because spawning a model in the same location of an already existing model can lead to problems\n if not self.prevent_duplicates or not self.__check_for_duplicates(Point(x, y, 0.0)):\n possible = True\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to already \"\n \"active dirt at this position.\\n\\tGenerating next one...\\n\" % (x, y))\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to occupied neighbor \"\n \"cells.\\n\\tGenerating next one...\\n\" % (x, y))\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to occupied cell.\"\n \"\\n\\tGenerating next one...\\n\" % (x, y))\n return Point(x=x, y=y, z=0.0)", "def obj_points(grid_size, dx):\r\n objp = np.zeros((grid_size[0]*grid_size[1], 3), np.float32)\r\n objp[:,:2] = np.mgrid[0:grid_size[0], 0:grid_size[1]].T.reshape(-1, 2)\r\n\r\n return objp*dx", "def plan(self):\n return [(0, 0), (1, 0), (1, 1), (1, 2), (1, 3)]", "def get_base_p(r, j, to):\n return P((h[r] + v*(j-k)) / (h[r] + mu + v*(j-k)) for k in range(to))", "def PRGA(tab):\n i = 0\n j = 0\n while True:\n i = (i + 1) % MOD\n j = (j + tab[i]) % MOD\n\n tab[i], tab[j] = tab[j], tab[i]\n K = tab[(tab[i] + tab[j]) % MOD]\n yield K", "def pia_from_kdp(kdp, dr, gamma=0.08):\n alpha = gamma * kdp\n return 2 * np.cumsum(alpha, axis=-1) * dr", "def generatePoints(N, k=2, scale=1, same_quadrant=False):\n if same_quadrant:\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n else:\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def gen_a(cls, p):\n a = random.randint(1, p-1)\n while cls.gcd(a, p) != 1:\n a = random.randint(1, p-1)\n return a", "def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):\n\n # Initialize some variables\n print \"Computing pairwise distances...\"\n (n, d) = X.shape;\n sum_X = Math.sum(Math.square(X), 1);\n D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);\n P = Math.zeros((n, n));\n beta = Math.ones((n, 1));\n logU = Math.log(perplexity);\n\n # Loop over all datapoints\n for i in range(n):\n \n # Print progress\n if i % 500 == 0:\n print \"Computing P-values for point \", i, \" of \", n, \"...\"\n \n # Compute the Gaussian kernel and entropy for the current precision\n betamin = -Math.inf; \n betamax = Math.inf;\n Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];\n (H, thisP) = Hbeta(Di, beta[i]);\n \n # Evaluate whether the perplexity is within tolerance\n Hdiff = H - logU;\n tries = 0;\n while Math.abs(Hdiff) > tol and tries < 50:\n \n # If not, increase or decrease precision\n if Hdiff > 0:\n betamin = beta[i];\n if betamax == Math.inf or betamax == -Math.inf:\n beta[i] = beta[i] * 2;\n else:\n beta[i] = (beta[i] + 
betamax) / 2;\n else:\n betamax = beta[i];\n if betamin == Math.inf or betamin == -Math.inf:\n beta[i] = beta[i] / 2;\n else:\n beta[i] = (beta[i] + betamin) / 2;\n \n # Recompute the values\n (H, thisP) = Hbeta(Di, beta[i]);\n Hdiff = H - logU;\n tries = tries + 1;\n \n # Set the final row of P\n P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;\n \n # Return final P-matrix\n print \"Mean value of sigma: \", Math.mean(Math.sqrt(1 / beta))\n return P;", "def pepper(self, prob=0.08):\n h, w, c = self.img.shape\n for i in range(h):\n for j in range(w):\n if random() < prob:\n self.img[i, j] = 0\n\n self.edits.append(f\"pepper:{prob}\")\n return self", "def generatePoints(centre: Point, radius: float, numPoints: int, jitterRatio: float = 0) -> List[Point]:\n def jitter() -> float:\n diamiter = radius * math.pi * 2\n jitterSize = jitterRatio * diamiter / numPoints\n return random.random() * 2 * jitterSize - jitterSize\n\n points: List[Point] = []\n angle_segment = math.pi * 2 / numPoints\n angle = 0\n\n while angle < math.pi * 2:\n point = (centre[0] + radius * math.cos(angle) + jitter(),\n centre[1] + radius * math.sin(angle) + jitter())\n points.append(point)\n angle += angle_segment\n\n return points", "def adj_ptycho_prb(self, data, psi, scan, igpu):\n res = cp.zeros([self.ptheta, self.nprb, self.nprb],\n dtype='complex64')\n data = data.copy() # avoid this todo\n res = cp.ascontiguousarray(res)\n data = cp.ascontiguousarray(data)\n psi = cp.ascontiguousarray(psi)\n scan = cp.ascontiguousarray(scan)\n self.adjprb(res.data.ptr, data.data.ptr,\n psi.data.ptr, scan.data.ptr, igpu)\n return res" ]
[ "0.60559386", "0.59712946", "0.59704006", "0.595379", "0.5917291", "0.58962584", "0.58279955", "0.582432", "0.5792295", "0.57677454", "0.5709391", "0.5696328", "0.56858194", "0.56748235", "0.5633881", "0.5615407", "0.56149656", "0.560475", "0.560267", "0.5601162", "0.5589663", "0.5574519", "0.5569534", "0.55672973", "0.5561524", "0.5556496", "0.5550597", "0.55192643", "0.5518998", "0.551796", "0.5516034", "0.54990005", "0.548948", "0.54877406", "0.5482648", "0.5482426", "0.5481003", "0.5480637", "0.5467645", "0.54650617", "0.5462093", "0.54479486", "0.54471177", "0.5438968", "0.54271984", "0.5423277", "0.5419273", "0.5417014", "0.5403133", "0.5387247", "0.5371631", "0.53640574", "0.53619564", "0.5359899", "0.53549975", "0.5341586", "0.53295934", "0.53144646", "0.5311596", "0.53112936", "0.53102833", "0.53068084", "0.53059983", "0.52997875", "0.52993244", "0.5294438", "0.52882665", "0.5286674", "0.5280995", "0.5280983", "0.52744347", "0.5267102", "0.52653223", "0.52619094", "0.52538985", "0.5253223", "0.5253055", "0.5242978", "0.5238614", "0.5238117", "0.523586", "0.523586", "0.52339315", "0.5233114", "0.52290624", "0.52276665", "0.52274776", "0.5226393", "0.5226259", "0.5224956", "0.5224905", "0.5224484", "0.52193487", "0.5219204", "0.5218816", "0.5218625", "0.5217589", "0.5216155", "0.5215589", "0.521009" ]
0.6255561
0
Calculates the intersection point of a ray with a plane
def LinePlaneCollision(planeNormal, planePoint, rayDirection, rayPoint, epsilon=1e-12): ndotu = planeNormal.dot(rayDirection) if abs(ndotu) < epsilon: raise RuntimeError("no intersection or line is within plane") w = rayPoint - planePoint si = -planeNormal.dot(w) / ndotu Psi = w + si * rayDirection + planePoint return Psi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersects(self, ray):\n theta = 45\n H = 512\n W = 512\n A = self.origin\n B = Point(W, A.y, A.z)\n C = Point(B.x, (int)(H * math.sin(theta * math.pi / 180)), (int)(H * math.cos(math.pi * theta / 180)))\n D = Point(A.x, (int)(H * math.sin(theta * math.pi / 180)), (int)(H * math.cos(math.pi * theta / 180)))\n vec3 = ray.direction * self.normal\n if vec3 != 0:\n vec1 = self.origin - ray.origin\n vec2 = vec1 * self.normal\n dist = vec2 / vec3\n if dist > 0:\n point_on_plane = ray.origin + dist * ray.direction\n if A.x <= point_on_plane.x <= B.x and A.y <= point_on_plane.y <= D.y and B.z <= point_on_plane.z <= C.z:\n #print A, B, C, D, point_on_plane\n return dist", "def intersection(self, ray):\n d_proj = self._normal.dot(ray.d)\n if abs(d_proj) < bounds.too_small:\n return -1.0\n s_proj = (self._origin - ray.o).dot(self._normal)\n if d_proj * s_proj < 0.0:\n # ray going away from plane\n return -1.0\n else:\n return s_proj / d_proj", "def LinePlaneIntersection(line, plane):\n plane = rhutil.coerceplane(plane, True)\n line_points = rhutil.coerce3dpointlist(line, True)\n line = Rhino.Geometry.Line(line_points[0], line_points[1])\n rc, t = Rhino.Geometry.Intersect.Intersection.LinePlane(line, plane) \n if not rc: return scriptcontext.errorhandler()\n return line.PointAt(t)", "def intersection_line_plane(line, plane, epsilon=1e-6):\n pt1 = line[0]\n pt2 = line[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = -dot_vectors(p_norm, v2) / dot\n vec = scale_vector(v1, fac)\n return add_vectors(pt1, vec)\n else:\n return None", "def intersect_plane(L, plane):\n \n # Line U, V\n # Plane N n\n # (VxN-nU:U.N)\n # Note that this is in homogeneous coordinates.\n # intersection of plane (n,p) with the line (v,p)\n # returns point and line parameter\n \n \n den = np.dot(L.w, plane.n)\n \n if abs(den) > (100*_eps):\n P = -(np.cross(L.v, plane.n) + plane.p * L.w) / den\n p = (np.cross(L.v, plane.n) - plane.p * L.w) / den\n \n P = L.pp\n t = np.dot( P-p, N)\n return namedtuple('intersect_plane', 'p t')(P, t)\n else:\n return None", "def rayIntersection(self, ray):\n #t = \"what we are trying to find\"\n l = -ray.mDirection\n l0 = ray.mOrigin\n n = self.mNormal\n p0 = self.mDistance * n\n #p = l0 + l * t\n\n if l.dot(n) > 0:\n v = p0 - l0\n t = -(v.dot(n) / l.dot(n))\n return t\n\n else:\n return None", "def rayIntersection(self, ray):\n\n rotVect = ray.mDirection #math3d.VectorN(math.cos(num), - math.sin(num), 0)\n\n # this give all the lines (red green and blue at the moment)\n tankPos = math3d.VectorN(ray.mOrigin[0], ray.mOrigin[1], 0)\n linkPos = math3d.VectorN(200,200,0)\n v = linkPos - tankPos\n added = (tankPos + getPara(v, rotVect) + getPerp(v, rotVect))\n added2 = tankPos + getPara(v, rotVect) #If the magnitude of this is minus the sphere origin is less than the radius you're in the sphere\n added3 = tankPos + getPerp(v, rotVect)\n added4 = tankPos + rotVect.normalized() * 200 #this is get point only change 200 to dist\n\n\n test = added2 - self.mCenter #checks if in center\n\n\n if test.magnitude() <= self.mRadius:\n green = added2 - ray.mOrigin #this is Qpara\n thing = (self.mSRadius - test.magnitude()**2) ** 0.5\n t = (green.magnitude() - thing)\n print(green.magnitude() - thing)\n return t\n else:\n return None\n\n #print(test.magnitude(), self.mRadius)\n #print(green.magnitude(), \"green\")", "def ray_intersect_triangle(origin, direction, triangle, 
use_planes=False):\n origin = np.array(origin)\n direction = np.array(direction)\n if len(direction.shape) == 1:\n direction = direction.reshape(1, *direction.shape)\n return_single = True\n else:\n return_single = False\n triangle = np.array(triangle)\n if len(triangle.shape) == 2:\n triangle = triangle.reshape(1, *triangle.shape)\n\n v0 = triangle[..., 0, :]\n v1 = triangle[..., 1, :]\n v2 = triangle[..., 2, :]\n u = v1 - v0\n v = v2 - v0\n normal = np.cross(u, v)\n b = np.inner(normal, direction)\n a = my_inner(normal[..., None, :], v0[..., None, :] - origin[None, ..., :])\n\n rI = a / b\n # ray is parallel to the plane\n rI[(b == 0.0)*(a != 0.0)] = np.nan\n # ray is parallel and lies in the plane\n rI[(b == 0.0)*(a == 0.0)] = 0\n\n # check whether the intersection is behind the origin of the ray\n rI[rI < 0.0] = np.nan\n\n if not use_planes:\n w = origin + rI[..., None] * direction - v0[..., None, :]\n denom = my_inner(u, v) * my_inner(u, v) - my_inner(u, u) * my_inner(v, v)\n\n si = (my_inner(u, v)[..., None] * my_inner(w, v[..., None, :]) - my_inner(v, v)[..., None] * my_inner(w, u[..., None, :])) / denom[:, None]\n rI[((si < 0)+(si > 1.0)).astype(bool)] = np.nan\n\n ti = (my_inner(u, v)[..., None] * my_inner(w, u[..., None, :]) - my_inner(u, u)[..., None] * my_inner(w, v[..., None, :])) / denom[:, None]\n rI[((ti < 0.0) + (si + ti > 1.0)).astype(bool)] = np.nan\n\n def nanargmin(a, axis):\n from numpy.lib.nanfunctions import _replace_nan\n a, mask = _replace_nan(a, np.inf)\n res = np.argmin(a, axis=axis)\n return res\n\n index = nanargmin(rI, axis=0)\n rI = rI[index, np.arange(len(index))]\n point = origin + rI[..., None] * direction\n\n if return_single:\n return point[0]\n return point", "def getIntersection(self, ray):\n pass", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def intersect(self, plane, epsilon=0.00001):\r\n den = np.dot(self.direction, plane.normal)\r\n if math.fabs(den) < epsilon:\r\n return None\r\n\r\n result = (-plane.distance - np.dot(plane.normal, self.origin)) / den\r\n\r\n if result < 0.0:\r\n if result < -epsilon:\r\n return None\r\n result = 0.0\r\n return result", "def intersect(self, ray):\n # TODO A5 (Step1) implement this function\n # Copy your implementation from A4\n # Then calculate uv coordinates, to be passed into the Hit initializer\n vs = self.vs\n\n a = vs[0][0] - vs[1][0]\n b = vs[0][1] - vs[1][1]\n c = vs[0][2] - vs[1][2]\n d = vs[0][0] - vs[2][0]\n e = vs[0][1] - vs[2][1]\n f = vs[0][2] - vs[2][2]\n\n ray_dir = ray.direction\n ray_orig = ray.origin\n\n g = ray_dir[0]\n h = ray_dir[1]\n i = ray_dir[2]\n j = vs[0][0] - ray_orig[0]\n k = vs[0][1] - ray_orig[1]\n l = vs[0][2] - ray_orig[2]\n\n M = a * (e * i - h * f) + b * (g * f - d * i) + c * (d * h - e * g)\n\n t = -(f * (a * k - j * b) + e * (j * c - a * l) + d *\n (b * l - k * c)) / M\n\n if (t < ray.start or t > ray.end):\n return no_hit\n\n gamma = (i * (a * k - j * b) + h * (j * c - a * l) + g *\n (b * l - k * c)) / M\n\n if (gamma < 0 or gamma > 1):\n return no_hit\n\n beta = (j * (e * i - h * f) + k * (g * f - d * i) +\n l * (d * h - e * g)) / M\n\n if (beta < 0 or beta > 1 - gamma):\n return no_hit\n\n P = ray_orig + t * ray_dir\n\n unit_normal = normalize(np.cross(vs[0] - vs[2], vs[1] - vs[2]))\n\n A = np.linalg.norm(np.cross(vs[1] - vs[0], vs[2] - vs[0])) / 2\n areaA = np.linalg.norm(np.cross(vs[1] - P, vs[2] - P)) / 
2\n areaB = np.linalg.norm(np.cross(vs[0] - P, vs[2] - P)) / 2\n areaC = np.linalg.norm(np.cross(vs[0] - P, vs[1] - P)) / 2\n u = areaB / A\n v = areaC / A\n return Hit(t, P, unit_normal, vec([u, v]), self.material)", "def three_d_vector_plane_intersection(point_a, point_b, point_c, point_d, point_e):\n a = np.array(point_a)\n b = np.array(point_b)\n c = np.array(point_c)\n nv = plane_equation(point_c, point_d, point_e)\n t = (nv[0] * c[0] + nv[1] * c[1] + nv[2] * c[2] - nv[0] * a[0] - nv[1] * a[1] - nv[2] * a[2]) / \\\n (nv[0] * (b[0] - a[0]) + nv[1] * (b[1] - a[1]) + nv[2] * (b[2]-a[2]))\n x = a[0] + t * (b[0] - a[0])\n y = a[1] + t * (b[1] - a[1])\n z = a[2] + t * (b[2] - a[2])\n intersection = np.array([x, y, z])\n return intersection", "def intersectRay(self, ray):\n # Ray Tracing from the Ground Up, pg. 367\n a, b, c, d = self.a[0] - self.b[0], self.a[0] - self.c[0], ray.d[0], self.a[0] - ray.o[0]\n e, f, g, h = self.a[1] - self.b[1], self.a[1] - self.c[1], ray.d[1], self.a[1] - ray.o[1]\n i, j, k, L = self.a[2] - self.b[2], self.a[2] - self.c[2], ray.d[2], self.a[2] - ray.o[2]\n\n m, n, p = f * k - g * j, h * k - g * L, f * L - h * j\n q, s = g * i - e * k, e * j - f * i\n\n denom = a * m + b * q + c * s\n if denom < self.kEpsilon:\n return None\n\n inv_denom = 1.0 / denom\n\n e1 = d * m - b * n - c * p\n beta = e1 * inv_denom\n\n if 1.0 < beta or beta < 0.0:\n return None\n\n r = e * L - h * i\n e2 = a * n + d * q + c * r\n gamma = e2 * inv_denom\n\n if 1.0 < gamma or gamma < 0.0:\n return None\n\n e3 = a * p - b * r + d * s\n t = e3 * inv_denom\n\n if t < self.kEpsilon:\n return None\n\n return t", "def intersect(self, ray):\n\n t = None\n hit = None\n angle = ray.dir.dot(self.norm)\n if angle != 0:\n t = (self.point - ray.start).dot(self.norm) / angle\n if angle < 0:\n hit = Hit(self, ray, t, float('inf'), self.norm, self.mat)\n else:\n hit = Hit(self, ray, float('-inf'), t, self.norm, self.mat)\n else:\n vector = unit(ray.start - self.point)\n if vector.dot(self.norm) < 0:\n hit = Hit(self, ray, float('-inf'), float('inf'), self.norm, self.mat)\n else:\n return None\n if (self.mat.texture is not None and not isninf(hit.entry)) > 0:\n hit.texCords = self.texCords(ray.pos(t))\n return hit", "def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. 
Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point", "def intersection_plane_plane(plane1, plane2, epsilon=1e-6):\n # check for parallelity of planes\n if abs(dot_vectors(plane1[1], plane2[1])) > 1 - epsilon:\n return None\n vec = cross_vectors(plane1[1], plane2[1]) # direction of intersection line\n p1 = plane1[0]\n vec_inplane = cross_vectors(vec, plane1[1])\n p2 = add_vectors(p1, vec_inplane)\n px1 = intersection_line_plane((p1, p2), plane2)\n px2 = add_vectors(px1, vec)\n return (px1, px2)", "def intersection_segment_plane(segment, plane, epsilon=1e-6):\n pt1 = segment[0]\n pt2 = segment[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = -dot_vectors(p_norm, v2) / dot\n if fac > 0. and fac < 1.:\n vec = scale_vector(v1, fac)\n return add_vectors(pt1, vec)\n return None\n else:\n return None", "def points_on_lines(hyperplanes):\n intersections = []\n for row in hyperplanes:\n intersections.append(an_intersection(row[:-1], -row[-1]))\n return np.array(intersections)", "def intersects(self, ray):\n def raySegmentIntersectAB(self, ray):\n \"\"\"\n recibes a ray. checks if it intersects the segment\n dot: denominator. 
if dot = 0 they're paralel\n t1: distance from origin to intersection\n t2: intersection IN the segment\n \"\"\"\n v1 = ray.origin - self.pointA\n v2 = self.pointB - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n def raySegmentIntersectCD(self, ray):\n v1 = ray.origin - self.pointC\n v2 = self.pointD - self.pointC\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n def raySegmentIntersectAC(self, ray):\n v1 = ray.origin - self.pointA\n v2 = self.pointC - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n\n def raySegmentIntersectBD(self, ray):\n v1 = ray.origin - self.pointB\n v2 = self.pointD - self.pointB\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None\n \n \n minD = 9999\n distance_AB = raySegmentIntersectAB(self, ray)\n distance_CD = raySegmentIntersectCD(self, ray)\n distance_AC = raySegmentIntersectAC(self, ray)\n distance_BD = raySegmentIntersectBD(self, ray)\n \n if distance_AB is not None:\n minD = distance_AB\n \n if distance_CD is not None:\n if distance_CD < minD:\n minD = distance_CD\n \n if distance_AC is not None:\n if distance_AC < minD:\n minD = distance_AC\n \n if distance_BD is not None:\n if distance_BD < minD:\n minD = distance_BD\n\n if minD is not None:\n if minD != 9999:\n return minD\n return None\n \"\"\"\n minD = raySegmentIntersectBD(self, ray)\n #print (minD)\n return minD\n \"\"\"", "def intersection_plane_plane_plane(plane1, plane2, plane3, epsilon=1e-6):\n line = intersection_plane_plane(plane1, plane2, epsilon)\n if not line:\n return None\n pt = intersection_line_plane(line, plane3, epsilon)\n if pt:\n return pt\n return None", "def intersect(self, ray):\n # TODO A5 (Step1) implement this function\n # Copy your implementation from A4\n # Then calculate uv coordinates, to be passed into the Hit initializer\n D = ray.direction\n E = ray.origin\n C = self.center\n R = self.radius\n B = 2*np.dot(D, E-C)\n A = np.dot(D, D)\n min_t = ray.start\n max_t = ray.end\n\n discriminant = B ** 2 - 4 * A * (np.dot(E-C, E-C)-R**2)\n\n if discriminant < 0:\n return no_hit\n\n t0 = (-1*B - np.sqrt(discriminant)) / (2*A)\n t1 = (-1*B + np.sqrt(discriminant)) / (2*A)\n\n if (t0 >= min_t and t0 <= max_t and t0 <= t1):\n t = t0\n elif (t1 >= min_t and t1 <= max_t):\n t = t1\n else:\n return no_hit\n\n P = E + t * D\n unit_normal = (P - C) / R\n d_hat = normalize(P - C)\n u = 0.5 + (np.arctan2(d_hat[0], d_hat[2])) / (2 * np.pi)\n v = 0.5 + (np.arcsin(d_hat[1])) / np.pi\n\n return Hit(t, P, unit_normal, vec([u, v]), self.material)", "def intersect(self, ray):\n # TODO A5 (Step3 and Step4) implement this function\n # For step 4, check if uvs and normals are not None (respectively)\n # If so, then interpolate them\n\n # batch_intersect returns t, beta, gamma, i\n posns = self.posns\n uvs = self.uvs\n inds = self.inds\n normals 
= self.normals\n t, beta, gamma, i = batch_intersect(posns[inds[:, :]], ray)\n if (t == np.inf):\n return no_hit\n vs = posns[inds[i, :]]\n P = ray.origin + t * ray.direction\n\n if (t == np.inf):\n return no_hit\n else:\n\n alpha = 1 - beta - gamma\n\n if uvs is not None:\n\n uv0 = uvs[inds[i][0]]\n uv1 = uvs[inds[i][1]]\n uv2 = uvs[inds[i][2]]\n\n uv = alpha * uv0 + beta * uv1 + gamma * uv2\n\n else:\n\n A = np.linalg.norm(np.cross(vs[1] - vs[0], vs[2] - vs[0])) / 2\n areaA = np.linalg.norm(np.cross(vs[1] - P, vs[2] - P)) / 2\n areaB = np.linalg.norm(np.cross(vs[0] - P, vs[2] - P)) / 2\n areaC = np.linalg.norm(np.cross(vs[0] - P, vs[1] - P)) / 2\n u = areaB / A\n v = areaC / A\n uv = vec([u, v])\n\n if normals is not None:\n\n n0 = normals[inds[i][0]]\n n1 = normals[inds[i][1]]\n n2 = normals[inds[i][2]]\n\n unit_normal = normalize(alpha * n0 + beta * n1 + gamma * n2)\n\n else:\n unit_normal = normalize(np.cross(vs[0] - vs[2], vs[1] - vs[2]))\n\n return Hit(t, P, unit_normal, uv, self.material)", "def linesegment_plane_intersection(self, p0,p1,point,normal): # only returns lines...intersections through the segment end points are ignored\n\t\tp0dot=numpy.dot(p0-point,normal)\n\t\tp1dot=numpy.dot(p1-point,normal)\n\t\tif (p0dot>0 and p1dot<0) or (p0dot<0 and p1dot>0): \n\t\t\t# if the dot products have opposing signs, then the line intersects the plane\n\t\t\treturn True,p0+(p1-p0)*abs(p0dot)/(abs(p0dot)+abs(p1dot))\n\t\telse:\n\t\t\treturn False", "def raySegmentIntersectAB(self, ray):\n v1 = ray.origin - self.pointA\n v2 = self.pointB - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None", "def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)", "def _get_intersection(self, ray):\n\n intersection = None\n for obj in self.objects:\n dist = obj.intersects(ray)\n if dist is not None and \\\n (intersection is None or dist < intersection[1]):\n intersection = obj, dist\n\n return intersection", "def ray_segment_intersect(p_ray, d_ray, seg):\n d_seg = seg[1] - seg[0]\n\n t_max = np.linalg.norm(d_seg)\n\n d_seg = d_seg / t_max\n d_ray = d_ray / np.linalg.norm(d_ray)\n\n D = np.stack([d_ray, -d_seg], axis=1)\n b = seg[0] - p_ray\n\n try:\n T = np.linalg.solve(D, b)\n except np.linalg.LinAlgError as e:\n # D is a singular matrix, lines are parallel\n return None\n\n # 0 <= T[1] < t_max because if the ray intersects perfectly with vertices then they will\n # T[0] > 0 ray shoots only in one direction\n # be included twice because they are the end and the beginning of a two segments\n if 0 <= T[1] < t_max and T[0] > 0 and np.allclose(np.dot(D, T), b):\n return seg[0] + d_seg * T[1]\n else:\n return None", "def intersection(self, ray):\n \n points = []\n intersection_objects = []\n for obj in 
self.objects:\n intersection = obj.shape.intersection(ray)\n if intersection != None:\n for pt in intersection:\n points.append(pt)\n intersection_objects.append(obj)\n \n if len(points) == 0:\n return None, None\n return points, intersection_objects", "def general_plane_intersection(n_a, da, n_b, db):\n \n # https://en.wikipedia.org/wiki/Intersection_curve\n \n n_a = np.array(n_a)\n n_b = np.array(n_b)\n da = np.array(da)\n db = np.array(db)\n \n l_v = np.cross(n_a, n_b)\n norm_l = sqrt(np.dot(l_v, l_v))\n if norm_l == 0:\n return None\n else:\n l_v /= norm_l\n aa = np.dot(n_a, n_a)\n bb = np.dot(n_b, n_b)\n ab = np.dot(n_a, n_b)\n d_ = 1./(aa*bb - ab*ab)\n l_0 = (da*bb - db*ab)*d_*n_a + (db*aa - da*ab)*d_*n_b\n \n return l_v, l_0", "def triangle_plane_intersection(self,p0,p1,p2,point,normal):\n\t\ttol=0.00001\n\t\n\t\t# handle all of the stupid cases before we do costly math\n\t\n\t\t#basic stuff\n\t\tp0dp=numpy.dot(p0-point,normal)\n\t\tp1dp=numpy.dot(p1-point,normal)\n\t\tp2dp=numpy.dot(p2-point,normal)\n\t\tp0ip=numpy.abs(p0dp)<tol # p0 in-plane\n\t\tp1ip=numpy.abs(p1dp)<tol # p1 in-plane\n\t\tp2ip=numpy.abs(p2dp)<tol # p02in-plane\n\n\t\t# are all vertices of the triangle in the plane?\n\t\tif (p0ip)&(p1ip)&(p2ip): # yes, triangle is in the plane\n\t\t\treturn [p0,p1,p2]\n\t\n\t\t# are all vertices of the triangle on the same side?\n\t\tif (not(p0ip))&(not(p1ip))&(not(p2ip))&(numpy.sign(p0dp)==numpy.sign(p1dp))&(numpy.sign(p0dp)==numpy.sign(p2dp)): # yup, they are all on the same side\n\t\t\treturn []\n\t\n\t\t# is one vertex in the plane?\n\t\tif (p0ip)&(not(p1ip))&(not(p2ip)): #just p0 in plane\n\t\t\treturn [p0]\n\t\telif (not(p0ip))&(p1ip)&(not(p2ip)): #just p1 in plane\n\t\t\treturn [p1]\n\t\telif (not(p0ip))&(not(p1ip))&(p2ip): #just p2 in plane\n\t\t\treturn [p2]\n\t\n\t\t# is one line of the triangle in the plane?\n\t\tif (p0ip)&(p1ip)&(not(p2ip)): #L1 in plane\n\t\t\treturn [p0,p1]\n\t\telif (not(p0ip))&(p1ip)&(p2ip): #L2 in plane\n\t\t\treturn [p1,p2]\n\t\telif (p0ip)&(not(p1ip))&(p2ip): #L3 in plane\n\t\t\treturn [p0,p2]\n\t\n\t\t# if we have gotten this far, we have to actually calculate intersections\n\t\tif numpy.sign(p0dp)==numpy.sign(p1dp):\n\t\t\tl2b,l2i=self.linesegment_plane_intersection(p1,p2,point,normal)\n\t\t\tl3b,l3i=self.linesegment_plane_intersection(p0,p2,point,normal)\n\t\t\tif (l2b)&(l3b): #sanity check only, should always be true\n\t\t\t\treturn [l2i,l3i]\n\t\telif numpy.sign(p2dp)==numpy.sign(p1dp):\n\t\t\tl1b,l1i=self.linesegment_plane_intersection(p0,p1,point,normal)\n\t\t\tl3b,l3i=self.linesegment_plane_intersection(p0,p2,point,normal)\n\t\t\tif (l1b)&(l3b): #sanity check only, should always be true\n\t\t\t\treturn [l1i,l3i]\n\t\telse:\n\t\t\tl1b,l1i=self.linesegment_plane_intersection(p0,p1,point,normal)\n\t\t\tl2b,l2i=self.linesegment_plane_intersection(p1,p2,point,normal)\n\t\t\tif (l1b)&(l2b): #sanity check only, should always be true\n\t\t\t\treturn [l1i,l2i]\n\t\n\t\t# If the function makes it this far, I have no idea what is going on.\n\t\treturn \"bananna pants\"", "def intersect_line(self, line: Line, **kwargs) -> Point:\n if self.normal.is_perpendicular(line.direction, **kwargs):\n raise ValueError(\"The line and plane must not be parallel.\")\n\n vector_plane_line = Vector.from_points(self.point, line.point)\n\n num = -self.normal.dot(vector_plane_line)\n denom = self.normal.dot(line.direction)\n\n # Vector along the line to the intersection point.\n vector_line_scaled = num / denom * line.direction\n\n return line.point + 
vector_line_scaled", "def intersectsRay(self, ray):\n pass", "def is_intersection_line_plane(line, plane, epsilon=1e-6):\n pt1 = line[0]\n pt2 = line[1]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n return True\n return False", "def intersect(self, ray):\n # TODO A5 copy your implementation from A4\n surfaces = self.surfs\n\n min_t = np.inf\n i = no_hit\n\n for s in surfaces:\n intersect = s.intersect(ray)\n if (intersect.t < min_t):\n min_t = intersect.t\n i = intersect\n return i", "def intersection(self, segment):\n p0, p1 = segment.p0, segment.p1\n\n # x = t*(p1 - p0) + p0\n # n'*(x - origin) = 0\n # combine to get\n # n'*(t*(p1-p0) + p0 - origin) = 0\n # solve for t\n\n v = p1 - p0\n w = p0 - self.origin\n t = -np.dot(self.normal, w)/np.dot(self.normal, v)\n\n if 0-epsilon <= t <= 1+epsilon:\n return t*(p1-p0) + p0\n else:\n return None", "def line_sphere_intersection(p1, p2, c, r):\n\t# FILL in your code here\n\n\tline_vector=np.subtract(p2,p1) #np.array([p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2] ])\n\tval=np.sqrt(np.sum([(p2 - p1)**2\n\t\t\t\t\t\t for p1, p2 in zip(p1,p2)]))\n\n\tif val==0:\n\t\tunit_vector=np.array([0,0,0])\n\telse:\n\t\tunit_vector=[linevec/val for linevec in line_vector]\n\tvecO_C=np.subtract(p1,c)\n\t\t\n\tres=np.dot(unit_vector,vecO_C)* np.dot(unit_vector,vecO_C) - ( np.dot(vecO_C, vecO_C) - r*r )\n\treturn res", "def intersection_line_line(ab, cd):\n a, b = ab\n c, d = cd\n\n line_vector_1 = vector_from_points(a, b)\n line_vector_2 = vector_from_points(c, d)\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n\n normal_1 = cross_vectors(line_vector_1, d_vector)\n normal_2 = cross_vectors(line_vector_2, d_vector)\n plane_1 = (a, normal_1)\n plane_2 = (c, normal_2)\n\n intx_point_line_1 = intersection_line_plane(ab, plane_2)\n intx_point_line_2 = intersection_line_plane(cd, plane_1)\n\n return [intx_point_line_1, intx_point_line_2]", "def ray_trace(x, y, poly):\n\n @vectorize([bool_(float64, float64)])\n def ray(x, y):\n # where xy is a coordinate\n n = len(poly)\n inside = False\n p2x = 0.0\n p2y = 0.0\n xints = 0.0\n p1x, p1y = poly[0]\n for i in range(n + 1):\n p2x, p2y = poly[i % n]\n if y > min(p1y, p2y):\n if y <= max(p1y, p2y):\n if x <= max(p1x, p2x):\n if p1y != p2y:\n xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x\n if p1x == p2x or x <= xints:\n inside = not inside\n p1x, p1y = p2x, p2y\n return inside\n\n return ray(x, y)", "def intersect(self,ray:Ray):\n o = ray.o #ray origin\n d = ray.d #ray destination\n oc = o - self.center #vector from ray origin to center\n b = 2*(oc*d)\n c = oc*oc - self.r**2\n disc = b**2-4*c\n if disc<0:\n return False,-1\n else:\n disc **=0.5\n t0 = -b-disc\n t1 = -b+disc\n return True,max(t0,t1)", "def is_intersecting(self, ray):\n\n intersecting_point = self._sympy_plane.intersection(ray.sympy_line)[0]\n\n if 'x' in self._name:\n\n if self._within_y_bounds(intersecting_point.y) and self._within_z_bounds(intersecting_point.z):\n return True, np.array(map(float, [intersecting_point.x, intersecting_point.y, intersecting_point.z]))\n\n\n\n elif 'y' in self._name:\n\n if self._within_x_bounds(intersecting_point.x) and self._within_z_bounds(intersecting_point.z):\n return True, np.array(map(float, [intersecting_point.x, intersecting_point.y, intersecting_point.z]))\n\n\n\n elif 'z' in self._name:\n\n if self._within_y_bounds(intersecting_point.y) and self._within_x_bounds(intersecting_point.x):\n return True, np.array(map(float, [intersecting_point.x, 
intersecting_point.y, intersecting_point.z]))\n\n return False, None", "def ray(self, pixel):\n # Ensure pixel is in homogenous coordinates\n if len(pixel) == 2:\n pixel = np.vstack((pixel, [1]))\n\n ray = project(self._camera.P_pinv, pixel.astype(np.float32))\n assert ray.shape == (4, 1)\n\n return self._camera.center, ray", "def trail_length_from_plane_intersection_numpy(point, vector, plane, tol=1e-6):\n origin, normal = plane\n cos_nv = np.dot(normal, normalize_vector_numpy(vector))\n\n if np.abs(cos_nv) < tol:\n return\n\n oa = origin - point\n cos_noa = np.dot(normal, oa)\n\n return cos_noa / cos_nv", "def intersectsAB(self, ray):\n v1 = ray.origin - self.pointA\n v2 = self.pointB - self.pointA\n v3 = Point(-ray.direction.y, ray.direction.x)\n dot = v2.dot(v3)\n if (abs(dot) < 0.000001):\n return None\n t1 = v2.cross(v1) / dot\n t2 = v1.dot(v3) / dot\n if (t1 >= 0.0 and (t2 >= 0.0 and t2 <= 1.0)):\n return t1\n return None", "def intersection(x, y, f, p):", "def closest_point_on_plane(point, plane):\n base, normal = plane\n x, y, z = base\n a, b, c = normalize_vector(normal)\n x1, y1, z1 = point\n d = a * x + b * y + c * z\n k = (a * x1 + b * y1 + c * z1 - d) / (a**2 + b**2 + c**2)\n return [x1 - k * a,\n y1 - k * b,\n z1 - k * c]", "def compute_intersections(\r\n self, plane: Plane, directions: List[np.ndarray]\r\n ) -> List[np.ndarray]:\r\n return [\r\n line_plane_intersection(\r\n plane_origin=plane.origin,\r\n plane_normal=plane.normal,\r\n line_direction=direction,\r\n )\r\n for direction in directions\r\n ]", "def intersect_plane(self, other: Plane, **kwargs) -> Line:\n if self.normal.is_parallel(other.normal, **kwargs):\n raise ValueError(\"The planes must not be parallel.\")\n\n array_normals_stacked = np.vstack((self.normal, other.normal))\n\n # Construct a matrix for a linear system.\n array_00 = 2 * np.eye(3)\n array_01 = array_normals_stacked.T\n array_10 = array_normals_stacked\n array_11 = np.zeros((2, 2))\n matrix = np.block([[array_00, array_01], [array_10, array_11]])\n\n dot_a = np.dot(self.point, self.normal)\n dot_b = np.dot(other.point, other.normal)\n array_y = np.array([0, 0, 0, dot_a, dot_b])\n\n # Solve the linear system.\n solution = np.linalg.solve(matrix, array_y)\n\n point_line = Point(solution[:3])\n direction_line = self.normal.cross(other.normal)\n\n return Line(point_line, direction_line)", "def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)", "def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)", "def ray_at(self, O, t):\n point = self.float_mul(t).plus(O)\n return point", "def intersect(self, rays):\n raise NotImplementedError", "def intersects(self, ray):\n sphere_to_ray = ray.origin - self.center\n a = 1\n b = 2 * ray.direction.dot_product(sphere_to_ray)\n c = sphere_to_ray.dot_product(sphere_to_ray) - self.radius * self.radius\n discriminant = b * b - 4 * a * c\n\n if discriminant >= 0:\n dist = (-b - sqrt(discriminant)) / 2\n if dist > 0:\n return dist\n\n return None", "def plane_dem_intersection(\n srcPlaneAttitude: Plane,\n srcPt: Point,\n geo_array: 
GeoArray,\n level_ndx: int=0) -> List[Point]:\n\n # dem values as a Numpy array\n\n q_d = geo_array.level(\n level_ndx=level_ndx)\n\n # row and column numbers of the dem\n\n row_num, col_num = q_d.shape\n\n # plane closure that, given (x, y), derive z\n\n plane_z_closure = srcPlaneAttitude.closure_plane_from_geo(srcPt)\n\n # plane elevations at grid cell centers\n\n q_p = array_from_function(\n row_num=row_num,\n col_num=col_num,\n geotransform=geo_array.gt,\n z_transfer_func=plane_z_closure)\n\n index_multiplier = 100 # sufficiently large value to ensure a precise slope values\n\n mi_p = xyarr2segmentslope(\n xy2z_func=plane_z_closure,\n arrij2xy_func=geo_array.ijArrToxy,\n i=index_multiplier,\n j=0) * np.ones((row_num, col_num))\n\n mj_p = xyarr2segmentslope(\n xy2z_func=plane_z_closure,\n arrij2xy_func=geo_array.ijArrToxy,\n i=0,\n j=index_multiplier) * np.ones((row_num, col_num))\n\n # 2D array of DEM segment parameters\n\n cell_size_j, cell_size_i = geo_array.geotransf_cell_sizes()\n\n mj_d = grad_j(\n fld=q_d,\n cell_size_j=cell_size_j)\n\n mi_d = grad_iminus(\n fld=q_d,\n cell_size_i=cell_size_i)\n\n # intersection points\n\n intersection_pts_j = segment_intersections_array(\n m_arr1=mj_d,\n m_arr2=mj_p,\n q_arr1=q_d,\n q_arr2=q_p,\n cell_size=cell_size_j)\n\n intersection_pts_j = arrayTo3DPts(\n direction='j',\n arr=intersection_pts_j,\n ij2xy_func=geo_array.ijArrToxy,\n xy2z_func=plane_z_closure)\n\n intersection_pts_i = segment_intersections_array(\n m_arr1=mi_d,\n m_arr2=mi_p,\n q_arr1=q_d,\n q_arr2=q_p,\n cell_size=cell_size_i)\n\n # filter out i-direction points coincident with those of j-direction\n\n #intersection_pts_i = intersection_pts_i[np.where( intersection_pts_i > 1e10-6 )]\n\n intersection_pts_i = arrayTo3DPts(\n direction='i',\n arr=intersection_pts_i,\n ij2xy_func=geo_array.ijArrToxy,\n xy2z_func=plane_z_closure)\n\n unique_pts = intersection_pts_j + intersection_pts_i\n\n return unique_pts", "def reflect_line_plane(line, plane, epsilon=1e-6):\n intx_pt = intersection_line_plane(line, plane, epsilon)\n if not intx_pt:\n return None\n vec_line = subtract_vectors(line[1], line[0])\n vec_reflect = mirror_vector_vector(vec_line, plane[1])\n if angle_smallest_vectors(plane[1], vec_reflect) > 0.5 * pi:\n return None\n return [intx_pt, add_vectors(intx_pt, vec_reflect)]", "def intersection(self, segment):\n intersection = self.hyperplane.intersection(segment)\n if intersection is not None and np.linalg.norm(intersection - self.closest_point_to(intersection)) < epsilon:\n return intersection\n\n return None", "def intersects(self, ray):\n\n sphere_to_ray = ray.origin - self.origin\n b = 2 * ray.direction * sphere_to_ray\n c = sphere_to_ray ** 2 - self.radius ** 2\n discriminant = b ** 2 - 4 * c\n\n if discriminant >= 0:\n dist = (-b - math.sqrt(discriminant)) / 2\n if dist > 0:\n return dist", "def rays(self):\n pixels = np.array([\n [u, v, 1.]\n for u, v in product(range(self.width), range(self.height))\n ], dtype=np.int32).T\n rays = project(self.camera.P_pinv, pixels)\n\n return self._camera.center, rays.T", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * 
s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y", "def intersect(self, rays):\n has_segments = bool(self._merged_segments)\n has_arcs = bool(self._merged_arcs)\n \n seg = {}\n arc = {}\n \n if has_segments:\n # do segment intersection\n seg[\"x\"], seg[\"y\"], seg[\"valid\"], seg[\"ray_u\"], seg[\"segment_u\"], \\\n seg[\"gather_ray\"], seg[\"gather_segment\"] = self._segment_intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n self._merged_segments[\"x_start\"],\n self._merged_segments[\"y_start\"],\n self._merged_segments[\"x_end\"],\n self._merged_segments[\"y_end\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n seg[\"norm\"] = tf.gather(\n tf.atan2(\n self._merged_segments[\"y_end\"] - self._merged_segments[\"y_start\"],\n self._merged_segments[\"x_end\"] - self._merged_segments[\"x_start\"]\n ) + PI/2.0,\n seg[\"gather_segment\"]\n )\n \n if has_arcs:\n # do arc intersection\n arc[\"x\"], arc[\"y\"], arc[\"valid\"], arc[\"ray_u\"], arc[\"arc_u\"], \\\n arc[\"gather_ray\"], arc[\"gather_arc\"] = self._arc_intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n self._merged_arcs[\"x_center\"],\n self._merged_arcs[\"y_center\"],\n self._merged_arcs[\"angle_start\"],\n self._merged_arcs[\"angle_end\"],\n self._merged_arcs[\"radius\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n arc[\"norm\"] = self._get_arc_norm(\n self._merged_arcs[\"radius\"], arc[\"arc_u\"], arc[\"gather_arc\"]\n )\n \n if has_segments and has_arcs:\n # has arcs and segments, so we need to chooose between segment and arc \n # intersections.\n seg[\"valid\"], arc[\"valid\"] = self._seg_or_arc(\n seg[\"ray_u\"], arc[\"ray_u\"], seg[\"valid\"], arc[\"valid\"]\n )\n \n return seg, arc", "def project_point_plane(point, plane):\n base, normal = plane\n normal = normalize_vector(normal)\n vector = subtract_vectors(point, base)\n snormal = scale_vector(normal, dot_vectors(vector, normal))\n return subtract_vectors(point, snormal)", "def intersection_ray_ray_3d(ray1: Tuple[Vector, Vector],\n ray2: Tuple[Vector, Vector], abs_tol=1e-10) -> \\\nSequence[\n Vector]:\n # source: http://www.realtimerendering.com/intersections.html#I304\n o1, p1 = ray1\n d1 = (p1 - o1).normalize()\n o2, p2 = ray2\n d2 = (p2 - o2).normalize()\n d1xd2 = d1.cross(d2)\n denominator = d1xd2.magnitude_square\n if math.isclose(denominator, 0., abs_tol=abs_tol):\n # ray1 is parallel to ray2\n return tuple()\n else:\n o2_o1 = o2 - o1\n det1 = _determinant(o2_o1, d2, d1xd2)\n det2 = _determinant(o2_o1, d1, d1xd2)\n p1 = o1 + d1 * (det1 / denominator)\n p2 = o2 + d2 * (det2 / denominator)\n if p1.isclose(p2, abs_tol=abs_tol):\n # ray1 and ray2 have an intersection point\n return p1,\n else:\n # ray1 and ray2 do not have an intersection point,\n # p1 and p2 are the points of closest approach on each ray\n return p1, p2", "def ray_poly_intersect(p_ray, d_ray, poly):\n ret = []\n\n for s in poly.segments:\n p = ray_segment_intersect(p_ray, d_ray, s)\n\n if p is not None:\n ret.append(p)\n\n return ret", "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda 
x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def intersect(self, rays): \n result = {}\n \n if bool(self._merged):\n result[\"x\"], result[\"y\"], result[\"z\"], result[\"valid\"], result[\"ray_u\"], \\\n result[\"trig_u\"], result[\"trig_v\"], result[\"gather_ray\"], \\\n result[\"gather_trig\"] = self._intersection(\n rays[\"x_start\"],\n rays[\"y_start\"],\n rays[\"z_start\"],\n rays[\"x_end\"],\n rays[\"y_end\"],\n rays[\"z_end\"],\n self._merged[\"xp\"],\n self._merged[\"yp\"],\n self._merged[\"zp\"],\n self._merged[\"x1\"],\n self._merged[\"y1\"],\n self._merged[\"z1\"],\n self._merged[\"x2\"],\n self._merged[\"y2\"],\n self._merged[\"z2\"],\n self.intersect_epsilion,\n self.size_epsilion,\n self.ray_start_epsilion\n )\n \n result[\"norm\"] = tf.gather(\n self._merged[\"norm\"],\n result[\"gather_trig\"]\n )\n \n return result", "def plane_equation(point_a, point_b, point_c):\n v1 = np.subtract(point_a, point_c)\n v2 = np.subtract(point_a, point_b)\n normal = np.cross(v1, v2)\n # print 'b4 norm', normal\n unit_normal = norm_vect(normal)\n # print 'unityyy', unit_normal\n return unit_normal", "def planeIndex(plane, center = False):\n return (planeBase(center = center) * np.array(plane)).sum()", "def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray", "def intersection(self, line: AbstractLine) -> Optional[AbstractPoint]:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n point = plane.intersection(line)\n if point is not None:\n if self.has_point(point):\n return point\n return None", "def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)", "def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)", "def getIntersectPoint(p1, p2, p3, p4):\n points = p1, p2, p3, p4\n gradients = (\n CollisionUtility.calculate_gradient(p1, p2), CollisionUtility.calculate_gradient(p3, p4)\n )\n\n # See if the the lines are parallel\n if gradients[0] != gradients[1]:\n return 
CollisionUtility.calculate_not_parallel_intersection(points, gradients)\n else:\n return CollisionUtility.calculate_parallel_intersection(points, gradients)", "def test_point_on_plane(self, point, plane):\n _dist = point.dot(plane[:3]) + plane[3]\n if _dist <= epsilon:\n print('OK => point on plane')\n else:\n print('NO => point not on plane')", "def is_intersection_segment_plane(segment, plane, epsilon=1e-6):\n pt1 = segment[0]\n pt2 = segment[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = - dot_vectors(p_norm, v2) / dot\n if fac > 0. and fac < 1.:\n return True\n return False\n else:\n return False", "def plane_equation(p1, p2, p3):\n a1 = p2[0] - p1[0]\n b1 = p2[1] - p1[1]\n c1 = p2[2] - p1[2]\n a2 = p3[0] - p1[0]\n b2 = p3[1] - p1[1]\n c2 = p3[2] - p1[2]\n a = b1 * c2 - b2 * c1\n b = a2 * c1 - a1 * c2\n c = a1 * b2 - b1 * a2\n # Points are collinear\n if (abs(a) < 1e-6) and (abs(b) < 1e-6) and (abs(c) < 1e-6):\n return None\n # All clear\n d = (- a * p1[0] - b * p1[1] - c * p1[2])\n return a, b, c, d", "def find_circle_line_intersection(P0, r0, P1):\n\t\n\tx_offset, y_offset = P0\n\tx0, y0 = 0, 0\n\tx1, y1 = P1\n\n\tx1, y1 = x1 - x_offset, y1 - y_offset\n\n\tdx = x1 - x0\n\tdy = y1 - y0\n\tdr = math.sqrt(dx*dx + dy*dy)\n\n\tD = x0*y1 - x1*y0\n\n\tdelta0 = r0*r0*dr*dr - D*D\n\n\tx2 = (D*dy + sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty2 = (D*dx + math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx3 = (D*dy - sgn(dy)*dx*math.sqrt(delta0)) / (dr*dr)\n\ty3 = (D*dx - math.fabs(dy)*math.sqrt(delta0)) / (dr*dr)\n\n\tx2 += x_offset\n\tx3 += x_offset\n\ty2 += y_offset\n\ty3 += y_offset\n\n\treturn np.array([[x2, y2], [x3, y3]])", "def calculate_points(self):\n\n v0 = np.vstack(np.sign(self.distance_from_plane[:, 0] *\n self.distance_from_plane[:, 1]))\n v1 = np.vstack(np.sign(self.distance_from_plane[:, 1] *\n self.distance_from_plane[:, 2]))\n v2 = np.vstack(np.sign(self.distance_from_plane[:, 2] *\n self.distance_from_plane[:, 0]))\n\n goes_through = np.concatenate([v0, v1, v2], 1)\n self.num_points = (np.sum(goes_through == 0) * 2 +\n np.sum(goes_through == -1))\n\n self.slice_points = np.zeros((self.num_points, 3))\n curr_point = 0\n\n for i in range(len(goes_through)):\n for x in range(len(goes_through[i])):\n if goes_through[i][x] == -1:\n # The line goes through the plane\n # There is a point which lies in the plane, which is r\n # amounts of vector V away from point P0\n p0 = self.obj.vectors[i][x]\n if x == 0 or x == 1:\n V = self.obj.vectors[i][x]-self.obj.vectors[i][x+1]\n else:\n V = self.obj.vectors[i][x]-self.obj.vectors[i][0]\n d = (np.dot((self.plane_origin-p0), self.plane_normal) /\n (np.dot(V, self.plane_normal)))\n P = p0 + d*V\n self.slice_points[curr_point] = P\n curr_point += 1\n\n if goes_through[i][x] == 0:\n if x == 0 or x == 1:\n self.slice_points[curr_point] = self.obj.vectors[i][x]\n self.slice_points[curr_point+1] = self.obj.vectors[i][x+1]\n else:\n self.slice_points[curr_point] = self.obj.vectors[i][x]\n self.slice_points[curr_point+1] = self.obj.vectors[i][0]\n\n self.slice_points = np.round(self.slice_points, decimals=5)\n self.slice_points = np.unique(self.slice_points, axis=0)\n \n return None", "def intersection_line_triangle(line, triangle, epsilon=1e-6):\n a, b, c = triangle\n v1 = subtract_vectors(line[1], line[0])\n p1 = line[0]\n # Find vectors for two edges sharing V1\n e1 = subtract_vectors(b, a)\n e2 = 
subtract_vectors(c, a)\n # Begin calculating determinant - also used to calculate u parameter\n p = cross_vectors(v1, e2)\n # if determinant is near zero, ray lies in plane of triangle\n det = dot_vectors(e1, p)\n # NOT CULLING\n if(det > - epsilon and det < epsilon):\n return None\n inv_det = 1.0 / det\n # calculate distance from V1 to ray origin\n t = subtract_vectors(p1, a)\n # Calculate u parameter and make_blocks bound\n u = dot_vectors(t, p) * inv_det\n # The intersection lies outside of the triangle\n if(u < 0.0 or u > 1.0):\n return None\n # Prepare to make_blocks v parameter\n q = cross_vectors(t, e1)\n # Calculate V parameter and make_blocks bound\n v = dot_vectors(v1, q) * inv_det\n # The intersection lies outside of the triangle\n if(v < 0.0 or u + v > 1.0):\n return None\n t = dot_vectors(e2, q) * inv_det\n if t > epsilon:\n return add_vectors(p1, scale_vector(v1, t))\n # No hit\n return None", "def plane_distance(p, plane):\n x, y, z = p\n A, B, C, D = plane\n return A*x + B*y + C*z + D", "def small_circle_intersection(axis_a, angle_a, axis_b, angle_b):\n line = general_plane_intersection(axis_a, cos(angle_a),\n axis_b, cos(angle_b))\n if line is None:\n return ()\n l_v, l_0 = line\n # https://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection\n b = 2*l_v.dot(l_0)\n delta = b*b - 4*(l_0.dot(l_0) - 1)\n # Should the answers be normalized?\n if delta < 0:\n return ()\n elif delta == 0:\n return -b/2.,\n else:\n sqrt_delta = sqrt(delta)\n return l_0 + l_v*(-b - sqrt_delta)/2., l_0 + l_v*(-b + sqrt_delta)/2.", "def intersection(line1, line2):\n rho1, theta1 = line1\n rho2, theta2 = line2\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n return [x0, y0]", "def intersection_ring(self, q_total):\n \n # WARNING: This ignores the effect of the incident angle\n \n \n\n # This is a point that intersects the Ewald sphere\n # (if incident_angle = 0)\n theta = np.arcsin(q_total/(2*self.get_k()))\n qx, qy, qz = 0, -q_total*np.sin(theta), q_total*np.cos(theta)\n \n #qx, qy, qz = 0, 0, q_total\n \n qxs = []\n qys = []\n qzs = []\n \n for rot_angle in np.linspace(0, 2*np.pi, num=200):\n qx_rot = qx*np.cos(rot_angle) + qz*np.sin(rot_angle)\n qy_rot = qy\n qz_rot = -qx*np.sin(rot_angle) + qz*np.cos(rot_angle)\n qxy_rot = np.sqrt(np.square(qx_rot)+np.square(qy_rot))\n if qx_rot<0:\n qxy_rot *= -1\n \n qxs.append( qx_rot )\n qys.append( qy_rot )\n qzs.append( qz_rot )\n \n return qxs, qys, qzs", "def _circle_intersection(self, circle, point):\n dist = euclidean_distance((circle[0], circle[1]), point) - circle[2]\n vun = vec2d((circle[0] - point[0]), (circle[1] - point[1]))\n v = vun.normalized()\n\n x, y = (point[0] + dist * v.x), (point[0] + dist * v.x)\n\n return dist, (x, y)", "def get_line_circle_intersections(A, B, C, r):\n Lx = B[0] - A[0]\n Ly = B[1] - A[1]\n Lz = B[2] - A[2]\n\n # stranger things\n D = Lx**2 + Ly**2\n E = 2 * ( Lx * (A[0] - C[0]) + Ly * (A[1] - C[1]) )\n F = (\n (A[0] - C[0])**2\n + (A[1] - C[1])**2\n - r**2\n )\n det = E**2 - 4 * D * F\n \n # declare null vectors\n P1 = [0, 0, 0]\n P2 = [0, 0, 0]\n t1 = t2 = None\n eps = .00001\n if ( not (D <= eps) or (det < 0) ):\n if det == 0:\n print \"tangential intersection found\",\n t1 = t2 = -E / (2*D)\n else:\n print \"pass-through intersection found\",\n t1 = ( (-E + math.sqrt(det)) / (2 * D) )\n t2 = ( (-E - math.sqrt(det)) / (2 * D) )\n P1[0] = A[0] + t1 * Lx\n 
P1[1] = A[1] + t1 * Ly\n P1[2] = A[2] + t1 * Lz\n P2[0] = A[0] + t2 * Lx\n P2[1] = A[1] + t2 * Ly\n P2[2] = A[2] + t2 * Lz\n else:\n print \"no intersections are available\",\n\n return P1, P2", "def intersection(line1, line2):\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n\n return [x0, y0]", "def intersect(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0\n t = c.dot(line.p - self.p) / d\n return True, self.lerp(t), t", "def ray(self):\n return self._ray", "def PlaneSubtraction(data, direction='xy', xdim=20, ydim=20):\n img = 1*data\n dy, dx = img.shape\n x = np.linspace(0, xdim, dx)\n y = np.linspace(0, ydim, dy)\n DX, DY = np.meshgrid(x, y)\n PX = []\n PY = []\n\n for i, j in zip(np.arange(dy), np.arange(dx)):\n lx = img[i]\n ly = img[:, j]\n maskx = np.isnan(lx)\n masky = np.isnan(ly)\n\n if len(lx[maskx]) < len(lx):\n s = np.polyfit(x[~maskx], lx[~maskx], 1)\n PX.append(s)\n\n if len(ly[masky]) < len(ly):\n s = np.polyfit(y[~masky], ly[~masky], 1)\n PY.append(s)\n\n px = np.nanmean(PX, axis=0)\n py = np.nanmean(PY, axis=0)\n print(\"x - slope: %.2e, y - slope: %.2e\" % (px[0], py[0]))\n print(\"calculate planes\")\n xplane = np.polyval(px, DX)\n yplane = np.polyval(py, DY)\n\n if direction == 'x':\n print('x plane subtraction')\n correction = xplane\n elif direction == 'y':\n print('y plane subtraction')\n correction = yplane\n else:\n print('x-y plane subtraction')\n correction = xplane + yplane\n\n corrected = data - correction\n corrected -= np.nanmin(corrected)\n return corrected", "def intersect_point(self,m1,c1,m2,c2):\n\n x = (c2 - c1)/(m1 - m2)\n y = m1*x + c1\n return x, y", "def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P", "def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]", "def batch_mesh_contains_points(\n ray_origins, # point cloud as origin of rays\n obj_triangles,\n direction=torch.Tensor([0.4395064455, 0.617598629942, 0.652231566745]),\n):\n tol_thresh = 0.0000001\n batch_size = obj_triangles.shape[0]\n triangle_nb = obj_triangles.shape[1]\n point_nb = ray_origins.shape[1]\n\n # Batch dim and triangle dim will flattened together\n batch_points_size = batch_size * triangle_nb\n # Direction is random but shared\n v0, v1, v2 = obj_triangles[:, :, 0], obj_triangles[:, :, 1], obj_triangles[:, :, 2]\n # Get edges\n v0v1 = v1 - v0\n v0v2 = v2 - v0\n\n direction = direction.to(ray_origins.device)\n # Expand needed vectors\n batch_direction = direction.view(1, 1, 3).expand(batch_size, triangle_nb, 3)\n\n # Compute ray/triangle intersections\n pvec = torch.cross(batch_direction, v0v2, dim=2)\n dets = torch.bmm(\n v0v1.view(batch_points_size, 1, 3), pvec.view(batch_points_size, 3, 1)\n ).view(batch_size, triangle_nb)\n\n # Check if ray and triangle are parallel\n parallel = abs(dets) < tol_thresh\n invdet = 1 / (dets + 0.1 * tol_thresh)\n\n # Repeat mesh info as many times as there 
are rays\n triangle_nb = v0.shape[1]\n v0 = v0.repeat(1, point_nb, 1)\n v0v1 = v0v1.repeat(1, point_nb, 1)\n v0v2 = v0v2.repeat(1, point_nb, 1)\n hand_verts_repeated = (\n ray_origins.view(batch_size, point_nb, 1, 3)\n .repeat(1, 1, triangle_nb, 1)\n .view(ray_origins.shape[0], triangle_nb * point_nb, 3)\n )\n pvec = pvec.repeat(1, point_nb, 1)\n invdet = invdet.repeat(1, point_nb)\n tvec = hand_verts_repeated - v0\n u_val = (\n torch.bmm(\n tvec.view(batch_size * tvec.shape[1], 1, 3),\n pvec.view(batch_size * tvec.shape[1], 3, 1),\n ).view(batch_size, tvec.shape[1])\n * invdet\n )\n # Check ray intersects inside triangle\n u_correct = (u_val > 0) * (u_val < 1)\n qvec = torch.cross(tvec, v0v1, dim=2)\n\n batch_direction = batch_direction.repeat(1, point_nb, 1)\n v_val = (\n torch.bmm(\n batch_direction.view(batch_size * qvec.shape[1], 1, 3),\n qvec.view(batch_size * qvec.shape[1], 3, 1),\n ).view(batch_size, qvec.shape[1])\n * invdet\n )\n v_correct = (v_val > 0) * (u_val + v_val < 1)\n t = (\n torch.bmm(\n v0v2.view(batch_size * qvec.shape[1], 1, 3),\n qvec.view(batch_size * qvec.shape[1], 3, 1),\n ).view(batch_size, qvec.shape[1])\n * invdet\n )\n # Check triangle is in front of ray_origin along ray direction\n t_pos = t >= tol_thresh\n parallel = parallel.repeat(1, point_nb)\n # # Check that all intersection conditions are met\n try:\n not_parallel = 1 - parallel\n except:\n not_parallel = parallel==False\n final_inter = v_correct * u_correct * not_parallel * t_pos\n # Reshape batch point/vertices intersection matrix\n # final_intersections[batch_idx, point_idx, triangle_idx] == 1 means ray\n # intersects triangle\n final_intersections = final_inter.view(batch_size, point_nb, triangle_nb)\n # Check if intersection number accross mesh is odd to determine if point is\n # outside of mesh\n exterior = final_intersections.sum(2) % 2 == 0\n return exterior", "def distance_from_xy_plane(p,r):\n return np.abs(p[2]-r[2])", "def project_onto_plane(vect):\n x, y, z = vect\n \n return (x, y, 0.)", "def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z", "def getIntersection(line1, line2):\r\n\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n\r\n a = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n\r\n b = np.array([[rho1], [rho2]])\r\n\r\n x, y = np.linalg.solve(a, b)\r\n\r\n x = int(x[0])\r\n y = int(y[0])\r\n\r\n return [np.round(y), np.round(x)]", "def find_intersection(center0, direction0, center1, direction1):\n # c0 + d0 t = c1 + d1 s\n # (-d0) t + (d1) s = c0 - c1\n # [-d0, d1] [t,s]^T = delta\n A = np.array([-direction0, direction1]).T\n delta = center0 - center1\n # Unpack M = A^T * A:\n # [[a, b],\n # [c, d]]\n (a, b), (c, d) = A.T.dot(A)\n # Inverse of M:\n # 1/ det(M) [[ d, -b],\n # [-c, a]]\n M_inv = np.array([[d, -b], [-c, a]]) / (a * d - b * c)\n t, s = M_inv.dot(A.T.dot(delta))\n return t, s", "def intersect_triangle(v1, v2, v3, pos):\r\n #calc normal from two edge vectors v2-v1 and v3-v1\r\n nVec = cross(subtract(v2, v1), subtract(v3, v1))\r\n #equation of plane: Ax + By + Cz = kVal where A,B,C are components of normal. x,y,z for point v1 to find kVal\r\n kVal = dot(nVec,v1)\r\n #return y val i.e. 
y = (kVal - Ax - Cz)/B\r\n return (kVal - nVec[0]*pos[0] - nVec[2]*pos[2])/nVec[1]", "def FindCurveIntersectionWithMesh(CurveObj, MeshObj):\n\n CurvePointsAsVectorsArray = [p.co for p in CurveObj.data.splines[0].points]\n\n #convert the points to same space as the mesh\n CurvePointsAsVectorsArray = [ CurveSpaceVectorToMeshSpace(CurveObj, p, MeshObj ) for p in CurvePointsAsVectorsArray ]\n \n # find the last point on the curve that is inside the mesh\n # iterate until i find how many points starting from first point are inside\n # and use direction between the last inside point, and the next point after\n # if only root point is inside mesh, always just use the second point\n LastInsideIndex = 0\n while(\n IsPointInsideMesh(MeshObj, CurvePointsAsVectorsArray[LastInsideIndex].xyz) \n or\n IsPointInsideMesh2(MeshObj, CurvePointsAsVectorsArray[LastInsideIndex].xyz)\n ):\n LastInsideIndex += 1\n \n if((LastInsideIndex + 1 ) >= len(CurvePointsAsVectorsArray)):\n raise Exception('(LastInsideIndex + 1 ) == len(CurvePointsAsVectorsArray). This should never happen.')\n\n Direction = (CurvePointsAsVectorsArray[LastInsideIndex + 1] - CurvePointsAsVectorsArray[LastInsideIndex]).normalized()\n\n for Face in MeshObj.data.polygons:\n\n Origin = CurvePointsAsVectorsArray[0]\n VerticesIndices = Face.vertices\n p1, p2, p3 = [MeshObj.data.vertices[VerticesIndices[i]].co for i in range(3)]\n\n # last arg is clip to area of triangle, obviously we want that\n found = mathutils.geometry.intersect_ray_tri(p1, p2, p3, Direction, Origin, True)\n if found is not None:\n return found\n return None", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b" ]
[ "0.75932515", "0.7466053", "0.7187562", "0.7109652", "0.71037644", "0.70618355", "0.7053688", "0.70142746", "0.6986311", "0.6911706", "0.691073", "0.68320477", "0.6821069", "0.6723013", "0.6722326", "0.6714333", "0.66570723", "0.6611767", "0.6607392", "0.6602498", "0.6568937", "0.656427", "0.6529198", "0.65237635", "0.64556664", "0.64144427", "0.637415", "0.6342419", "0.63418", "0.6279331", "0.6249964", "0.61785847", "0.6175953", "0.6124396", "0.61220604", "0.6119177", "0.6091444", "0.6076623", "0.6068574", "0.6060742", "0.60598236", "0.60477793", "0.60164183", "0.60124326", "0.5922242", "0.5914787", "0.59123915", "0.58757526", "0.5862196", "0.58604944", "0.58434045", "0.5843385", "0.5840307", "0.58317554", "0.5816889", "0.5815655", "0.58068126", "0.5800792", "0.57956195", "0.57881224", "0.5771439", "0.5762208", "0.57585055", "0.5752957", "0.57317865", "0.5707501", "0.5702445", "0.568496", "0.5683828", "0.5680739", "0.5669456", "0.56665", "0.56620336", "0.5651935", "0.5622539", "0.56199825", "0.5594275", "0.5569987", "0.5549461", "0.55255854", "0.5513786", "0.54955196", "0.54900444", "0.5471736", "0.54672855", "0.54671943", "0.5464921", "0.546474", "0.54604834", "0.5446564", "0.54446846", "0.54405165", "0.5434928", "0.5434289", "0.54298395", "0.54232365", "0.5419257", "0.54136723", "0.5413628", "0.54131794" ]
0.69176984
9
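The raySegmentIntersectAB/CD/AC/BD helpers quoted in the negatives above all repeat the same 2D ray-versus-segment test. As a compact reference, a minimal standalone sketch of that test follows (hypothetical function name, assuming plain NumPy arrays for the ray origin, ray direction, and segment endpoints):

import numpy as np

def ray_segment_t(ray_origin, ray_dir, seg_a, seg_b, eps=1e-6):
    """Return the ray parameter t at which the ray hits segment AB, or None.

    Same formula as the raySegmentIntersect* helpers above:
    t1 = cross(v2, v1) / dot(v2, v3), t2 = dot(v1, v3) / dot(v2, v3).
    """
    v1 = np.asarray(ray_origin, float) - np.asarray(seg_a, float)
    v2 = np.asarray(seg_b, float) - np.asarray(seg_a, float)
    v3 = np.array([-ray_dir[1], ray_dir[0]], float)  # perpendicular to the ray direction
    denom = v2.dot(v3)
    if abs(denom) < eps:  # ray and segment are parallel
        return None
    t1 = (v2[0] * v1[1] - v2[1] * v1[0]) / denom  # distance along the ray
    t2 = v1.dot(v3) / denom                       # position along the segment, in [0, 1]
    if t1 >= 0.0 and 0.0 <= t2 <= 1.0:
        return t1
    return None

Taking the smallest returned t over the four edges, as the surrounding code does with minD, then gives the nearest hit on the rectangle.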
Compute approximate exterior orientation values via a 2D conformal transformation
def __ComputeApproximateVals(self, cameraPoints, groundPoints):
    # Find approximate values
    cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)
    groundPointsXY = groundPoints[0:2, :].T
    groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)
    groundPointsZ = groundPoints[2, :].T

    n = int(len(cameraPoints))  # number of observations
    u = 4  # 4 conform parameters

    A = np.zeros((n, u))  # A matrix (n,u)

    j = 0
    for i in range(len(cameraPoints)):
        if i % 2 == 0:
            A[i, 0] = 1
            A[i, 1] = 0
            A[i, 2] = cameraPoints[j]
            A[i, 3] = cameraPoints[j + 1]
        else:
            A[i, 0] = 0
            A[i, 1] = 1
            A[i, 2] = cameraPoints[j + 1]
            A[i, 3] = -cameraPoints[j]
            j += 2

    X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))

    # now we can compute the rest of the params
    X0 = X[0]
    Y0 = X[1]
    kappa = np.arctan2(-X[3], X[2])
    lam = np.sqrt(X[2] ** 2 + X[3] ** 2)
    Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength

    adjustment_results = {"X0": X0[0], "Y0": Y0[0], "Z0": Z0[0], "omega": 0, "phi": 0,
                          "kappa": np.rad2deg(kappa[0])}

    self.__exteriorOrientationParameters = np.array(
        [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T  # updating the exterior orientation params
    # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T
    # return adjustment_results
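For context, the design matrix above encodes the 2D conformal (four-parameter similarity) model X = X0 + a*x + b*y, Y = Y0 + a*y - b*x, with a = lambda*cos(kappa) and b = -lambda*sin(kappa), which is why the code recovers kappa = arctan2(-b, a) and lambda = sqrt(a^2 + b^2) from the solved vector. A standalone sketch of the same least-squares step on plain (n, 2) NumPy arrays (hypothetical helper, not part of the class above) could look like this:

import numpy as np

def conformal_2d_fit(camera_xy, ground_xy):
    """Least-squares fit of a 2D conformal transform from camera to ground coordinates.

    camera_xy, ground_xy: (n, 2) arrays of matching points.
    Returns (X0, Y0, kappa, lam). Standalone, illustrative version of the
    solve performed inside __ComputeApproximateVals above.
    """
    camera_xy = np.asarray(camera_xy, float)
    ground_xy = np.asarray(ground_xy, float)
    n = camera_xy.shape[0]

    A = np.zeros((2 * n, 4))
    A[0::2, 0] = 1.0                  # X rows: X0
    A[0::2, 2] = camera_xy[:, 0]      #         a * x
    A[0::2, 3] = camera_xy[:, 1]      #         b * y
    A[1::2, 1] = 1.0                  # Y rows: Y0
    A[1::2, 2] = camera_xy[:, 1]      #         a * y
    A[1::2, 3] = -camera_xy[:, 0]     #        -b * x
    b = ground_xy.reshape(-1)         # interleaved X1, Y1, X2, Y2, ...

    X0, Y0, a, bb = np.linalg.lstsq(A, b, rcond=None)[0]
    kappa = np.arctan2(-bb, a)
    lam = np.hypot(a, bb)
    return X0, Y0, kappa, lam

Z0 is then approximated as the mean ground elevation plus lambda times the focal length, exactly as in the method above.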
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
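The objective block in the metadata marks each row as a source of (query, document, negatives) triplets. A minimal sketch of expanding one such row into contrastive training triplets (column names taken from this dump, everything else assumed) would be:

def row_to_triplets(row):
    """Expand one dataset row into (anchor, positive, negative) triplets.

    Assumes the row exposes the columns shown in this dump:
    'query', 'document', and the list-valued 'negatives'.
    """
    anchor, positive = row["query"], row["document"]
    return [(anchor, positive, negative) for negative in row["negatives"]]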
[ "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = 
self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def test_array_orientation_consistency_tilt():\n samples = 128\n p = FringeZernike(Z2=1000, samples=samples)\n ps = PSF.from_pupil(p, 1)\n idx_y, idx_x = np.unravel_index(ps.data.argmax(), ps.data.shape) # row-major y, x\n assert idx_x == ps.center_x\n assert idx_y > ps.center_y", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = 
gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def orient_2d(p,q,r):\n return (q[0]-p[0])*(r[1]-p[1]) - (r[0]-p[0])*(q[1]-p[1])", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def calc_affine(df):\n\tx0 = df.columns[0]\n\ty0 = df.index[0]\n\tdx = df.columns[1] - df.columns[0]\n\tdy = df.index[1] - df.index[0]\n\t\n\tt = affine.Affine(dx, 0, x0 , 0, dy ,y0 - dy) \n\t# y0 - dy because anker point is in the south!\n\treturn t", "def point_orientation(a, b, c):\n return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y) >= 0", "def get_orientation_map_tensor(image, filts, rescale_angle=False, max_intensity=220):\n # move to numpy\n image = np.squeeze(image.numpy())\n\n # convolve Gabors and get energy of each\n magnitudes = []\n for filt in filts:\n sin_conv = convolve2d(image, filt[1], mode='same')\n cos_conv = convolve2d(image, filt[0], mode='same')\n\n magnitudes.append(np.sqrt(sin_conv ** 2 + cos_conv ** 2))\n\n orientation_vec = np.array([magnitudes[0] - magnitudes[2],\n magnitudes[1] - magnitudes[3]])\n\n return orientation_vec", "def get_orientation_vector(self, xyz):\n if self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def get_bar_yz_transform(v, ihat, eid, xyz1, xyz2, nid1, nid2, i, Li):\n vhat = v / norm(v) # j\n try:\n z = np.cross(ihat, vhat) # 
k\n except ValueError:\n msg = 'Invalid vector length\\n'\n msg += 'xyz1=%s\\n' % str(xyz1)\n msg += 'xyz2=%s\\n' % str(xyz2)\n msg += 'nid1=%s\\n' % str(nid1)\n msg += 'nid2=%s\\n' % str(nid2)\n msg += 'i =%s\\n' % str(i)\n msg += 'Li =%s\\n' % str(Li)\n msg += 'ihat=%s\\n' % str(ihat)\n msg += 'v =%s\\n' % str(v)\n msg += 'vhat=%s\\n' % str(vhat)\n msg += 'z=cross(ihat, vhat)'\n print(msg)\n raise ValueError(msg)\n\n zhat = z / norm(z)\n yhat = np.cross(zhat, ihat) # j\n\n if norm(ihat) == 0.0 or norm(yhat) == 0.0 or norm(z) == 0.0:\n print(' invalid_orientation - eid=%s yhat=%s zhat=%s v=%s i=%s n%s=%s n%s=%s' % (\n eid, yhat, zhat, v, i, nid1, xyz1, nid2, xyz2))\n elif not np.allclose(norm(yhat), 1.0) or not np.allclose(norm(zhat), 1.0) or Li == 0.0:\n print(' length_error - eid=%s Li=%s Lyhat=%s Lzhat=%s'\n ' v=%s i=%s n%s=%s n%s=%s' % (\n eid, Li, norm(yhat), norm(zhat), v, i, nid1, xyz1, nid2, xyz2))\n return yhat, zhat", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 * np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def orientation(a:tuple, b:tuple, c:tuple)->int:\n d = direction(a, b, c)\n if d == 0:\n return 0\n elif d > 0:\n return 1\n else:\n return -1", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def get_rotation_angle(prev_image, curr_image, size_of_cropped_image):\n max_value = np.amax(prev_image)\n\n if prev_image.dtype == 'float' and max_value <= 1:\n prev_image = np.uint8(prev_image * 255)\n curr_image = np.uint8(curr_image * 255)\n\n if prev_image.dtype == 'float' and max_value > 1:\n prev_image = np.uint8(prev_image)\n curr_image = np.uint8(curr_image)\n\n prev_image = cv.equalizeHist(prev_image)\n curr_image = cv.equalizeHist(curr_image)\n\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=200)\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = 
orb.detectAndCompute(prev_image, None)\n kp2, des2 = orb.detectAndCompute(curr_image, None)\n\n # do feature matching\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # calculate perspective transform matrix\n src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n vector_along_x_axis_from_center = \\\n np.float32([[size_of_cropped_image / 2, size_of_cropped_image / 2],\n [size_of_cropped_image, size_of_cropped_image / 2]]).reshape(-1, 1, 2)\n vector_transformed = cv.perspectiveTransform(vector_along_x_axis_from_center, transform_matrix)\n\n theta = - np.arctan2(vector_transformed[1, 0, 1] - vector_transformed[0, 0, 1],\n vector_transformed[1, 0, 0] - vector_transformed[0, 0, 0]) * 180 / np.pi\n # negative sign is to make the sign of the angle to correspond to one in a right-handed coordinate system\n return theta", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)", "def res(self) -> tuple[float, float]:\n xs, ys = self.xcoords.data, self.ycoords.data\n dx, dy = 0, 0\n if xs.ndim == 1:\n dx = xs[1] - xs[0]\n dy = ys[1] - ys[0]\n elif xs.ndim == 2:\n ddx0 = xs[1, 0] - xs[0, 0]\n ddy0 = ys[1, 0] - ys[0, 0]\n ddx1 = xs[0, 1] - xs[0, 0]\n ddy1 = ys[0, 1] - ys[0, 0]\n dx = math.hypot(ddx1, ddy1) # always positive!\n dy = math.hypot(ddx0, ddy0)\n rot = self.rotation\n acos = math.cos(math.radians(rot))\n # find grid top-down orientation\n if (\n (acos < 0 and ddy0 > 0)\n or (acos > 0 and ddy0 < 0)\n or (\n ddy0 == 0\n and (np.isclose(rot, 270) and ddx0 < 0)\n or (np.isclose(rot, 90) and ddx0 > 0)\n )\n ):\n dy = -1 * dy\n return dx, dy", "def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])", "def inverse_transform(self, y: Array2D) -> Array2D:", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def orientation(self, p, q, r):\n\n val 
= (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def orientation(point_p, point_q, point_r):\n # Set https://www.geeksforgeeks.org/orientation-3-ordered-points/\n # for details of below formula.\n r = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -\n (point_q.x - point_p.x) * (point_r.y - point_q.y))\n if r == 0:\n return 0\n return 1 if r > 0 else 2", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def orientate(arrayin,orientation):\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n \r\n if orientation == 1 :\r\n # x,y\r\n y = range(ny)\r\n x = range(nx)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 2 :\r\n # x,-y\r\n y = range(ny-2,-1,-1)\r\n y.append(0)\r\n x = range(nx)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 3 :\r\n # -x,y\r\n y = range(ny)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 4 :\r\n # -x,-y\r\n y = range(nx-2,-1,-1)\r\n y.append(0)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 5 :\r\n # x,y\r\n y = range(ny)\r\n x = range(nx)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 6 :\r\n # x,-y\r\n y = range(ny-2,-1,-1)\r\n y.append(0)\r\n x = range(nx)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 7 :\r\n # -x,y\r\n y = range(ny)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 8 :\r\n # -x,-y\r\n y = range(nx-2,-1,-1)\r\n y.append(0)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n y, x = np.meshgrid(x,y)\r\n else :\r\n print 'orientation must be an integer between 1 and 8.'\r\n return np.copy(arrayin[y,x])", "def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. 
Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def raw_orient(\n cal: Calibration,\n cpar: ControlPar,\n nfix: int,\n fix: List[np.ndarray],\n pix: List[Target],\n) -> bool:\n X = np.zeros((10, 6))\n y = np.zeros((10,))\n XPX = np.zeros((6, 6))\n XPy = np.zeros((6,))\n beta = np.zeros((6,))\n itnum = 0\n stopflag = False\n dm = 0.0001\n drad = 0.0001\n cal.added_par.k1 = 0\n cal.added_par.k2 = 0\n cal.added_par.k3 = 0\n cal.added_par.p1 = 0\n cal.added_par.p2 = 0\n cal.added_par.scx = 1\n cal.added_par.she = 0\n\n while not stopflag and itnum < 20:\n itnum += 1\n\n n = 0\n for i in range(nfix):\n xc, yc = pixel_to_metric(pix[i].x, pix[i].y, cpar)\n\n pos = vec_set(fix[i][0], fix[i][1], fix[i][2])\n cal.ext_par.update_rotation_matrix()\n xp, yp = img_coord(pos, cal, cpar.mm)\n\n X[n], X[n + 1] = num_deriv_exterior(cal, cpar, dm, drad, pos)\n y[n], y[n + 1] = xc - xp, yc - yp\n\n n += 2\n\n # void ata (double 
*a, double *ata, int m, int n, int n_large )\n ata(X, XPX, n, 6, 6)\n if np.any(XPX):\n XPXi = np.linalg.inv(XPX)\n else:\n XPXi = XPX\n\n # atl (double *u, double *a, double *l, int m, int n, int n_large)\n XPy = atl(XPy, X, y, 6)\n beta = XPXi @ XPy\n\n # ata ((double *) X, (double *) XPX, n, 6, 6);\n # matinv ((double *) XPX, 6, 6);\n # atl ((double *) XPy, (double *) X, y, n, 6, 6);\n # matmul ((double *) beta, (double *) XPX, (double *) XPy, 6,6,1,6,6);\n\n stopflag = all(abs(beta) <= 0.1)\n\n cal.ext_par.x0 += beta[0]\n cal.ext_par.y0 += beta[1]\n cal.ext_par.z0 += beta[2]\n cal.ext_par.omega += beta[3]\n cal.ext_par.phi += beta[4]\n cal.ext_par.kappa += beta[5]\n\n if stopflag:\n cal.ext_par.rotation_matrix()\n\n return stopflag", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):\n # cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = imagePoints\n self.__ComputeApproximateVals(cameraPoints, groundPoints)\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n while la.norm(deltaX) > epsilon:\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n # compute residuals\n l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1))\n v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)\n if (np.size(A, 0) - np.size(deltaX)) != 0:\n sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))\n sigmaX = sig[0] * la.inv(N)\n else:\n sigmaX = None\n\n return [self.exteriorOrientationParameters, sigmaX, v]", "def orientToXYZR( a, b ):\n if allclose(a,b):\n return (0,1,0,0)\n an,bn = normalise( (a,b) )\n angle = arccos(dot(an,bn))\n x,y,z = crossProduct( a, b )[0]\n if allclose( (x,y,z), 0.0):\n y = 1.0\n return (x,y,z,angle)", "def _GetHorizonAnglesLegacy(its_elev, height_cbsd, height_rx, refractivity):\n num_points = int(its_elev[0])\n step = its_elev[1]\n dist = num_points * step\n\n # Find the refractivity at the average terrain height\n start_avg = int(3.0 + 0.1 * num_points)\n end_avg = num_points 
- start_avg + 6\n zsys = np.mean(its_elev[start_avg-1:end_avg])\n refractivity *= np.exp(-zsys/9460.0)\n\n # Find the ray down-curvature per meter\n gma = 157e-9\n gme = gma*(1.0 - 0.04665 * np.exp(refractivity/179.3))\n\n alt_cbsd = its_elev[2] + height_cbsd\n alt_rx = its_elev[num_points+2] + height_rx\n qc = 0.5 * gme\n q = qc * dist\n # theta0 and theta1 the slopes, dl0 and dl1 the horizon distances\n theta1 = (alt_rx - alt_cbsd) / dist\n theta0 = theta1 - q\n theta1 = -theta1 - q\n dl0 = dist\n dl1 = dist\n\n if num_points >= 2:\n sa = 0.0\n sb = dist\n wq = True\n for i in range(1, num_points):\n sa += step\n sb -= step\n q = its_elev[i+2] - (qc*sa + theta0) * sa - alt_cbsd\n if q > 0.0:\n theta0 += q/sa\n dl0 = sa\n wq = False\n if not wq:\n q = its_elev[i+2] - (qc*sb + theta1) * sb - alt_rx\n if q > 0.0:\n theta1 += q/sb\n dl1 = sb\n\n return (np.arctan(theta0) * 180/np.pi,\n np.arctan(theta1) * 180/np.pi,\n dl0,\n dl1)", "def horizontal_to_cartesian(altitude, azimuth):\n theta = math.pi / 2 - math.radians(altitude)\n phi = math.radians(-azimuth)\n x = math.sin(phi) * math.sin(-theta)\n y = math.sin(theta) * math.cos(phi)\n z = math.cos(theta)\n return x, y, z", "def acquisition_angles(Px,Gx):\n are_two_arrays_equal(Px, Gx)\n\n major_axis,minor_axis = earth_axes()\n Vx = Px - Gx # observation vector\n del Px\n Vdist = np.linalg.norm(Vx, axis=1) # make unit length\n Vx = np.einsum('i...,i->i...', Vx, np.divide(1, Vdist))\n del Vdist\n\n e_Z = np.einsum('...i,i->...i', Gx,\n 1 / np.array([major_axis, major_axis, minor_axis]))\n e_E = np.zeros_like(e_Z)\n e_E[..., 0], e_E[..., 1] = -e_Z[:, 1].copy(), e_Z[:, 0].copy()\n e_plan = np.linalg.norm(e_Z[:, :2], axis=1)\n e_E = np.einsum('i...,i->i...', e_E, np.divide(1, e_plan))\n del e_plan\n e_N = np.array([np.multiply(e_Z[:, 1], e_E[:, 2]) -\n np.multiply(e_Z[:, 2], e_E[:, 1]),\n np.multiply(e_Z[:, 2], e_E[:, 0]) -\n np.multiply(e_Z[:, 0], e_E[:, 2]),\n np.multiply(e_Z[:, 0], e_E[:, 1]) -\n np.multiply(e_Z[:, 1], e_E[:, 0])]).T\n\n LoS = np.zeros_like(e_Z)\n LoS[..., 0] = np.einsum('...i,...i->...', Vx, e_E)\n del e_E\n LoS[..., 1] = np.einsum('...i,...i->...', Vx, e_N)\n del e_N\n LoS[..., 2] = np.einsum('...i,...i->...', Vx, e_Z)\n del e_Z\n\n az = np.rad2deg(np.arctan2(LoS[..., 0], LoS[..., 1]))\n zn = np.rad2deg(np.arccos(LoS[...,2]))\n return zn, az", "def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def rotation(self) -> float:\n xs, ys = self.xcoords.data, self.ycoords.data\n rot = 0\n if xs.ndim == 2:\n ddx1 = xs[0, -1] - xs[0, 0]\n ddy1 = ys[0, -1] - ys[0, 0]\n if not np.isclose(ddx1, 0):\n rot = math.degrees(math.atan(ddy1 / ddx1))\n else:\n rot = -90\n if ddx1 < 0:\n rot = 180 + rot\n elif ddy1 < 0:\n rot = 360 + rot\n return rot", "def getAffineTransform(self, coord1, coord2):\n num_coords = 2 * len(coord1)\n A = np.zeros((num_coords, 6))\n b = []\n for point2 in coord2:\n b.append(float(point2[0]))\n b.append(float(point2[1]))\n b = np.asarray(b)\n i = 0\n for point1 in coord1:\n A[i, 0:2] = point1[0:2]\n A[i, 2] = 1\n A[i+1, 3:5] = point1[0:2]\n A[i+1, 5] = 1\n i += 2\n A = np.asarray(A)\n b = np.asarray(b)\n x = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), b.T)\n self.depth2rgb_affine = 
np.reshape(x, (2, 3))\n csv.writer(open(\"depth2rgb_affine.cfg\", \"w+\", newline=''), delimiter=',').writerows(self.depth2rgb_affine)\n # else:\n # x = np.vstack([np.reshape(x,(2,3)),[0,0,1]])\n # self.cam_ext_mat = x\n # A = [point[i,j+0:j+3].astype(np.float32) for i,point in enumerate(coord1) if i%2 == 0]\n # pts1 = coord1[0:3].astype(np.float32)\n # pts2 = coord2[0:3].astype(np.float32)\n # print(cv2.getAffineTransform(pts1, pts2))\n # return cv2.getAffineTransform(pts1, pts2)", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def angle2D(self) -> float:\n\n return self.v2ddict.angle2d()", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def gyroi(E, B, mu, Zi,pitch):\n V = ev2vi(E, mu);\n Vperp = V*np.sqrt(1-pitch);\n return mu * mp * Vperp / Zi / eV2J / B;", "def proyZm1(u, v, t1):\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)", "def yy(self):\n return self.exterior[:, 1]", "def angle(z):", "def orientation(pointA, pointB, target):\n if target in (pointA, pointB):\n return -1\n buf = np.array([1, pointA.X, pointA.Y, 1, pointB.X, pointB.Y, 1, target.X, target.Y]).reshape(3,-1)\n buf = np.linalg.det(buf)\n if abs(buf) < Drawable._comparisonLimit:\n return 0\n if buf < 0:\n return -1\n return 1", "def test_orientation_vector():\n\topening_angle = geom_instance.source_opening_angle\n\ttest_orientation = o_gen_instance.generate_orientation_vector()\n\tassert test_orientation[0] < np.cos(opening_angle)\n\tassert test_orientation[1] < np.sin(opening_angle)", "def principal_axes_of_inertia(self, i_seq):\n return self._principal_axes_of_inertia[i_seq]", "def _derY(self, x, y):\n x_pos, y_pos = self.find_sector(x, y)\n alpha, beta = self.find_coords(x, y, x_pos, y_pos)\n\n # Get four corners data for each point\n xA = self.x_values[x_pos, y_pos]\n xB = self.x_values[x_pos + 1, y_pos]\n xC = self.x_values[x_pos, y_pos + 1]\n xD = self.x_values[x_pos + 1, y_pos + 1]\n yA = self.y_values[x_pos, y_pos]\n yB = self.y_values[x_pos + 1, y_pos]\n yC = self.y_values[x_pos, y_pos + 1]\n yD = self.y_values[x_pos 
+ 1, y_pos + 1]\n fA = self.f_values[x_pos, y_pos]\n fB = self.f_values[x_pos + 1, y_pos]\n fC = self.f_values[x_pos, y_pos + 1]\n fD = self.f_values[x_pos + 1, y_pos + 1]\n\n # Calculate components of the alpha,beta --> x,y delta translation matrix\n alpha_x = (1 - beta) * (xB - xA) + beta * (xD - xC)\n alpha_y = (1 - beta) * (yB - yA) + beta * (yD - yC)\n beta_x = (1 - alpha) * (xC - xA) + alpha * (xD - xB)\n beta_y = (1 - alpha) * (yC - yA) + alpha * (yD - yB)\n\n # Invert the delta translation matrix into x,y --> alpha,beta\n det = alpha_x * beta_y - beta_x * alpha_y\n y_alpha = -beta_x / det\n y_beta = alpha_x / det\n\n # Calculate the derivative of f w.r.t. alpha and beta\n dfda = (1 - beta) * (fB - fA) + beta * (fD - fC)\n dfdb = (1 - alpha) * (fC - fA) + alpha * (fD - fB)\n\n # Calculate the derivative with respect to x (and return it)\n dfdy = y_alpha * dfda + y_beta * dfdb\n return dfdy", "def extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def orientation(self, point):\n p_x = self.begin.x\n p_y = self.begin.y\n\n q_x = self.end.x\n q_y = self.end.y\n\n r_x = point.x\n r_y = point.y\n\n D = q_x * r_y + p_x * q_y + p_y * r_x - q_x * p_y - r_x * q_y - r_y * p_x\n\n if D > 0:\n return 1\n elif D == 0:\n return 0\n else:\n return -1", "def houghTransform(img):\n\n #initializing the values:\n theta = np.deg2rad(np.arange(-90, 90, 1)) #initializing a vector of angles in radians\n sinTheta = np.sin(theta)\n cosinTheta = np.cos(theta)\n imgWidth = img.shape [0]\n imgHeight = img.shape [1]\n imgDiagnal = int(math.sqrt(imgWidth * imgWidth + imgHeight * imgHeight)) #get the diagonal length of the image for initializing rho\n rho = np.linspace(-imgDiagnal, imgDiagnal, imgDiagnal*2) #initializing the rho values\n\n accumulator = np.zeros((2*imgDiagnal, len(theta)))\n points = [ [ 0] * len(theta)] * (2* imgDiagnal)\n\n\n are_edges = img > 5 if True else img < value_threshold\n yXis, xXis = np.nonzero(are_edges)\n\n\n\n\n #doing hough transform\n for i in range(len(xXis)):\n currentX = xXis[i]\n currentY = yXis[i]\n\n #loop through all possible angles\n\n currentRhos = [] #have a rhos to check duplicate x, y\n for j in range(len(theta)):\n currentRho = imgDiagnal + int(currentX * cosinTheta[j] + currentY*sinTheta[j])\n\n\n if points[currentRho][j] == 0 :\n points[currentRho][j] = [ ] * len(theta)\n\n if not currentRho in currentRhos:\n currentRhos.append(currentRho)\n points[currentRho][j].append([currentX, currentY])\n\n\n accumulator[currentRho, j] += 1\n\n\n return accumulator, points, theta, rho", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def estimate_affine_matrix_3d_to_2d(X, x):\n assert x.shape[0] == X.shape[0]\n assert x.shape[0] >= 4\n X = X.T # (3, n)\n x = x.T # (2, n)\n n = x.shape[1]\n\n ###---- 1. 
normalization\n ## 2d points\n mean = np.mean(x, 1) # (2, )\n x = x - np.tile(mean[:, np.newaxis], [1, n]) # (2, n)\n average_norm = np.mean(np.sqrt(np.sum(x ** 2, 0)))\n scale = np.sqrt(2) / average_norm\n x = scale * x\n\n # T = [[scale, 0, -mean * scale], \n # [ 0, scale, -mean * scale], \n # [ 0, 0, 1 ]]\n T = np.zeros((3, 3), dtype=np.float32)\n T[0, 0] = T[1, 1] = scale\n T[:2, 2] = -mean * scale\n T[2, 2] = 1\n\n ## 3d points\n X_homo = np.vstack((X, np.ones((1, n)))) # (4, n)\n mean = np.mean(X, 1) # (3, )\n X = X - np.tile(mean[:, np.newaxis], [1, n]) # (3, n)\n m = X_homo[: 3, :] - X\n average_norm = np.mean(np.sqrt(np.sum(X ** 2, 0)))\n scale = np.sqrt(3) / average_norm\n X = scale * X\n\n U = np.zeros((4, 4), dtype=np.float32)\n U[0, 0] = U[1, 1] = U[2, 2] = scale\n U[: 3, 3] = -mean * scale\n U[3, 3] = 1\n\n ###---- 2. equations\n A = np.zeros((n * 2, 8), dtype=np.float32)\n X_homo = np.vstack((X, np.ones((1, n)))).T\n A[: n, : 4] = X_homo\n A[n: , 4: ] = X_homo\n b = np.reshape(x, [-1, 1]) # (2n, 1)\n\n ###---- 3.solution\n p_8 = np.linalg.pinv(A).dot(b) # (8, 2n) x (2n, 1) -> (8, 1)\n p = np.zeros((3, 4), dtype=np.float32)\n p[0, :] = p_8[:4, 0]\n p[1, :] = p_8[4:, 0]\n p[-1, -1] = 1\n\n ###---- 4. denormalization\n P_Affine = np.linalg.inv(T).dot(p.dot(U))\n return P_Affine", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore", "def _affine_coordinates(self, Vrep_object):\n if '_affine_coordinates_pivots' not in self.__dict__:\n v_list = [ vector(v) for v in self.Vrepresentation() ]\n if len(v_list)>0:\n origin = v_list[0]\n v_list = [ v - origin for v in v_list ]\n coordinates = matrix(v_list)\n self._affine_coordinates_pivots = coordinates.pivots()\n \n v = list(Vrep_object)\n if len(v) != self.ambient_dim():\n raise ValueError('Incorrect dimension: '+str(v))\n\n return vector(self.field(), [ v[i] for i in self._affine_coordinates_pivots ])", "def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):\n\n\tt = Transform({\"type\":\"2D\",\"alpha\":alpha,\"tx\":tx,\"ty\":ty,\"mirror\":mirror,\"scale\":1.0})\n\tt = t.inverse()\n\tt = t.get_params(\"2D\")\n\treturn t[ \"alpha\" ], t[ \"tx\" ], t[ \"ty\" ], t[ \"mirror\" ]", "def proyZ1(u, v, t2):\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)", "def read_affine(df):\n SliceThickness = [df.SliceThickness]\n PixelSpacing = _string_to_list_of_floats(df.PixelSpacing)\n ImageOrientationPatient = _string_to_list_of_floats(df.ImageOrientationPatient)\n ImagePositionPatient = _string_to_list_of_floats(df.ImagePositionPatient)\n\n Zooms = np.array(PixelSpacing+SliceThickness, dtype=float)\n ImageOrientationPatient = np.array(ImageOrientationPatient, dtype=float)\n ImagePositionPatient = np.array(ImagePositionPatient, dtype=float)\n \n ijk2ras = extract_cosines(ImageOrientationPatient)\n\n ijk2ras = (ijk2ras*np.array([-1,-1,1])).T\n ImagePositionPatient = 
ImagePositionPatient*np.array([-1,-1,1])\n\n affine = np.stack((ijk2ras[:,0]*Zooms[0],\n ijk2ras[:,1]*Zooms[1],\n ijk2ras[:,2]*Zooms[2],\n ImagePositionPatient), axis=1)\n\n return np.vstack((affine,[[0,0,0,1]]))", "def yprojection(self):\n return self.image.sum(axis=1)", "def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))", "def test_active_matrix_from_extrinsic_euler_zyz():\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw([0.5 * np.pi, 0, 0]),\n np.array([\n [1, 0, 0],\n [0, 0, -1],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 1, 0],\n [0, 0, -1],\n [-1, 0, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [0, 1, 0],\n [-1, 0, 0]\n ])\n )", "def homogeneous_transformation_matrix_2d(angle, tx, ty):\n return np.array([[np.cos(angle), -np.sin(angle), tx],\n [np.sin(angle), np.cos(angle), ty],\n [0, 0, 1]])", "def _inverse_affine_matrix(self) -> np.ndarray:\n raise NotImplementedError", "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def secondorder(self):\n f = self.img\n x = self.x\n y = self.y\n self.x2 = sum(f*x**2)/sum(f) - self.x1**2\n self.y2 = sum(f*y**2)/sum(f) - self.y1**2\n self.xy = sum(f*x*y)/sum(f) - self.x1*self.y1", "def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, 
normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n # The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, 
ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu", "def translateToOriginXform(self):\n return np.array([[1, 0, 0, -self.eye[0]],\n [0, 1, 0, -self.eye[1]],\n [0, 0, 1, -self.eye[2]],\n [0, 0, 0, 1]])", "def to_axang(self) -> Tuple[np.ndarray, float]:\n denom = np.linalg.norm(self.v)\n angle = 2.0*np.arctan2(denom, self.w)\n axis = np.zeros(3) if angle==0.0 else self.v/denom\n return axis, angle", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb", "def vrrotvec2mat(ax_ang):\n\n #file_dir = os.path.dirname(os.path.realpath(__file__))\n #path_dir2 = file_dir + '/../geometry/'\n #sys.path.append(path_dir2)\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n 
elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n c = np.cos(angle)\n s = np.sin(angle)\n tc = 1 - c\n\n mt11 = tc*x*x + c\n mt12 = tc*x*y - s*z\n mt13 = tc*x*z + s*y\n\n mt21 = tc*x*y + s*z\n mt22 = tc*y*y + c\n mt23 = tc*y*z - s*x\n\n mt31 = tc*x*z - s*y\n mt32 = tc*y*z + s*x\n mt33 = tc*z*z + c\n\n mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n\n inds1 = np.where(ax_ang[4, :] == -1)\n mtx[inds1, :] = -mtx[inds1, :]\n\n if msz == 1:\n mtx = mtx.reshape(3, 3)\n else:\n mtx = mtx.reshape(msz, 3, 3)\n\n return mtx", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def _calculate_angle(x0, y0, x1, y1):\n if x0 == y0 == x1 == y1 == 0:\n return 0\n\n if x1 - x0 > 0: # pointing to the right semi-plane\n angle = atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 >= 0: # adding pi if pointing to the left-bottom quart\n angle = pi + atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 < 0: # subtract pi if pointing to the left-upper quart\n angle = -pi + atan((y1 - y0) / (x1 - x0))\n else: # zerodevision handle\n if y1 - y0 > 0: # pointing down\n angle = pi / 2\n else: # pointing up\n angle = -pi / 2\n\n return angle", "def orthopyroxene():\n\n rho = 3304.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 236.9; C[0,1] = 79.6; C[0,2] = 63.2; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 180.5; C[1,2] = 56.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 84.3; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 79.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 80.1\n\n return C, rho", "def py_ang(self,v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def transform2h(self, x, y, m):\n A = torch.matmul(m, torch.stack([x, y, torch.ones(len(x))]))\n xt = A[0, :] / A[2, :]\n yt = A[1, :] / A[2, :]\n return xt, yt", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def prf2visual_angle(prf_mtx, img_size, out_dir, base_name):\n feature_size = prf_mtx.shape[1]\n pos_mtx = prf_mtx[:, :2]\n # eccentricity\n ecc = retinotopy.coord2ecc(pos_mtx, img_size, 20)\n vol = ecc.reshape(18, 64, 64)\n vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_ecc.nii.gz'))\n # angle\n angle = retinotopy.coord2angle(pos_mtx, img_size)\n vol = angle.reshape(18, 64, 64)\n vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_angle.nii.gz'))\n # pRF size\n if feature_size > 2:\n size_angle = retinotopy.get_prf_size(prf_mtx, 55, 20)\n vol = size_angle.reshape(18, 64, 64)\n vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_size.nii.gz'))", "def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - 
yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def get_y(EQ, M):\n return (EQ[1] * ((-1) * EQ[0] * M[0] + EQ[1] * M[1]) - EQ[0] * EQ[2]) / (EQ[1] ** 2 + EQ[0] ** 2)" ]
[ "0.6454401", "0.63721627", "0.63071674", "0.6261199", "0.6196916", "0.6190799", "0.61566526", "0.60925615", "0.60682034", "0.6002589", "0.59904027", "0.5930793", "0.5907026", "0.5904838", "0.5902323", "0.58778685", "0.58416617", "0.58258176", "0.58110034", "0.5757958", "0.5741579", "0.5710248", "0.57085216", "0.5680774", "0.56716603", "0.56629574", "0.5662641", "0.5643609", "0.5635845", "0.5627301", "0.5605069", "0.5599455", "0.5598289", "0.55772334", "0.5574006", "0.5571472", "0.55665916", "0.5553053", "0.55518717", "0.5529964", "0.5518819", "0.5492775", "0.5489097", "0.5486433", "0.5476902", "0.5475419", "0.5474921", "0.54721135", "0.546658", "0.545225", "0.54385614", "0.5404473", "0.5395997", "0.53941953", "0.53937113", "0.53905207", "0.53904533", "0.539041", "0.5388549", "0.53805137", "0.5373326", "0.5371099", "0.5345297", "0.5342446", "0.53385144", "0.5326929", "0.53261006", "0.5323215", "0.532199", "0.5312409", "0.5310579", "0.5309963", "0.53093904", "0.5302001", "0.52768993", "0.52751875", "0.5266632", "0.52658767", "0.52654046", "0.52651393", "0.5261884", "0.52581173", "0.5254085", "0.5252276", "0.52519584", "0.5250296", "0.52350026", "0.5234039", "0.52338445", "0.5223462", "0.52217084", "0.52189875", "0.5213695", "0.5213168", "0.52082294", "0.52073354", "0.52073354", "0.5204707", "0.5200297", "0.5199332", "0.51964784" ]
0.0
-1
Compute exterior orientation approximate values via 2D conformal transformation
def __ComputeApproximateVals_RzRyRz(self, cameraPoints, groundPoints): # Find approximate values cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1) groundPointsXY = groundPoints[0:2, :].T groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1) groundPointsZ = groundPoints[2, :].T n = int(len(cameraPoints)) # number of observations u = 4 # 4 conform parameters A = np.zeros((n, u)) # A matrix (n,u) j = 0 for i in range(len(cameraPoints)): if i % 2 == 0: A[i, 0] = 1 A[i, 1] = 0 A[i, 2] = cameraPoints[j] A[i, 3] = cameraPoints[j + 1] else: A[i, 0] = 0 A[i, 1] = 1 A[i, 2] = cameraPoints[j + 1] A[i, 3] = -cameraPoints[j] j += 2 X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY)) # now we can compute the rest of the params X0 = X[0] Y0 = X[1] kappa = np.arctan2(-X[3], X[2]) lam = np.sqrt(X[2] ** 2 + X[3] ** 2) Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength adjustment_results = {"X0": X0[0], "Y0": Y0[0], "Z0": Z0[0], "omega": 0, "phi": 0, "kappa": np.rad2deg(kappa[0])} self.__exteriorOrientationParameters = np.array( [X0[0], Y0[0], Z0[0], 0.2, 0.2, kappa[0]]).T # updating the exterior orientation params # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T #return adjustment_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def orientation(xp, yp, xq, yq, xr, yr):\n cross = (xq-xp)*(yr-yp) - (xr-xp)*(yq-yp)\n dot = (xq-xp)*(xr-xp) + (yr-yp)*(yq-yp)\n if cross < 0:\n return -1\n elif cross > 0:\n return 1\n elif dot > 0:\n return 0\n else:\n return math.pi", "def orientation(p, q, r):\n val = (q.y - p.y) * (r.x - q.x) - (q.x - p.x) * (r.y - q.y)\n if val == 0:\n return 0\n elif val > 0:\n return 1\n else:\n return 2", "def ComputeInverseInnerOrientation(self):\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = 
self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n mat = np.array([[a1[0], a2[0]], [b1[0], b2[0]]])\n mat = la.inv(mat)\n\n return np.array([a0[0], b0[0], mat[0, 0], mat[0, 1], mat[1, 0], mat[1, 1]]).T", "def orientation(p0, p1, p2):\n\n angle = (p1[1] - p0[1])*(p2[0] - p1[0]) - (p2[1] - p1[1])*(p1[0] - p0[0])\n if angle == 0.0:\n return 0\n elif angle < 0.0:\n return -1\n elif angle > 0.0:\n return 1", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def rotated_e():\n x = np.zeros((5, 5))\n x[:, 0] = 1.\n y = np.zeros((5, 5))\n y[:, 2] = 1.\n z = np.zeros((5, 5))\n z[:, 4] = 1.\n a = np.zeros((5, 5))\n a[0, :] = 1.\n b = np.zeros((5, 5))\n b[2, :] = 1.\n c = np.zeros((5, 5))\n c[4, :] = 1.\n\n img = np.zeros((4, 5, 5))\n img[0] = x + y + z + a\n img[1] = x + y + z + c\n img[2] = a + b + c + x\n img[3] = a + b + c + z\n img[img > 0] = 1.\n\n return img.astype('float32')", "def compute_orientation(x,y,lx,ly,nfil):\n # number of molecules\n natoms = len(x)\n nmol = natoms/nfil\n # allocate aray for results\n phi = np.zeros((natoms), dtype = np.float64)\n tx = np.zeros((natoms), dtype = np.float64)\n ty = np.zeros((natoms), dtype = np.float64)\n # loop over all polymers\n k = 0\n for i in range(nmol):\n for j in range(nfil):\n if j == 0:\n x1 = x[k]\n y1 = y[k]\n x2 = x[k+1]\n y2 = y[k+1]\n elif j == nfil-1:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k]\n y2 = y[k]\n else:\n x1 = x[k-1]\n y1 = y[k-1]\n x2 = x[k+1]\n y2 = y[k+1]\n # compute nearest neighbor\n dx = neigh_min(x2-x1,lx)\n dy = neigh_min(y2-y1,ly)\n # compute angle using atan2\n pi = math.atan2(dy,dx)\n phi[k] = pi\n tx[k] = dx / np.sqrt(dx**2 + dy**2)\n ty[k] = dy / np.sqrt(dx**2 + dy**2)\n # increment k\n k = k + 1\n return phi, tx, ty", "def test_array_orientation_consistency_tilt():\n samples = 128\n p = FringeZernike(Z2=1000, samples=samples)\n ps = PSF.from_pupil(p, 1)\n idx_y, idx_x = np.unravel_index(ps.data.argmax(), ps.data.shape) # row-major y, x\n assert idx_x == ps.center_x\n assert idx_y > ps.center_y", "def coord_space(\n a0: numpy.ndarray, a1: numpy.ndarray, a2: numpy.ndarray, rev: bool = False\n) -> Tuple[numpy.ndarray, Optional[numpy.ndarray]]:\n # dbg = False\n # if dbg:\n # print(a0.transpose())\n # print(a1.transpose())\n # print(a2.transpose())\n\n # a0 = acs[0]\n # a1 = acs[1]\n # a2 = acs[2]\n\n global gtm\n global gmry\n global gmrz, gmrz2\n\n tm = gtm\n mry = gmry\n mrz = gmrz\n mrz2 = gmrz2\n\n # tx acs[1] to origin\n # tm = homog_trans_mtx(-a1[0][0], -a1[1][0], -a1[2][0])\n set_homog_trans_mtx(-a1[0], -a1[1], -a1[2], tm)\n\n # directly translate a2 using a1\n p = a2 - a1\n sc = get_spherical_coordinates(p)\n\n # if dbg:\n # print(\"p\", p.transpose())\n # print(\"sc\", sc)\n\n # mrz = homog_rot_mtx(-sc[1], \"z\") # rotate translated a2 -azimuth about Z\n set_Z_homog_rot_mtx(-sc[1], mrz)\n # mry = homog_rot_mtx(-sc[2], \"y\") # rotate translated a2 -polar_angle about Y\n set_Y_homog_rot_mtx(-sc[2], mry)\n\n # mt completes a1-a2 on Z-axis, still need to align a0 with XZ plane\n # mt = mry @ mrz @ tm # python 3.5 and later\n mt = 
gmry.dot(gmrz.dot(gtm))\n\n # if dbg:\n # print(\"tm:\\n\", tm)\n # print(\"mrz:\\n\", mrz)\n # print(\"mry:\\n\", mry)\n # # print(\"mt \", mt)\n\n p = mt.dot(a0)\n\n # if dbg:\n # print(\"mt:\\n\", mt, \"\\na0:\\n\", a0, \"\\np:\\n\", p)\n\n # need azimuth of translated a0\n # sc2 = get_spherical_coordinates(p)\n # print(sc2)\n azimuth2 = _get_azimuth(p[0], p[1])\n\n # rotate a0 -azimuth2 about Z to align with X\n # mrz2 = homog_rot_mtx(-azimuth2, \"z\")\n set_Z_homog_rot_mtx(-azimuth2, mrz2)\n\n # mt = mrz2 @ mt\n mt = gmrz2.dot(mt)\n\n # if dbg:\n # print(\"mt:\", mt, \"\\na0:\", a0, \"\\np:\", p)\n # # print(p, \"\\n\", azimuth2, \"\\n\", mrz2, \"\\n\", mt)\n\n # if dbg:\n # print(\"mt:\\n\", mt)\n # print(\"<<<<<<==============================\")\n\n if not rev:\n return mt, None\n\n # rev=True, so generate the reverse transformation\n\n # rotate a0 theta about Z, reversing alignment with X\n # mrz2 = homog_rot_mtx(azimuth2, \"z\")\n set_Z_homog_rot_mtx(azimuth2, mrz2)\n # rotate a2 phi about Y\n # mry = homog_rot_mtx(sc[2], \"y\")\n set_Y_homog_rot_mtx(sc[2], mry)\n # rotate a2 theta about Z\n # mrz = homog_rot_mtx(sc[1], \"z\")\n set_Z_homog_rot_mtx(sc[1], mrz)\n # translation matrix origin to a1\n # tm = homog_trans_mtx(a1[0][0], a1[1][0], a1[2][0])\n set_homog_trans_mtx(a1[0], a1[1], a1[2], tm)\n\n # mr = tm @ mrz @ mry @ mrz2\n mr = gtm.dot(gmrz.dot(gmry.dot(gmrz2)))\n # mr = numpy.dot(tm, numpy.dot(mrz, numpy.dot(mry, mrz2)))\n\n return mt, mr", "def orient_2d(p,q,r):\n return (q[0]-p[0])*(r[1]-p[1]) - (r[0]-p[0])*(q[1]-p[1])", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def as_homogenous_transformation(self):\n r3 = self.orientation.normalize().unit_quaternion_as_r3_rotation_matrix()\n return matrix.sqr((r3[0],r3[1],r3[2],self.translation[0],\n r3[3],r3[4],r3[5],self.translation[1],\n r3[6],r3[7],r3[8],self.translation[2],\n 0,0,0,1))", "def calc_affine(df):\n\tx0 = df.columns[0]\n\ty0 = df.index[0]\n\tdx = df.columns[1] - df.columns[0]\n\tdy = df.index[1] - df.index[0]\n\t\n\tt = affine.Affine(dx, 0, x0 , 0, dy ,y0 - dy) \n\t# y0 - dy because anker point is in the south!\n\treturn t", "def point_orientation(a, b, c):\n return (b.x - a.x) * (c.y - a.y) - (c.x - a.x) * (b.y - a.y) >= 0", "def get_orientation_map_tensor(image, filts, rescale_angle=False, max_intensity=220):\n # move to numpy\n image = np.squeeze(image.numpy())\n\n # convolve Gabors and get energy of each\n magnitudes = []\n for filt in filts:\n sin_conv = convolve2d(image, filt[1], mode='same')\n cos_conv = convolve2d(image, filt[0], mode='same')\n\n magnitudes.append(np.sqrt(sin_conv ** 2 + cos_conv ** 2))\n\n orientation_vec = np.array([magnitudes[0] - magnitudes[2],\n magnitudes[1] - magnitudes[3]])\n\n return orientation_vec", "def get_orientation_vector(self, xyz):\n if self.g0:\n v = xyz[self.g0] - xyz[self.Ga()]\n else:\n v = self.x\n assert self.offt == 'GGG', self.offt\n return v", "def get_bar_yz_transform(v, ihat, eid, xyz1, xyz2, nid1, nid2, i, Li):\n vhat = v / norm(v) # j\n try:\n z = np.cross(ihat, vhat) # 
k\n except ValueError:\n msg = 'Invalid vector length\\n'\n msg += 'xyz1=%s\\n' % str(xyz1)\n msg += 'xyz2=%s\\n' % str(xyz2)\n msg += 'nid1=%s\\n' % str(nid1)\n msg += 'nid2=%s\\n' % str(nid2)\n msg += 'i =%s\\n' % str(i)\n msg += 'Li =%s\\n' % str(Li)\n msg += 'ihat=%s\\n' % str(ihat)\n msg += 'v =%s\\n' % str(v)\n msg += 'vhat=%s\\n' % str(vhat)\n msg += 'z=cross(ihat, vhat)'\n print(msg)\n raise ValueError(msg)\n\n zhat = z / norm(z)\n yhat = np.cross(zhat, ihat) # j\n\n if norm(ihat) == 0.0 or norm(yhat) == 0.0 or norm(z) == 0.0:\n print(' invalid_orientation - eid=%s yhat=%s zhat=%s v=%s i=%s n%s=%s n%s=%s' % (\n eid, yhat, zhat, v, i, nid1, xyz1, nid2, xyz2))\n elif not np.allclose(norm(yhat), 1.0) or not np.allclose(norm(zhat), 1.0) or Li == 0.0:\n print(' length_error - eid=%s Li=%s Lyhat=%s Lzhat=%s'\n ' v=%s i=%s n%s=%s n%s=%s' % (\n eid, Li, norm(yhat), norm(zhat), v, i, nid1, xyz1, nid2, xyz2))\n return yhat, zhat", "def orient(ps, origin, v1, v2):\r\n \r\n ps = np.vstack((v1, v2, ps))\r\n ps -= origin\r\n if ps[0][1] == 0:\r\n a = 0\r\n else:\r\n a = np.arcsin(np.fabs(ps[0][1]) / np.sqrt(ps[0][1] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][1] < 0 <= ps[0][2]) or (ps[0][1] > 0 > ps[0][2]):\r\n a = 2 * np.pi - a\r\n if (ps[0][1] * np.sin(a) + ps[0][2] * np.cos(a)) < 0:\r\n a = np.pi + a \r\n ps = rotate(a, ps, 0)\r\n if ps[0][0] == 0:\r\n b = 0\r\n else:\r\n b = np.arcsin(np.fabs(ps[0][0]) / np.sqrt(ps[0][0] ** 2 + ps[0][2] ** 2))\r\n if (ps[0][0] < 0 and ps[0][2] < 0) or (ps[0][0] > 0 and ps[0][2] > 0):\r\n b = 2 * np.pi - b\r\n if (ps[0][2] * np.cos(b) - ps[0][0] * np.sin(b)) < 0:\r\n b = np.pi + b\r\n ps = rotate(b, ps, 1)\r\n if ps[1][1] == 0:\r\n c = 0\r\n else:\r\n c = np.arcsin(np.fabs(ps[1][1]) / np.sqrt(ps[1][0]**2 + ps[1][1]**2))\r\n if (ps[1][0] < 0 and ps[1][1] < 0) or (ps[1][0] > 0 and ps[1][1] > 0):\r\n c = 2 * np.pi - c\r\n if (ps[1][0] * np.cos(c) - ps[1][1] * np.sin(c)) < 0:\r\n c = np.pi + c\r\n ps = rotate(c, ps, 2)\r\n return ps[2:]", "def calc_main_axis(self):\n #Clarify why the above step has been done\n c0, c1, c2 = self.calc_geom_center ()\n M = numpy.zeros ((3, 3), dtype=float)\n M = [[0] * 3, [0] * 3, [0] * 3]\n for x in self.atomcoords:\n xi = x[0] - c0\n yi = x[1] - c1\n zi = x[2] - c2\n M[0][0] = M[0][0] + xi * xi\n M[0][1] = M[0][1] + xi * yi\n M[0][2] = M[0][2] + xi * zi\n M[1][1] = M[1][1] + yi * yi\n M[1][2] = M[1][2] + yi * zi\n M[2][2] = M[2][2] + zi * zi\n M[1][0] = M[0][1]\n M[2][0] = M[0][2]\n M[2][1] = M[1][2]\n M = numpy.array (M)\n d = sum (numpy.diag (M))\n M = -M\n M[0, 0] = M[0, 0] + d\n M[1, 1] = M[1, 1] + d\n M[2, 2] = M[2, 2] + d\n\n eigenVals, eigenVecs = numpy.linalg.eig (M)\n eigenVecs = eigenVecs.transpose ()\n return eigenVecs", "def orientation(a:tuple, b:tuple, c:tuple)->int:\n d = direction(a, b, c)\n if d == 0:\n return 0\n elif d > 0:\n return 1\n else:\n return -1", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def get_rotation_angle(prev_image, curr_image, size_of_cropped_image):\n max_value = np.amax(prev_image)\n\n if prev_image.dtype == 'float' and max_value <= 1:\n prev_image = np.uint8(prev_image * 255)\n curr_image = np.uint8(curr_image * 255)\n\n if prev_image.dtype == 'float' and max_value > 1:\n prev_image = np.uint8(prev_image)\n curr_image = np.uint8(curr_image)\n\n prev_image = cv.equalizeHist(prev_image)\n curr_image = cv.equalizeHist(curr_image)\n\n # Initiate ORB detector\n orb = cv.ORB_create(nfeatures=200)\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = 
orb.detectAndCompute(prev_image, None)\n kp2, des2 = orb.detectAndCompute(curr_image, None)\n\n # do feature matching\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n\n # calculate perspective transform matrix\n src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n if transform_matrix is None:\n transform_matrix, mask = cv.findHomography(src_pts, dst_pts, 0)\n\n vector_along_x_axis_from_center = \\\n np.float32([[size_of_cropped_image / 2, size_of_cropped_image / 2],\n [size_of_cropped_image, size_of_cropped_image / 2]]).reshape(-1, 1, 2)\n vector_transformed = cv.perspectiveTransform(vector_along_x_axis_from_center, transform_matrix)\n\n theta = - np.arctan2(vector_transformed[1, 0, 1] - vector_transformed[0, 0, 1],\n vector_transformed[1, 0, 0] - vector_transformed[0, 0, 0]) * 180 / np.pi\n # negative sign is to make the sign of the angle to correspond to one in a right-handed coordinate system\n return theta", "def getOrientationVect(self, a,b):\r\n return np.array(a)-np.array(b)", "def res(self) -> tuple[float, float]:\n xs, ys = self.xcoords.data, self.ycoords.data\n dx, dy = 0, 0\n if xs.ndim == 1:\n dx = xs[1] - xs[0]\n dy = ys[1] - ys[0]\n elif xs.ndim == 2:\n ddx0 = xs[1, 0] - xs[0, 0]\n ddy0 = ys[1, 0] - ys[0, 0]\n ddx1 = xs[0, 1] - xs[0, 0]\n ddy1 = ys[0, 1] - ys[0, 0]\n dx = math.hypot(ddx1, ddy1) # always positive!\n dy = math.hypot(ddx0, ddy0)\n rot = self.rotation\n acos = math.cos(math.radians(rot))\n # find grid top-down orientation\n if (\n (acos < 0 and ddy0 > 0)\n or (acos > 0 and ddy0 < 0)\n or (\n ddy0 == 0\n and (np.isclose(rot, 270) and ddx0 < 0)\n or (np.isclose(rot, 90) and ddx0 > 0)\n )\n ):\n dy = -1 * dy\n return dx, dy", "def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])", "def inverse_transform(self, y: Array2D) -> Array2D:", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H1 + sigma_H1\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def calc_transform(src_, dst_):\n M_ = cv2.getPerspectiveTransform(src_, dst_)\n Minv_ = cv2.getPerspectiveTransform(dst_, src_)\n return M_, Minv_", "def orientation(self, p, q, r):\n\n val 
= (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if (val > 0):\n\n # Clockwise orientation\n return 1\n elif (val < 0):\n\n # Counterclockwise orientation\n return 2\n else:\n\n # Colinear orientation\n return 0", "def orientation(point_p, point_q, point_r):\n # Set https://www.geeksforgeeks.org/orientation-3-ordered-points/\n # for details of below formula.\n r = ((point_q.y - point_p.y) * (point_r.x - point_q.x) -\n (point_q.x - point_p.x) * (point_r.y - point_q.y))\n if r == 0:\n return 0\n return 1 if r > 0 else 2", "def align(self) -> np.ndarray:\n vel = self.state[:, :, Boids.Attr.VEL]\n vel_norm = np.linalg.norm(vel, axis=0)\n orientation = vel / (vel_norm + EPSILON)\n mut_influence = self._perceive(self.p_range)\n desired_orientation = np.dot(orientation, mut_influence)\n desired_orientation = np.multiply(desired_orientation, \n vel_norm + EPSILON)\n return desired_orientation - orientation", "def getAxisAndAngleDegres(self):\n theta = numpy.arccos(self.real)\n angle = 360 * theta / numpy.pi\n xyz = self.pure / numpy.sin(theta)\n return xyz, angle", "def orientate(arrayin,orientation):\r\n ny = arrayin.shape[0]\r\n nx = arrayin.shape[1]\r\n \r\n if orientation == 1 :\r\n # x,y\r\n y = range(ny)\r\n x = range(nx)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 2 :\r\n # x,-y\r\n y = range(ny-2,-1,-1)\r\n y.append(0)\r\n x = range(nx)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 3 :\r\n # -x,y\r\n y = range(ny)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 4 :\r\n # -x,-y\r\n y = range(nx-2,-1,-1)\r\n y.append(0)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n x, y = np.meshgrid(x,y)\r\n elif orientation == 5 :\r\n # x,y\r\n y = range(ny)\r\n x = range(nx)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 6 :\r\n # x,-y\r\n y = range(ny-2,-1,-1)\r\n y.append(0)\r\n x = range(nx)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 7 :\r\n # -x,y\r\n y = range(ny)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n y, x = np.meshgrid(x,y)\r\n elif orientation == 8 :\r\n # -x,-y\r\n y = range(nx-2,-1,-1)\r\n y.append(0)\r\n x = range(nx-2,-1,-1)\r\n x.append(0)\r\n y, x = np.meshgrid(x,y)\r\n else :\r\n print 'orientation must be an integer between 1 and 8.'\r\n return np.copy(arrayin[y,x])", "def horn_adjust(x, y):\n debug=False\n #debug=True\n meanX = x.mean(axis=0)\n meanY = y.mean(axis=0)\n translation = meanY - meanX\n x_centered = x - meanX\n y_centered = y - meanY\n if debug:\n print(\"x_centered\")\n print(x_centered)\n print(\"y_centered\")\n print(y_centered)\n # Find how much to rescale the x's. 
Entrywise multiplication.\n x_scale = np.sqrt((x_centered * x_centered).sum())\n y_scale = np.sqrt((y_centered * y_centered).sum())\n scale_factor = y_scale / x_scale\n x_centered_prime = x_centered * scale_factor\n if debug:\n print(\"scale_factor\")\n print(scale_factor)\n print(\"x_centered_prime\")\n print(x_centered_prime)\n # Find angle to rotate the planes\n x_perp = np.cross(x_centered_prime[0], x_centered_prime[1])\n y_perp = np.cross(y_centered[0], y_centered[1])\n # Find rotation matrix to rotate the x plane into the y plane\n # Using https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d\n # https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula\n x_perp_unit = x_perp / np.linalg.norm(x_perp)\n y_perp_unit = y_perp / np.linalg.norm(y_perp)\n v = np.cross(x_perp_unit, y_perp_unit)\n s = np.linalg.norm(v) # sine of angle between the planes\n c = x_perp_unit.dot(y_perp_unit) # cosine of angle between the planes\n v_x = np.array([[0, -v[2], v[1]],\n [v[2], 0, -v[0]],\n [-v[1], v[0], 0]])\n # rotation_p acts on the plane\n rotation_p = np.eye(3) + v_x + v_x.dot(v_x) * (1 - c) / s**2.0\n # Transpose to make each x a column vector, then transpose back for next part\n x_plane = rotation_p.dot(x_centered_prime.T).T\n # Now rotate within the plane, as in Sec. 5 of Horn\n v_y = np.array([[0, -y_perp_unit[2], y_perp_unit[1]],\n [y_perp_unit[2], 0, -y_perp_unit[0]],\n [-y_perp_unit[1], y_perp_unit[0], 0]])\n s_win_tmp = np.sum([np.cross(x_plane[i], y_centered[i]) for i in range(3)],\n axis=0).dot(y_perp_unit)\n c_win_tmp = np.sum([x_plane[i].dot(y_centered[i]) for i in range(3)],\n axis=0)\n sin_theta = s_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n cos_theta = c_win_tmp / np.sqrt(np.linalg.norm(s_win_tmp)**2 +\n np.linalg.norm(c_win_tmp)**2)\n rotation_win = np.eye(3) + sin_theta * v_y + (1 - cos_theta) * v_y.dot(v_y)\n # transpose so each column is an x vector, then transpose back at the end\n # x_final = rotation_win.dot(x_final.T).T\n rotation_full = rotation_win.dot(rotation_p)\n # Ignore scale_factor\n # T(x) = Ax + b\n A = rotation_full\n b = meanY - rotation_full.dot(meanX)\n if debug:\n print(\"A\")\n print(rotation_full)\n print(\"b\")\n print(b)\n return(A, b)", "def get_orientation(self):\n # Only work with rotation around x by now\n n0 = DEFAULT_N0\n n1 = DEFAULT_N1\n if self.rotation[2] != 0.0:\n n0 = self.rotate_z(n0)\n n1 = self.rotate_z(n1)\n return n0, n1, DEFAULT_N2", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def raw_orient(\n cal: Calibration,\n cpar: ControlPar,\n nfix: int,\n fix: List[np.ndarray],\n pix: List[Target],\n) -> bool:\n X = np.zeros((10, 6))\n y = np.zeros((10,))\n XPX = np.zeros((6, 6))\n XPy = np.zeros((6,))\n beta = np.zeros((6,))\n itnum = 0\n stopflag = False\n dm = 0.0001\n drad = 0.0001\n cal.added_par.k1 = 0\n cal.added_par.k2 = 0\n cal.added_par.k3 = 0\n cal.added_par.p1 = 0\n cal.added_par.p2 = 0\n cal.added_par.scx = 1\n cal.added_par.she = 0\n\n while not stopflag and itnum < 20:\n itnum += 1\n\n n = 0\n for i in range(nfix):\n xc, yc = pixel_to_metric(pix[i].x, pix[i].y, cpar)\n\n pos = vec_set(fix[i][0], fix[i][1], fix[i][2])\n cal.ext_par.update_rotation_matrix()\n xp, yp = img_coord(pos, cal, cpar.mm)\n\n X[n], X[n + 1] = num_deriv_exterior(cal, cpar, dm, drad, pos)\n y[n], y[n + 1] = xc - xp, yc - yp\n\n n += 2\n\n # void ata (double 
*a, double *ata, int m, int n, int n_large )\n ata(X, XPX, n, 6, 6)\n if np.any(XPX):\n XPXi = np.linalg.inv(XPX)\n else:\n XPXi = XPX\n\n # atl (double *u, double *a, double *l, int m, int n, int n_large)\n XPy = atl(XPy, X, y, 6)\n beta = XPXi @ XPy\n\n # ata ((double *) X, (double *) XPX, n, 6, 6);\n # matinv ((double *) XPX, 6, 6);\n # atl ((double *) XPy, (double *) X, y, n, 6, 6);\n # matmul ((double *) beta, (double *) XPX, (double *) XPy, 6,6,1,6,6);\n\n stopflag = all(abs(beta) <= 0.1)\n\n cal.ext_par.x0 += beta[0]\n cal.ext_par.y0 += beta[1]\n cal.ext_par.z0 += beta[2]\n cal.ext_par.omega += beta[3]\n cal.ext_par.phi += beta[4]\n cal.ext_par.kappa += beta[5]\n\n if stopflag:\n cal.ext_par.rotation_matrix()\n\n return stopflag", "def getEllipsYZRotMatrix(a1, a2):\n adir = a2 - a1\n amid = a1 + 0.5 * adir\n kath = np.sqrt((adir[0] * adir[0] + adir[1] * adir[1]) / 4.0)\n octantA2 = octant(a2)\n theta = np.arctan( abs( (adir[2]/2) / kath) )\n #[1, 4, 6, 7 ] => left rotation\n #[2, 3, 5, 8 ] => right rotation\n if octantA2 in [2, 3, 5, 8]: \n theta = -theta \n print \"theta =\" , np.rad2deg(theta)\n RotY = np.matrix( [ [ np.cos(theta), 0.0, np.sin(theta) ],\n [ 0.0 , 1.0, 0.0 ],\n [ -np.sin(theta), 0.0, np.cos(theta) ]\n ]) \n \n psi = np.arctan( abs( adir[1] / adir[0] ) )\n #[2, 4, 6, 8 ] => left rotation\n #[1, 3, 5, 7 ] => right rotation\n if octantA2 in [1, 3, 5, 7]:\n psi = -psi\n print \"psi =\" , np.rad2deg(psi)\n RotZ = np.matrix( [ [ np.cos(psi), -np.sin(psi), 0.0 ],\n [ np.sin(psi), np.cos(psi), 0.0 ],\n [ 0.0 , 0.0 , 1.0 ]\n ])\n return np.asarray( RotY * RotZ )", "def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):\n # cameraPoints = self.ImageToCamera(imagePoints)\n cameraPoints = imagePoints\n self.__ComputeApproximateVals(cameraPoints, groundPoints)\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n while la.norm(deltaX) > epsilon:\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n # compute residuals\n l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1))\n v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)\n if (np.size(A, 0) - np.size(deltaX)) != 0:\n sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))\n sigmaX = sig[0] * la.inv(N)\n else:\n sigmaX = None\n\n return [self.exteriorOrientationParameters, sigmaX, v]", "def orientToXYZR( a, b ):\n if allclose(a,b):\n return (0,1,0,0)\n an,bn = normalise( (a,b) )\n angle = arccos(dot(an,bn))\n x,y,z = crossProduct( a, b )[0]\n if allclose( (x,y,z), 0.0):\n y = 1.0\n return (x,y,z,angle)", "def _GetHorizonAnglesLegacy(its_elev, height_cbsd, height_rx, refractivity):\n num_points = int(its_elev[0])\n step = its_elev[1]\n dist = num_points * step\n\n # Find the refractivity at the average terrain height\n start_avg = int(3.0 + 0.1 * num_points)\n end_avg = num_points 
- start_avg + 6\n zsys = np.mean(its_elev[start_avg-1:end_avg])\n refractivity *= np.exp(-zsys/9460.0)\n\n # Find the ray down-curvature per meter\n gma = 157e-9\n gme = gma*(1.0 - 0.04665 * np.exp(refractivity/179.3))\n\n alt_cbsd = its_elev[2] + height_cbsd\n alt_rx = its_elev[num_points+2] + height_rx\n qc = 0.5 * gme\n q = qc * dist\n # theta0 and theta1 the slopes, dl0 and dl1 the horizon distances\n theta1 = (alt_rx - alt_cbsd) / dist\n theta0 = theta1 - q\n theta1 = -theta1 - q\n dl0 = dist\n dl1 = dist\n\n if num_points >= 2:\n sa = 0.0\n sb = dist\n wq = True\n for i in range(1, num_points):\n sa += step\n sb -= step\n q = its_elev[i+2] - (qc*sa + theta0) * sa - alt_cbsd\n if q > 0.0:\n theta0 += q/sa\n dl0 = sa\n wq = False\n if not wq:\n q = its_elev[i+2] - (qc*sb + theta1) * sb - alt_rx\n if q > 0.0:\n theta1 += q/sb\n dl1 = sb\n\n return (np.arctan(theta0) * 180/np.pi,\n np.arctan(theta1) * 180/np.pi,\n dl0,\n dl1)", "def horizontal_to_cartesian(altitude, azimuth):\n theta = math.pi / 2 - math.radians(altitude)\n phi = math.radians(-azimuth)\n x = math.sin(phi) * math.sin(-theta)\n y = math.sin(theta) * math.cos(phi)\n z = math.cos(theta)\n return x, y, z", "def acquisition_angles(Px,Gx):\n are_two_arrays_equal(Px, Gx)\n\n major_axis,minor_axis = earth_axes()\n Vx = Px - Gx # observation vector\n del Px\n Vdist = np.linalg.norm(Vx, axis=1) # make unit length\n Vx = np.einsum('i...,i->i...', Vx, np.divide(1, Vdist))\n del Vdist\n\n e_Z = np.einsum('...i,i->...i', Gx,\n 1 / np.array([major_axis, major_axis, minor_axis]))\n e_E = np.zeros_like(e_Z)\n e_E[..., 0], e_E[..., 1] = -e_Z[:, 1].copy(), e_Z[:, 0].copy()\n e_plan = np.linalg.norm(e_Z[:, :2], axis=1)\n e_E = np.einsum('i...,i->i...', e_E, np.divide(1, e_plan))\n del e_plan\n e_N = np.array([np.multiply(e_Z[:, 1], e_E[:, 2]) -\n np.multiply(e_Z[:, 2], e_E[:, 1]),\n np.multiply(e_Z[:, 2], e_E[:, 0]) -\n np.multiply(e_Z[:, 0], e_E[:, 2]),\n np.multiply(e_Z[:, 0], e_E[:, 1]) -\n np.multiply(e_Z[:, 1], e_E[:, 0])]).T\n\n LoS = np.zeros_like(e_Z)\n LoS[..., 0] = np.einsum('...i,...i->...', Vx, e_E)\n del e_E\n LoS[..., 1] = np.einsum('...i,...i->...', Vx, e_N)\n del e_N\n LoS[..., 2] = np.einsum('...i,...i->...', Vx, e_Z)\n del e_Z\n\n az = np.rad2deg(np.arctan2(LoS[..., 0], LoS[..., 1]))\n zn = np.rad2deg(np.arccos(LoS[...,2]))\n return zn, az", "def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)", "def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation", "def rotation(self) -> float:\n xs, ys = self.xcoords.data, self.ycoords.data\n rot = 0\n if xs.ndim == 2:\n ddx1 = xs[0, -1] - xs[0, 0]\n ddy1 = ys[0, -1] - ys[0, 0]\n if not np.isclose(ddx1, 0):\n rot = math.degrees(math.atan(ddy1 / ddx1))\n else:\n rot = -90\n if ddx1 < 0:\n rot = 180 + rot\n elif ddy1 < 0:\n rot = 360 + rot\n return rot", "def getAffineTransform(self, coord1, coord2):\n num_coords = 2 * len(coord1)\n A = np.zeros((num_coords, 6))\n b = []\n for point2 in coord2:\n b.append(float(point2[0]))\n b.append(float(point2[1]))\n b = np.asarray(b)\n i = 0\n for point1 in coord1:\n A[i, 0:2] = point1[0:2]\n A[i, 2] = 1\n A[i+1, 3:5] = point1[0:2]\n A[i+1, 5] = 1\n i += 2\n A = np.asarray(A)\n b = np.asarray(b)\n x = np.matmul(np.matmul(np.linalg.inv(np.matmul(A.T, A)), A.T), b.T)\n self.depth2rgb_affine = 
np.reshape(x, (2, 3))\n csv.writer(open(\"depth2rgb_affine.cfg\", \"w+\", newline=''), delimiter=',').writerows(self.depth2rgb_affine)\n # else:\n # x = np.vstack([np.reshape(x,(2,3)),[0,0,1]])\n # self.cam_ext_mat = x\n # A = [point[i,j+0:j+3].astype(np.float32) for i,point in enumerate(coord1) if i%2 == 0]\n # pts1 = coord1[0:3].astype(np.float32)\n # pts2 = coord2[0:3].astype(np.float32)\n # print(cv2.getAffineTransform(pts1, pts2))\n # return cv2.getAffineTransform(pts1, pts2)", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def angle2D(self) -> float:\n\n return self.v2ddict.angle2d()", "def F_trans(self):\n rho_H1 = self.edp_par['rho_H1'].value\n Z_H1 = self.edp_par['Z_H1'].value\n sigma_H1 = self.edp_par['sigma_H1'].value\n rho_H2 = self.edp_par['rho_H2'].value\n Z_H2 = self.edp_par['Z_H2'].value\n sigma_H2 = self.edp_par['sigma_H2'].value\n rho_M = self.edp_par['rho_M'].value\n sigma_M = self.edp_par['sigma_M'].value\n psi = self.edp_par['psi'].value \n common_scale = self.edp_par['common_scale'].value\n \n \n # Make sure Z_H2 > Z_H1. If Z_H2 < Z_H1, swap them\n if Z_H1 > Z_H2:\n Z_H1, Z_H2 = Z_H2, Z_H1\n sigma_H1, sigma_H2 = sigma_H2, sigma_H1\n rho_H1, rho_H2 = rho_H2, rho_H1\n \n # Calculate the intermediate variables\n alpha = self.qz*cos(psi) - self.qx*sin(psi)\n Z_CH2 = Z_H1 - sigma_H1\n Z_W = Z_H2 + sigma_H2\n DeltaZ_H = Z_W - Z_CH2\n \n # Calculate the Gaussian part \n FG = -rho_M*sigma_M * exp(-0.5*(alpha*sigma_M)**2)\n FG += 2*rho_H1*sigma_H1 * cos(alpha*Z_H1) * exp(-0.5*(alpha*sigma_H1)**2)\n FG += 2*rho_H2*sigma_H2 * cos(alpha*Z_H2) * exp(-0.5*(alpha*sigma_H2)**2)\n FG *= np.sqrt(2*pi)\n \n # Calculate the strip part\n FS = -2 * sin(alpha*Z_CH2) / alpha\n \n # Calculate the bridging part\n FB = 1 / (alpha + pi/DeltaZ_H)\n FB += 1 / (alpha - pi/DeltaZ_H)\n FB *= sin(alpha*Z_W) + sin(alpha*Z_CH2)\n FB *= 0.5\n FB -= (sin(alpha*Z_W)-sin(alpha*Z_CH2)) / alpha\n \n return common_scale * (FG + FS + FB)", "def gyroi(E, B, mu, Zi,pitch):\n V = ev2vi(E, mu);\n Vperp = V*np.sqrt(1-pitch);\n return mu * mp * Vperp / Zi / eV2J / B;", "def proyZm1(u, v, t1):\n den = u ** 2 + v ** 2 + 4\n x = u - t1 * (u - 4 * u / den)\n y = v - t1 * (v - 4 * v / den)\n z = -1 - t1 * (-2 + 8 / den)\n return (x, y, z)", "def yy(self):\n return self.exterior[:, 1]", "def angle(z):", "def orientation(pointA, pointB, target):\n if target in (pointA, pointB):\n return -1\n buf = np.array([1, pointA.X, pointA.Y, 1, pointB.X, pointB.Y, 1, target.X, target.Y]).reshape(3,-1)\n buf = np.linalg.det(buf)\n if abs(buf) < Drawable._comparisonLimit:\n return 0\n if buf < 0:\n return -1\n return 1", "def test_orientation_vector():\n\topening_angle = geom_instance.source_opening_angle\n\ttest_orientation = o_gen_instance.generate_orientation_vector()\n\tassert test_orientation[0] < np.cos(opening_angle)\n\tassert test_orientation[1] < np.sin(opening_angle)", "def principal_axes_of_inertia(self, i_seq):\n return self._principal_axes_of_inertia[i_seq]", "def _derY(self, x, y):\n x_pos, y_pos = self.find_sector(x, y)\n alpha, beta = self.find_coords(x, y, x_pos, y_pos)\n\n # Get four corners data for each point\n xA = self.x_values[x_pos, y_pos]\n xB = self.x_values[x_pos + 1, y_pos]\n xC = self.x_values[x_pos, y_pos + 1]\n xD = self.x_values[x_pos + 1, y_pos + 1]\n yA = self.y_values[x_pos, y_pos]\n yB = self.y_values[x_pos + 1, y_pos]\n yC = self.y_values[x_pos, y_pos + 1]\n yD = self.y_values[x_pos 
+ 1, y_pos + 1]\n fA = self.f_values[x_pos, y_pos]\n fB = self.f_values[x_pos + 1, y_pos]\n fC = self.f_values[x_pos, y_pos + 1]\n fD = self.f_values[x_pos + 1, y_pos + 1]\n\n # Calculate components of the alpha,beta --> x,y delta translation matrix\n alpha_x = (1 - beta) * (xB - xA) + beta * (xD - xC)\n alpha_y = (1 - beta) * (yB - yA) + beta * (yD - yC)\n beta_x = (1 - alpha) * (xC - xA) + alpha * (xD - xB)\n beta_y = (1 - alpha) * (yC - yA) + alpha * (yD - yB)\n\n # Invert the delta translation matrix into x,y --> alpha,beta\n det = alpha_x * beta_y - beta_x * alpha_y\n y_alpha = -beta_x / det\n y_beta = alpha_x / det\n\n # Calculate the derivative of f w.r.t. alpha and beta\n dfda = (1 - beta) * (fB - fA) + beta * (fD - fC)\n dfdb = (1 - alpha) * (fC - fA) + alpha * (fD - fB)\n\n # Calculate the derivative with respect to x (and return it)\n dfdy = y_alpha * dfda + y_beta * dfdb\n return dfdy", "def extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def orientation(self, point):\n p_x = self.begin.x\n p_y = self.begin.y\n\n q_x = self.end.x\n q_y = self.end.y\n\n r_x = point.x\n r_y = point.y\n\n D = q_x * r_y + p_x * q_y + p_y * r_x - q_x * p_y - r_x * q_y - r_y * p_x\n\n if D > 0:\n return 1\n elif D == 0:\n return 0\n else:\n return -1", "def houghTransform(img):\n\n #initializing the values:\n theta = np.deg2rad(np.arange(-90, 90, 1)) #initializing a vector of angles in radians\n sinTheta = np.sin(theta)\n cosinTheta = np.cos(theta)\n imgWidth = img.shape [0]\n imgHeight = img.shape [1]\n imgDiagnal = int(math.sqrt(imgWidth * imgWidth + imgHeight * imgHeight)) #get the diagonal length of the image for initializing rho\n rho = np.linspace(-imgDiagnal, imgDiagnal, imgDiagnal*2) #initializing the rho values\n\n accumulator = np.zeros((2*imgDiagnal, len(theta)))\n points = [ [ 0] * len(theta)] * (2* imgDiagnal)\n\n\n are_edges = img > 5 if True else img < value_threshold\n yXis, xXis = np.nonzero(are_edges)\n\n\n\n\n #doing hough transform\n for i in range(len(xXis)):\n currentX = xXis[i]\n currentY = yXis[i]\n\n #loop through all possible angles\n\n currentRhos = [] #have a rhos to check duplicate x, y\n for j in range(len(theta)):\n currentRho = imgDiagnal + int(currentX * cosinTheta[j] + currentY*sinTheta[j])\n\n\n if points[currentRho][j] == 0 :\n points[currentRho][j] = [ ] * len(theta)\n\n if not currentRho in currentRhos:\n currentRhos.append(currentRho)\n points[currentRho][j].append([currentX, currentY])\n\n\n accumulator[currentRho, j] += 1\n\n\n return accumulator, points, theta, rho", "def __compose_transformation(self):\n s = self.scale\n rotR = self.rotation\n t = self.translation\n T = np.eye(4)\n T[0:3, 3] = t\n R = np.eye(4)\n R[0:3, 0:3] = rotR\n M = T.dot(R)\n if s == 1:\n M = T.dot(R)\n else:\n S = np.eye(4)\n S[0:3, 0:3] = np.diag([s, s, s])\n M = T.dot(R).dot(S)\n return M", "def estimate_affine_matrix_3d_to_2d(X, x):\n assert x.shape[0] == X.shape[0]\n assert x.shape[0] >= 4\n X = X.T # (3, n)\n x = x.T # (2, n)\n n = x.shape[1]\n\n ###---- 1. 
normalization\n ## 2d points\n mean = np.mean(x, 1) # (2, )\n x = x - np.tile(mean[:, np.newaxis], [1, n]) # (2, n)\n average_norm = np.mean(np.sqrt(np.sum(x ** 2, 0)))\n scale = np.sqrt(2) / average_norm\n x = scale * x\n\n # T = [[scale, 0, -mean * scale], \n # [ 0, scale, -mean * scale], \n # [ 0, 0, 1 ]]\n T = np.zeros((3, 3), dtype=np.float32)\n T[0, 0] = T[1, 1] = scale\n T[:2, 2] = -mean * scale\n T[2, 2] = 1\n\n ## 3d points\n X_homo = np.vstack((X, np.ones((1, n)))) # (4, n)\n mean = np.mean(X, 1) # (3, )\n X = X - np.tile(mean[:, np.newaxis], [1, n]) # (3, n)\n m = X_homo[: 3, :] - X\n average_norm = np.mean(np.sqrt(np.sum(X ** 2, 0)))\n scale = np.sqrt(3) / average_norm\n X = scale * X\n\n U = np.zeros((4, 4), dtype=np.float32)\n U[0, 0] = U[1, 1] = U[2, 2] = scale\n U[: 3, 3] = -mean * scale\n U[3, 3] = 1\n\n ###---- 2. equations\n A = np.zeros((n * 2, 8), dtype=np.float32)\n X_homo = np.vstack((X, np.ones((1, n)))).T\n A[: n, : 4] = X_homo\n A[n: , 4: ] = X_homo\n b = np.reshape(x, [-1, 1]) # (2n, 1)\n\n ###---- 3.solution\n p_8 = np.linalg.pinv(A).dot(b) # (8, 2n) x (2n, 1) -> (8, 1)\n p = np.zeros((3, 4), dtype=np.float32)\n p[0, :] = p_8[:4, 0]\n p[1, :] = p_8[4:, 0]\n p[-1, -1] = 1\n\n ###---- 4. denormalization\n P_Affine = np.linalg.inv(T).dot(p.dot(U))\n return P_Affine", "def interior_angle(self):\n if self.interior_angle_l is not None:\n return self.interior_angle_l\n else:\n self.interior_angle_l = ((self.vert_count - 2)*180)/math.pi\n return self.interior_angle_l", "def orientation_ras_lps(affine: NdarrayTensor) -> NdarrayTensor:\n sr = max(affine.shape[0] - 1, 1) # spatial rank is at least 1\n flip_d = [[-1, 1], [-1, -1, 1], [-1, -1, 1, 1]]\n flip_diag = flip_d[min(sr - 1, 2)] + [1] * (sr - 3)\n if isinstance(affine, torch.Tensor):\n return torch.diag(torch.as_tensor(flip_diag).to(affine)) @ affine # type: ignore\n return np.diag(flip_diag).astype(affine.dtype) @ affine # type: ignore", "def _affine_coordinates(self, Vrep_object):\n if '_affine_coordinates_pivots' not in self.__dict__:\n v_list = [ vector(v) for v in self.Vrepresentation() ]\n if len(v_list)>0:\n origin = v_list[0]\n v_list = [ v - origin for v in v_list ]\n coordinates = matrix(v_list)\n self._affine_coordinates_pivots = coordinates.pivots()\n \n v = list(Vrep_object)\n if len(v) != self.ambient_dim():\n raise ValueError('Incorrect dimension: '+str(v))\n\n return vector(self.field(), [ v[i] for i in self._affine_coordinates_pivots ])", "def inverse_transform2(alpha, tx = 0.0, ty = 0.0, mirror = 0):\n\n\tt = Transform({\"type\":\"2D\",\"alpha\":alpha,\"tx\":tx,\"ty\":ty,\"mirror\":mirror,\"scale\":1.0})\n\tt = t.inverse()\n\tt = t.get_params(\"2D\")\n\treturn t[ \"alpha\" ], t[ \"tx\" ], t[ \"ty\" ], t[ \"mirror\" ]", "def proyZ1(u, v, t2):\n den = u ** 2 + v ** 2 + 4\n x = u - t2 * (u - 4 * u / den)\n y = v - t2 * (v - 4 * v / den)\n z = 1 - t2 * (2 - 8 / den)\n return (x, y, z)", "def read_affine(df):\n SliceThickness = [df.SliceThickness]\n PixelSpacing = _string_to_list_of_floats(df.PixelSpacing)\n ImageOrientationPatient = _string_to_list_of_floats(df.ImageOrientationPatient)\n ImagePositionPatient = _string_to_list_of_floats(df.ImagePositionPatient)\n\n Zooms = np.array(PixelSpacing+SliceThickness, dtype=float)\n ImageOrientationPatient = np.array(ImageOrientationPatient, dtype=float)\n ImagePositionPatient = np.array(ImagePositionPatient, dtype=float)\n \n ijk2ras = extract_cosines(ImageOrientationPatient)\n\n ijk2ras = (ijk2ras*np.array([-1,-1,1])).T\n ImagePositionPatient = 
ImagePositionPatient*np.array([-1,-1,1])\n\n affine = np.stack((ijk2ras[:,0]*Zooms[0],\n ijk2ras[:,1]*Zooms[1],\n ijk2ras[:,2]*Zooms[2],\n ImagePositionPatient), axis=1)\n\n return np.vstack((affine,[[0,0,0,1]]))", "def yprojection(self):\n return self.image.sum(axis=1)", "def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))", "def test_active_matrix_from_extrinsic_euler_zyz():\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw([0.5 * np.pi, 0, 0]),\n np.array([\n [1, 0, 0],\n [0, 0, -1],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [1, 0, 0],\n [0, 1, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0]),\n np.array([\n [0, 1, 0],\n [0, 0, -1],\n [-1, 0, 0]\n ])\n )\n assert_array_almost_equal(\n pr.active_matrix_from_extrinsic_roll_pitch_yaw(\n [0.5 * np.pi, 0.5 * np.pi, 0.5 * np.pi]),\n np.array([\n [0, 0, 1],\n [0, 1, 0],\n [-1, 0, 0]\n ])\n )", "def homogeneous_transformation_matrix_2d(angle, tx, ty):\n return np.array([[np.cos(angle), -np.sin(angle), tx],\n [np.sin(angle), np.cos(angle), ty],\n [0, 0, 1]])", "def _inverse_affine_matrix(self) -> np.ndarray:\n raise NotImplementedError", "def exteriorOrientationParameters(self):\n return self.__exteriorOrientationParameters", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def ComputeGeometricParameters(self):\n # extracting inner orientation params\n a0 = self.innerOrientationParameters[0]\n b0 = self.innerOrientationParameters[1]\n a1 = self.innerOrientationParameters[2]\n a2 = self.innerOrientationParameters[3]\n b1 = self.innerOrientationParameters[4]\n b2 = self.innerOrientationParameters[5]\n\n # computing algebric params\n tx = a0;\n ty = b0\n theta = np.arctan(b1 / b2)\n gamma = np.arctan((a1 * np.sin(theta) + a2 * np.cos(theta)) / (b1 * np.sin(theta) + b2 * np.cos(theta)))\n sx = a1 * np.cos(theta) - a2 * np.sin(theta)\n sy = (a1 * np.sin(theta) + a2 * np.cos(theta)) / np.sin(gamma)\n\n return {\"translationX\": tx, \"translationY\": ty, \"rotationAngle\": np.rad2deg(theta), \"scaleFactorX\": sx,\n \"scaleFactorY\": sy, \"shearAngle\": np.rad2deg(gamma)}", "def secondorder(self):\n f = self.img\n x = self.x\n y = self.y\n self.x2 = sum(f*x**2)/sum(f) - self.x1**2\n self.y2 = sum(f*y**2)/sum(f) - self.y1**2\n self.xy = sum(f*x*y)/sum(f) - self.x1*self.y1", "def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, 
normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n # The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, 
ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu", "def translateToOriginXform(self):\n return np.array([[1, 0, 0, -self.eye[0]],\n [0, 1, 0, -self.eye[1]],\n [0, 0, 1, -self.eye[2]],\n [0, 0, 0, 1]])", "def to_axang(self) -> Tuple[np.ndarray, float]:\n denom = np.linalg.norm(self.v)\n angle = 2.0*np.arctan2(denom, self.w)\n axis = np.zeros(3) if angle==0.0 else self.v/denom\n return axis, angle", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb", "def vrrotvec2mat(ax_ang):\n\n #file_dir = os.path.dirname(os.path.realpath(__file__))\n #path_dir2 = file_dir + '/../geometry/'\n #sys.path.append(path_dir2)\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n 
elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n c = np.cos(angle)\n s = np.sin(angle)\n tc = 1 - c\n\n mt11 = tc*x*x + c\n mt12 = tc*x*y - s*z\n mt13 = tc*x*z + s*y\n\n mt21 = tc*x*y + s*z\n mt22 = tc*y*y + c\n mt23 = tc*y*z - s*x\n\n mt31 = tc*x*z - s*y\n mt32 = tc*y*z + s*x\n mt33 = tc*z*z + c\n\n mtx = np.column_stack((mt11, mt12, mt13, mt21, mt22, mt23, mt31, mt32, mt33))\n\n inds1 = np.where(ax_ang[4, :] == -1)\n mtx[inds1, :] = -mtx[inds1, :]\n\n if msz == 1:\n mtx = mtx.reshape(3, 3)\n else:\n mtx = mtx.reshape(msz, 3, 3)\n\n return mtx", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def _calculate_angle(x0, y0, x1, y1):\n if x0 == y0 == x1 == y1 == 0:\n return 0\n\n if x1 - x0 > 0: # pointing to the right semi-plane\n angle = atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 >= 0: # adding pi if pointing to the left-bottom quart\n angle = pi + atan((y1 - y0) / (x1 - x0))\n elif x1 - x0 < 0 and y1 - y0 < 0: # subtract pi if pointing to the left-upper quart\n angle = -pi + atan((y1 - y0) / (x1 - x0))\n else: # zerodevision handle\n if y1 - y0 > 0: # pointing down\n angle = pi / 2\n else: # pointing up\n angle = -pi / 2\n\n return angle", "def orthopyroxene():\n\n rho = 3304.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 236.9; C[0,1] = 79.6; C[0,2] = 63.2; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 180.5; C[1,2] = 56.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 84.3; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 79.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 80.1\n\n return C, rho", "def py_ang(self,v1, v2):\n cosang = np.dot(v1, v2)\n sinang = la.norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)", "def transform2h(self, x, y, m):\n A = torch.matmul(m, torch.stack([x, y, torch.ones(len(x))]))\n xt = A[0, :] / A[2, :]\n yt = A[1, :] / A[2, :]\n return xt, yt", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def relative_phases(self):\r\n return np.angle(self.coherency)", "def prf2visual_angle(prf_mtx, img_size, out_dir, base_name):\n feature_size = prf_mtx.shape[1]\n pos_mtx = prf_mtx[:, :2]\n # eccentricity\n ecc = retinotopy.coord2ecc(pos_mtx, img_size, 20)\n vol = ecc.reshape(18, 64, 64)\n vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_ecc.nii.gz'))\n # angle\n angle = retinotopy.coord2angle(pos_mtx, img_size)\n vol = angle.reshape(18, 64, 64)\n vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_angle.nii.gz'))\n # pRF size\n if feature_size > 2:\n size_angle = retinotopy.get_prf_size(prf_mtx, 55, 20)\n vol = size_angle.reshape(18, 64, 64)\n vutil.save2nifti(vol, os.path.join(out_dir, base_name+'_size.nii.gz'))", "def T(self):\n\n # Calculate the direction cosines for the local x-axis\n # The local x-axis will run from the i-node to the j-node\n xi = self.i_node.X\n xj = self.j_node.X\n yi = self.i_node.Y\n yj = self.j_node.Y\n zi = self.i_node.Z\n zj = self.j_node.Z\n x = [(xj - xi), (yj - 
yi), (zj - zi)]\n x = x/norm(x)\n \n # The local y-axis will be in the plane of the plate\n # Find a vector in the plate's local xy plane\n xn = self.n_node.X\n yn = self.n_node.Y\n zn = self.n_node.Z\n xy = [xn - xi, yn - yi, zn - zi]\n\n # Find a vector perpendicular to the plate surface to get the orientation of the local z-axis\n z = cross(x, xy)\n \n # Divide the vector by its magnitude to produce a unit z-vector of direction cosines\n z = z/norm(z)\n\n # Calculate the local y-axis as a vector perpendicular to the local z and x-axes\n y = cross(z, x)\n \n # Divide the z-vector by its magnitude to produce a unit vector of direction cosines\n y = y/norm(y)\n\n # Create the direction cosines matrix\n dirCos = array([x, y, z])\n \n # Build the transformation matrix\n transMatrix = zeros((24, 24))\n transMatrix[0:3, 0:3] = dirCos\n transMatrix[3:6, 3:6] = dirCos\n transMatrix[6:9, 6:9] = dirCos\n transMatrix[9:12, 9:12] = dirCos\n transMatrix[12:15, 12:15] = dirCos\n transMatrix[15:18, 15:18] = dirCos\n transMatrix[18:21, 18:21] = dirCos\n transMatrix[21:24, 21:24] = dirCos\n \n return transMatrix", "def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M", "def get_y(EQ, M):\n return (EQ[1] * ((-1) * EQ[0] * M[0] + EQ[1] * M[1]) - EQ[0] * EQ[2]) / (EQ[1] ** 2 + EQ[0] ** 2)" ]
[ "0.6454401", "0.63721627", "0.63071674", "0.6261199", "0.6196916", "0.6190799", "0.61566526", "0.60925615", "0.60682034", "0.6002589", "0.59904027", "0.5930793", "0.5907026", "0.5904838", "0.5902323", "0.58778685", "0.58416617", "0.58258176", "0.58110034", "0.5757958", "0.5741579", "0.5710248", "0.57085216", "0.5680774", "0.56716603", "0.56629574", "0.5662641", "0.5643609", "0.5635845", "0.5627301", "0.5605069", "0.5599455", "0.5598289", "0.55772334", "0.5574006", "0.5571472", "0.55665916", "0.5553053", "0.55518717", "0.5529964", "0.5518819", "0.5492775", "0.5489097", "0.5486433", "0.5476902", "0.5475419", "0.5474921", "0.54721135", "0.546658", "0.545225", "0.54385614", "0.5404473", "0.5395997", "0.53941953", "0.53937113", "0.53905207", "0.53904533", "0.539041", "0.5388549", "0.53805137", "0.5373326", "0.5371099", "0.5345297", "0.5342446", "0.53385144", "0.5326929", "0.53261006", "0.5323215", "0.532199", "0.5312409", "0.5310579", "0.5309963", "0.53093904", "0.5302001", "0.52768993", "0.52751875", "0.5266632", "0.52658767", "0.52654046", "0.52651393", "0.5261884", "0.52581173", "0.5254085", "0.5252276", "0.52519584", "0.5250296", "0.52350026", "0.5234039", "0.52338445", "0.5223462", "0.52217084", "0.52189875", "0.5213695", "0.5213168", "0.52082294", "0.52073354", "0.52073354", "0.5204707", "0.5200297", "0.5199332", "0.51964784" ]
0.0
-1
Compute the observation vector for solving the exterior orientation parameters of a single image based on their approximate values
def __ComputeObservationVector(self, groundPoints):
    n = groundPoints.shape[0]  # number of points

    # Coordinates subtraction
    dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]
    dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]
    dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]
    dXYZ = np.vstack([dX, dY, dZ])
    rotated_XYZ = np.dot(self.rotationMatrix.T, dXYZ).T

    l0 = np.empty(n * 2)

    # Computation of the observation vector based on approximate exterior orientation parameters:
    l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2]
    l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]

    return l0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ev2vi(eV,mu): \n return cv*np.sqrt( eV*(eV+2.e0*mu*mpc2))/(eV+mu*mpc2)", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def get_Objective(self):\r\n \r\n ans = 0\r\n for i in range(len(self.X.shape)):\r\n ans += self.parameter['phi'][i] * self.parameter['lambda1'] *norm(tl.unfold(self.Wlist[i], i), 'nuc') + (1 / self.parameter['m']) * norm(tl.unfold(self.X - self.Wlist[i] - self.V, i))\r\n\r\n # Augmented part is calculated seperately. 
\r\n augment_part1 = 0.5 * self.parameter['rho1'] * norm(self.V - self.T + self.F1)\r\n augment_part2 = 0.5 * self.parameter['rho2'] * norm(tl.fold(np.dot(self.Dmatrix, tl.unfold(self.T, 0)), 0, self.T.shape) - self.S + self.F2)\r\n\r\n # Combine the result for final objective function\r\n ans += self.parameter['beta1'] * norm(self.V.reshape(self.totaldim), 1) + self.parameter['beta2'] * norm(self.S.reshape(self.totaldim), 1) + augment_part1 + augment_part2 \r\n return ans", "def compute_observation(self):\n robotPos, robotOrn = p.getBasePositionAndOrientation(self.botId)\n robotEuler = p.getEulerFromQuaternion(robotOrn)\n linear, angular = p.getBaseVelocity(self.botId)\n return (np.array([robotEuler[0],angular[0],self.vt], dtype='float32'))", "def diriv(x, params):\n return np.array([x,1])", "def ienkf(A0,x,x0,yobs,T,H,R):\n nens = A0.shape[1]\n Iens = np.matrix(np.identity(nens))\n \n #Anomalies in state space\n A = A0 * T\n \n #Ensemble in state space\n E = x + A\n \n #Ensemble in observation space\n Ey = np.matrix(H(E))\n \n #Ensemle mean in observation space\n y = np.mean(Ey,axis=1)\n \n #Anomaies in observation space\n Ay = Ey - y\n Ay = Ay*np.linalg.inv(T)\n \n #Innovation vector\n dy = yobs - y\n \n \n Rmsq = np.linalg.inv(scipy.linalg.sqrtm(R))\n s = Rmsq*dy/np.sqrt(nens-1)\n S = Rmsq*Ay/np.sqrt(nens-1)\n V = np.linalg.inv(Iens + S.T*S)\n b = V*S.T*s\n dx = A0*b + A0 * V * np.linalg.pinv(A0.T*A0) * A0.T * (x-x0)\n T = scipy.linalg.sqrtm(V)\n return (dx,T)", "def EI(x, gp, ndim,fMax, epsilon=0.1):\n\t#epsilon = 0.1\n\tx1=np.array(x).reshape(-1,ndim)\n\tmuNew, stdNew = gp.predict(x1, return_std=True)\n\t#fMax=max(Y_init)\n\tZ = (muNew - fMax - epsilon)/stdNew\n\treturn -((muNew - fMax - epsilon)* scipy.stats.norm.cdf(Z) + stdNew*scipy.stats.norm.pdf(Z))", "def HOG(img, x, y):\n #TODO: write a HOG descriptor here\n des=[]\n row=0\n sub_image = img[x-8:x+8,y-8:y+8]\n while row < len(sub_image):\n col=0\n while col < len(sub_image[0]):\n temp_vector = [0 for i in range(8)]\n new_subimage = sub_image[row:row+4,col:col+4]\n x_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=1,dy=0)\n y_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=0,dy=1)\n theta = np.empty([x_gradient.shape[0],x_gradient.shape[1]])\n for i in range(len(x_gradient)):\n for j in range(len(x_gradient[0])):\n if x_gradient[i,j] == 0:\n theta[i,j] = 90\n else:\n theta[i,j] = np.arctan(y_gradient[i,j]/x_gradient[i,j])*(180/np.pi)\n theta_iter = theta.flatten() #To avoid nested for loops for 4x4 theta\n for i in range(len(theta_iter)):\n if theta_iter[i] < 45:\n temp_vector[0]=temp_vector[0]+1\n elif theta_iter[i] >= 45 and theta_iter[i] < 90:\n temp_vector[1]=temp_vector[1]+1\n elif theta_iter[i] >= 90 and theta_iter[i] < 135:\n temp_vector[2]=temp_vector[2]+1\n elif theta_iter[i] >= 135 and theta_iter[i] < 180:\n temp_vector[3]=temp_vector[3]+1\n elif theta_iter[i] >= 180 and theta_iter[i] < 225:\n temp_vector[4]=temp_vector[4]+1\n elif theta_iter[i] >= 225 and theta_iter[i] < 270:\n temp_vector[5]=temp_vector[5]+1\n elif theta_iter[i] >= 270 and theta_iter[i] < 315:\n temp_vector[6]=temp_vector[6]+1\n elif theta_iter[i] >= 315 and theta_iter[i] < 360:\n temp_vector[7]=temp_vector[7]+1\n des.extend(temp_vector)\n col=col+4\n row=row+4\n return des", "def erode_pvm(image, selem):\n assert len(image.shape) == len(selem.shape), \"Image and region must have \" \\\n \"identical dimensionality.\"\n eroded = ndi.correlate(1 - image.astype(np.float64), selem.astype(np.float64),\n mode='reflect')\n eroded = 1. 
- eroded # Invert\n\n return np.maximum(eroded, 0.0)", "def gyroi(E, B, mu, Zi,pitch):\n V = ev2vi(E, mu);\n Vperp = V*np.sqrt(1-pitch);\n return mu * mp * Vperp / Zi / eV2J / B;", "def calc_emi(tgt_pt, src_pt, src_dir, coef=1):\r\n emi_params = [\r\n numpy.zeros(tgt_pt.shape[-1], tgt_pt.dtype),\r\n numpy.zeros((tgt_pt.shape[-1], tgt_pt.shape[-1]), tgt_pt.dtype)\r\n ]\r\n\r\n # Start and end 'r' vectors\r\n r0 = tgt_pt - src_pt\r\n r1 = r0 - src_dir\r\n\r\n # Calculate the integral from Biot–Savart law (https://en.wikipedia.org/wiki/Biot–Savart_law):\r\n # dl x r / sqrt(l^2 + R^2)^3\r\n #\r\n # The \"l\" origin is selected at the closest point to the target to simplify calculations.\r\n # Thus \"r = l^2 + R^2\" and \"|dl x r| = |dl|.R\", where R is distance between the target and origin.\r\n #\r\n # Use integral calculator https://www.integral-calculator.com/ (substitute l with x):\r\n # int[ R/sqrt(x^2 + R^2)^3 dx ] = x / (R * sqrt(x^2 + R^2)) + C\r\n src_dir_len2 = src_dir.dot(src_dir)\r\n if not src_dir_len2:\r\n return emi_params # Zero length, return zero EMI params\r\n\r\n # Vector projections of \"r0\" and \"r1\" in the direction of \"src_dir\"\r\n # The '-' is to set the origin at the projected point, instead of at src_pt\r\n l0 = -src_dir.dot(src_dir.dot(r0) / src_dir_len2)\r\n l1 = l0 + src_dir\r\n R = l0 + r0\r\n\r\n #\r\n # Integral at the start of interval\r\n #\r\n # Start with l0 x R to get a direction vector with length of |l0|.|R|\r\n vect0 = numpy.cross(l0, R)\r\n\r\n # Divide by 'r0'\r\n r0_len = numpy.sqrt(r0.dot(r0))\r\n if not r0_len:\r\n return None # Target point coincides with \"src_pt\"\r\n vect0 /= r0_len\r\n\r\n #\r\n # Integral at the end of interval\r\n #\r\n # Start with l1 x R to get a direction vector with length of |l1|.|R|\r\n vect1 = numpy.cross(l1, R)\r\n\r\n # Divide by 'r1'\r\n r1_len = numpy.sqrt(r1.dot(r1))\r\n if not r1_len:\r\n return None # Target point coincides with \"src_pt + src_dir\"\r\n vect1 /= r1_len\r\n\r\n #\r\n # Combine both integrals\r\n #\r\n # Divide result by 'R^2', resulting:\r\n # |l|.|R| / |r| / |R|^2 = |l| / (|R|.|r|)\r\n R_len2 = R.dot(R)\r\n if not R_len2:\r\n return None # Target point lies on the source line\r\n\r\n B = (vect1 - vect0) / R_len2\r\n\r\n # Scale by a coefficient, like current, magnetic constant and 1/(4*pi)\r\n B *= coef\r\n\r\n emi_params[0] = B\r\n\r\n # Calculate the partial derivatives from Biot–Savart law \"R/sqrt(l^2 + R^2)^3\" (see above)\r\n # along \"l\" and \"R\" axes, then integrate each of them along 'l'.\r\n #\r\n # The individual gradient vector components are the values of these integrals. 
The 'l'\r\n # component is along the 'src_dir' direction and 'R' component is to the direction of its\r\n # perpendicular through 'tgt_pt'.\r\n\r\n # Gradient component along 'l' (substitute l with x):\r\n # int[ dF(x)/dx dx] = F(x) => gradBx = R/sqrt(x^2 + R^2)^3 - R/sqrt(x^2 + R^2)^3 + C\r\n # Finally:\r\n # R * (1/r1^3 - 1/r0^3)\r\n R_len = numpy.sqrt(R_len2)\r\n\r\n l_comp = R_len * ( 1 / r1_len ** 3 - 1 / r0_len ** 3)\r\n\r\n # Gradient component along 'R':\r\n # Use derivative calculator https://www.derivative-calculator.net/ (substitute R with x):\r\n # input: x / sqrt(x^2 + l^2)^3, result: - (2x^2 - l^2) / (x^2 + l^2)^(5/2)\r\n # Substitute back x to R, then l with x:\r\n # result: (x^2 - 2R^2) / sqrt(x^2 + R^2)^5\r\n # Use integral calculator https://www.integral-calculator.com/ (back R and x):\r\n # input: (x^2 - 2R^2) / sqrt(x^2 + R^2)^5, result: - (x^3 + 2xR^2) / ( R^2(x^2 + R^2)^(3/2) ) + C\r\n # Simplify (substitute back x to l):\r\n # - (l^3 + 2*l*R^2) / ( R^2(l^2 + R^2)^(3/2) ) = - l(l^2 + R^2 + R^2) / ( R^2 * r^3 ) =\r\n # = - l(r^2 + R^2) / ( R^2 * r^3 )\r\n # Finally:\r\n # - l1(r1^2 + R^2) / ( R^2 * r1^3 ) + l1(r1^2 + R^2) / ( R^2 * r0^3 )\r\n l0_len = numpy.sqrt(l0.dot(l0))\r\n if l0.dot(src_dir) < 0:\r\n l0_len = -l0_len\r\n l1_len = numpy.sqrt(l1.dot(l1))\r\n if l1.dot(src_dir) < 0:\r\n l1_len = -l1_len\r\n\r\n R_comp = -l1_len*(r1_len ** 2 + R_len2) / (R_len2 * r1_len ** 3)\r\n R_comp -= -l0_len*(r0_len ** 2 + R_len2) / (R_len2 * r0_len ** 3)\r\n\r\n # The '-' is to flip direction to point toward field magnitude increase\r\n l_comp *= -coef\r\n R_comp *= coef\r\n\r\n # Combine l_comp and R_comp into a Jacobian matrix\r\n emi_params[1] = build_jacobian(l_comp, R_comp, src_dir, R, B)\r\n\r\n return emi_params", "def approximate_nonlinear_vector_field(dataset_path):\n\n file_X0 = \"nonlinear_vectorfield_data_x0.txt\"\n names_X0 = ['X0_x', 'X0_y']\n data_X0 = pd.read_csv(dataset_path / file_X0, sep=' ', names=names_X0).to_numpy()\n plt.scatter(data_X0[:, 0], data_X0[:, 1])\n\n names_X1 = ['X1_x', 'X1_y']\n file_X1 = \"nonlinear_vectorfield_data_x1.txt\"\n data_X1 = pd.read_csv(dataset_path / file_X1, sep=' ', names=names_X1).to_numpy()\n plt.scatter(data_X1[:, 0], data_X1[:, 1])\n plt.title(\"Given data set X0 and X1\")\n plt.show()\n\n \"\"\"\n Following block calculates the approximate values using differential\n solver solve_ivp\n \"\"\"\n V = (data_X1 - data_X0) / 0.1\n approx_func_At = np.linalg.inv(data_X0.T @ data_X0) @ data_X0.T @ V\n approx_values = []\n for i in range(data_X0.shape[0]):\n sol = solve_ivp(fun=derivative_func, t_span=[0, 10], t_eval=[0.1],\n y0=data_X0[i, :], args=(approx_func_At,))\n approx_values.append(sol.y)\n approx_values = np.array(approx_values)\n approx_values = approx_values.reshape((2000, 2))\n\n \"\"\"\n We now plot the original data of X1 and the newly approximated data.\n \"\"\"\n plt.scatter(data_X1[:, 0], data_X1[:, 1])\n plt.scatter(approx_values[:, 0], approx_values[:, 1], c='green')\n plt.title(\"Given X1 and approximated values\")\n plt.title(\"Approximated vector field\")\n plt.show()\n\n \"\"\"\n We now plot the vector filed and the phase portrait.\n \"\"\"\n x, y = np.meshgrid(np.linspace(-5, 5, 10), np.linspace(-5, 5, 10))\n u, v = np.zeros((10, 10)), np.zeros((10, 10))\n for i in range(0, 10):\n for j in range(0, 10):\n u[i, j] = approx_values.T[0, i]\n v[i, j] = approx_values.T[1, j]\n plt.quiver(x, y, u, v)\n plt.streamplot(x, y, u, v)\n plt.title(\"Approximated Vector field\")\n plt.show()\n\n \"\"\"\n Following block 
calculates the mean squared error of the X1 and calculate\n approximated values.\n \"\"\"\n MSE = np.square(data_X1 - approx_values).mean()\n print(MSE)", "def el2rv(mu,a,e,i,capom,om,f):\n\n prec = 1.0e-13 #user can change this if more precision needed (just runs slower)\n\n #compute the unit vector\n u = om + f\n xhat = np.cos(u)*np.cos(capom) - np.cos(i)*np.sin(capom)*np.sin(u)\n yhat = np.cos(u)*np.sin(capom) + np.cos(i)*np.cos(capom)*np.sin(u)\n zhat = np.sin(i)*np.sin(u)\n\n #compute the angular momentum vector (unit vector)\n hx = np.sin(capom)*np.sin(i)\n hy = -np.cos(capom)*np.sin(i)\n hz = np.cos(i)\n\n #assuming not parabolic, here the magnitudes of the vectors\n r = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n h = ( mu*a*(1.0 - e*e) )**0.5\n\n #position vectors\n x = r * xhat\n y = r * yhat\n z = r * zhat\n\n #compute components of vector theta hat\n thx = hy * zhat - hz * yhat\n thy = hz * xhat - hx * zhat\n thz = hx * yhat - hy * xhat\n\n #obtain the velocity vector's components and calculate v\n thdot = h/(r*r)\n rdot = e*mu*np.sin(f)/h\n\n vx = r * thdot * thx + rdot * xhat\n vy = r * thdot * thy + rdot * yhat\n vz = r * thdot * thz + rdot * zhat\n\n return x,y,z", "def get_correction(d, a, hfov, img_x):\n\n width = 2 * d*math.tan((hfov/2)*math.pi/180) # in meters\n one_meter = img_x / width\n return int(a*one_meter)", "def epipoles_location(f_mat):\r\n u, s, vh = np.linalg.svd(f_mat)\r\n e_l = vh[-1, :]\r\n e_r = u[:, -1]\r\n # get x, y by dividing by w\r\n e_l = (e_l[0] / e_l[2], e_l[1] / e_l[2])\r\n e_r = (e_r[0] / e_r[2], e_r[1] / e_r[2])\r\n return e_l, e_r", "def photometric_calibration():\n pass", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def I_int(expt, z):\n u = kperp * r(z) / (2. * np.pi) # UV plane: |u| = d / lambda\n nu = expt['nu_line'] / (1. + z)\n fov = (1.02 / (nu * expt['Ddish']) * (3e8 / 1e6))**2.\n \n l = 3e8 / (nu * 1e6) # Wavelength (m)\n u_min = expt['Dmin'] / l\n u_max = expt['Dmax'] / l\n \n # New calc.\n n_u = expt['Ndish']*(expt['Ndish'] - 1.) * l**2. * np.ones(u.shape) \\\n / (2. * np.pi * (expt['Dmax']**2. - expt['Dmin']**2.) )\n n_u[np.where(u < u_min)] = 1. / INF_NOISE\n n_u[np.where(u > u_max)] = 1. / INF_NOISE\n \n # Interferometer multiplicity factor, /I/\n I = 4./9. 
* fov / n_u\n return I", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])", "def detect_velocity(image):\n nonlocal prev, v_last\n curr_bgr = cv.warpPerspective(image, M, (160, 120))\n curr = cv.cvtColor(curr_bgr, cv.COLOR_BGR2GRAY)\n\n if prev is None:\n prev = curr\n v_last = 0.0\n return v_last, curr_bgr, np.zeros_like(image)\n\n flow = cv.calcOpticalFlowFarneback(\n prev, # Previous image\n curr, # Current image\n None, # Computed flow image that has the same size oas prev and type CV_32FC2.\n 0.5, # Specifies the image scale (<1) to build pyramids for each image.\n 3, # Number of pyramid layers including the initial image.\n 15, # winsize, averaging windows size.\n 3, # iterations, number of iterations the algorithm does at each pyramid level.\n 5, # standard deviation of the Gaussian that is used to smooth derivative\n 1.5,\n 0)\n\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n\n v = mag * np.sin(ang)\n\n ######################\n ## Histogram for mag\n ar = np.arange(-20.0, 20.0, 0.50, dtype=np.float)\n his = np.histogram(v, bins=ar)\n\n for i, n in enumerate(his[0]):\n bgr = (255, 255, 0)\n if his[1][i] < 0:\n bgr = (0, 255, 255)\n\n #print('[{}] {} - {}'.format(i, n, his[1][i]))\n cv.rectangle( image, #curr_bgr,\n (i*2, HEIGHT),\n (i*2, HEIGHT - int(n / 10)),\n bgr, #(0, 255, 255),\n cv.FILLED)\n\n hsv = np.zeros_like(image)\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 1] = 255\n hsv[..., 2] = cv.normalize(np.abs(v), None, 0, 255, cv.NORM_MINMAX)\n hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n ##\n ######################\n\n v_abs = np.absolute(v)\n v = v[v_abs >= np.percentile(v_abs, VELOCITY_CUTOFF_PCT)]\n\n v_max = v_last + MAX_ACC\n v_min = v_last - MAX_ACC\n v = np.clip(v, v_min, v_max)\n if v.size > 0:\n v_avg = v.mean()\n else:\n if v_last > 0:\n v_avg = max(v_last - MAX_ACC, 0)\n elif v_last < 0:\n v_avg = min(v_last + MAX_ACC, 0)\n else:\n v_avg = 0\n\n prev = curr\n v_last = v_avg\n return v_last, curr_bgr, hsv_bgr", "def __v(pk: float, pna: float, pcl: float, pca: float) -> float:\n ex_ion = pk * ex_k + pna * ex_na + pcl * in_cl + pca * ex_ca\n in_ion = pk * in_k + pna * in_na + pcl * ex_cl + pca * in_ca\n v = r * t / f * np.log(ex_ion/in_ion) * 1000\n return v", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def prediction(self, v, imu_meas):\n # YOUR CODE HERE\n pass", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 
21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def x(self) -> float:\n return self._ohms.imag", "def get_vertical_vector(q):\n P0, P1, P2, P3 = q\n P0_up = copy.deepcopy(P0)\n P0_up.depth = P0_up.depth - 1.0\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P0_up)\n v1 = (p1 - p0).norm()\n return v1", "def _r_inv(self):\n # [output_dim, output_dim]\n return tf.linalg.cholesky_solve(\n self._chol_obs_covariance,\n tf.eye(self.emission.output_dim, dtype=self._chol_obs_covariance.dtype),\n )", "def imu_get_euler(self):\n return self.imu.get_euler()", "def isi_calc(self):\n arg = erfinv(0.8)*1.0E6/(self.speedup*self.br_nominal)\n print('arg: ', arg)\n\n # calculate center eye opening with no additional impairments\n self.isi_center = 2.0*erf(arg/self.tc) - self.l_1 # column Z\n\n # calculate center eye opening with residual DJ (DJ - DCD)\n self.isi_dj_center = (erf(arg*(1.0+self.dj_ui)/self.tc) + erf(arg*(1.0-self.dj_ui)/self.tc) - self.l_1) # column AD\n\n # calculate eye closing induced by interferometric effects from link end reflections\n mean_reflection = math.pow(10.0,0.05*(self.rx_reflection + self.tx_reflection)) # cell AB5\n er_lin = math.pow(10.0,0.1*self.er_dB_min) # cell AB7\n\n\n arg1 = np.sqrt(2.0*er_lin*self.isi_dj_center*(er_lin-1.0) + (er_lin+1.0)*self.l_1)\n print('arg1: ', arg1)\n arg2 = np.divide(arg1,self.isi_dj_center)\n arg3 = (2.0*self.ref_nf*np.power(10.0,-0.1*self.chil)*mean_reflection)\n self.isi_reflection = self.l_1-np.multiply(arg2,arg3)\n\n # calculate center eye opening with both residual DJ and reflection degradations included\n self.isi_dj_refl_closed = np.multiply(self.isi_dj_center, self.isi_reflection) # column AA\n print('isi_dj_refl_closed (AA) : ', self.isi_dj_refl_closed)\n \n # calculate eye opening at the corners with no additional impairments\n eff_rx_eye = 2.0*(0.5-self.X2)*self.speedup\n self.isi_corners = (erf(arg*(1.0+eff_rx_eye)/self.tc) + erf(arg*(1.0-eff_rx_eye)/self.tc) - self.l_1) # column AB\n\n # calculate eye opening at the corners with residual DJ impairment\n self.isi_dj_corners = (erf(arg*(1.0+eff_rx_eye+self.dj_ui)/self.tc) + erf(arg*(1.0-eff_rx_eye-self.dj_ui)/self.tc) - self.l_1) # column AC\n self.isi_tp4_rx = (erf(arg*(1.0+eff_rx_eye)/self.rx_1090_rise) + erf(arg*(1.0-eff_rx_eye)/self.rx_1090_rise) - 1) # cell AG5\n\n # end of GbE10.isi_calcdef isi_calc(self):", "def calculate_ic(self):\n # dt:\n dt = self.E\n\n # dr:\n dr = np.sqrt(self.E ** 2 - (self.Q + self.L ** 2) / self.r ** 2)\n #print(dr)\n if np.isnan(dr):\n dr = 0\n #dr = self._check_dr_sign(self.alpha)\n\n # dtheta:\n omega = self.Q - self.L ** 2 * (np.cos(self.theta) / np.sin(self.theta)) ** 2\n if omega < 0:\n omega = np.abs(omega)\n dtheta = np.sqrt(omega) / self.r**2\n if self.eta < np.pi / 2:\n dtheta *= -1\n\n # dphi:\n dphi = self.L / (self.r * np.sin(self.theta)) ** 2\n\n return dt, dr, dtheta, dphi", "def vi2ev(v,mu):\n return 0.5*mu*mp*v**2/eV2J", "def cur_approx(self):\n return invert_normal_params(self.Q, self.r)", 
"def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def expected_improvement(ymin, mu, sig):\n p_imp = norm.cdf((ymin-mu)/sig)\n p_ymin = norm.pdf((ymin-mu)/sig)\n ei = (ymin-mu)*p_imp + sig*p_ymin\n return ei", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def I(x, y, l, p):\n \n return 0.5 / (mu * c) * A0**2 * ( u(x, y, l, p) )**2", "def dilate_pvm(image, selem):\n assert len(image.shape) == len(selem.shape), \"Image and region must have \" \\\n \"identical dimensionality.\"\n dilated = ndi.correlate(image.astype(np.float64), selem.astype(np.float64),\n mode='reflect')\n return np.minimum(dilated, 1.0)", "def get_y(EQ, M):\n return (EQ[1] * ((-1) * EQ[0] * M[0] + EQ[1] * M[1]) - EQ[0] * EQ[2]) / (EQ[1] ** 2 + EQ[0] ** 2)", "def __ComputeApproximateVals(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def IK_geometric(dh_params, pose):\n pass", "def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y", "def ev2ve(eV): \n return cv*np.sqrt( eV*(eV+2.e0*mec2))/(eV+mec2)", "def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):\n # cameraPoints = 
self.ImageToCamera(imagePoints)\n cameraPoints = imagePoints\n self.__ComputeApproximateVals(cameraPoints, groundPoints)\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n while la.norm(deltaX) > epsilon:\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n # compute residuals\n l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1))\n v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)\n if (np.size(A, 0) - np.size(deltaX)) != 0:\n sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))\n sigmaX = sig[0] * la.inv(N)\n else:\n sigmaX = None\n\n return [self.exteriorOrientationParameters, sigmaX, v]", "def emissivity_calc (pv, ndvi):\n ndvi_dest = ndvi.copy()\n ndvi_dest[np.where(ndvi < 0)] = 0.991\n ndvi_dest[np.where((0 <= ndvi) & (ndvi < 0.2)) ] = 0.966\n ndvi_dest[np.where((0.2 <= ndvi) & (ndvi < 0.5)) ] = (0.973 * pv[np.where((0.2 <= ndvi) & (ndvi < 0.5)) ]) + (0.966 * (1 - pv[np.where((0.2 <= ndvi) & (ndvi < 0.5)) ]) + 0.005)\n ndvi_dest[np.where(ndvi >= 0.5)] = 0.973\n return ndvi_dest", "def equation(self):\n mat = np.zeros((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs[0:self.nlayers - 1] = 0.0\n rhs[self.nlayers - 1] = self.Qc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]\n mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]\n if e == self:\n for i in range(self.nlayers - 1):\n mat[i, ieq + i] -= self.resfac[i]\n mat[i, ieq + i + 1] += self.resfac[i + 1]\n mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0\n ieq += e.nunknowns\n else:\n head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]\n rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]\n return mat, rhs", "def sepinsky_A_parameter(eccentricity=0.0, angular_velocity_ratio=1.0, true_anomaly=numpy.pi):\n numerator = angular_velocity_ratio**2 * (1.0 + eccentricity)**4\n denominator = (1.0 + eccentricity * numpy.cos(true_anomaly))**3\n return numerator / denominator", "def compute_hand_eye_calibration_BASELINE(dq_B_H_vec, dq_W_E_vec, config):\n assert len(dq_W_E_vec) == len(dq_B_H_vec)\n num_poses = len(dq_W_E_vec)\n\n start_time = timeit.default_timer()\n\n # Enforce the same sign of the rotation quaternion.\n for i in range(num_poses):\n dq_B_H = dq_B_H_vec[i]\n dq_W_E = dq_W_E_vec[i]\n if ((dq_W_E.q_rot.w < 0. and dq_B_H.q_rot.w > 0.) or\n (dq_W_E.q_rot.w > 0. and dq_B_H.q_rot.w < 0.)):\n dq_W_E_vec[i].dq = -dq_W_E_vec[i].dq.copy()\n\n # 0.0 Reject pairs whose motion is not informative,\n # i.e. 
their screw axis dot product is large\n if config.prefilter_poses_enabled:\n dq_B_H_vec_filtered, dq_W_E_vec_filtered = prefilter_using_screw_axis(\n dq_B_H_vec, dq_W_E_vec, config.prefilter_dot_product_threshold)\n else:\n dq_B_H_vec_filtered = dq_B_H_vec\n dq_W_E_vec_filtered = dq_W_E_vec\n num_poses_after_filtering = len(dq_W_E_vec_filtered)\n\n best_idx = -1\n best_num_inliers = config.min_num_inliers - 1\n best_dq_W_E_vec_inlier = []\n best_dq_B_H_vec_inlier = []\n\n if config.enable_exhaustive_search:\n print(\"Do exhaustive search to find biggest subset of inliers...\")\n else:\n print(\"Search for first set of inliers bigger than {}...\".format(\n config.min_num_inliers))\n\n # 0.1 Reject pairs where scalar parts of dual quaternions do not match.\n # Loop over all the indices to find an index of a pose pair.\n for j in range(num_poses_after_filtering):\n # Re-align all dual quaternion to the j-th dual quaternion.\n dq_W_E_vec_aligned = align_paths_at_index(dq_W_E_vec_filtered, j)\n dq_B_H_vec_aligned = align_paths_at_index(dq_B_H_vec_filtered, j)\n\n dq_W_E_vec_inlier = []\n dq_B_H_vec_inlier = []\n\n # Loop over the indices again starting at the first index to find either:\n # - The first set of inliers of at least size min_num_inliers\n # OR\n # - The largest set of inliers using an exhaustive search\n for i in range(0, num_poses_after_filtering):\n dq_W_E = dq_W_E_vec_aligned[i]\n dq_B_H = dq_B_H_vec_aligned[i]\n scalar_parts_W_E = dq_W_E.scalar()\n scalar_parts_B_H = dq_B_H.scalar()\n # Append the inliers to the filtered dual quaternion vectors.\n if np.allclose(scalar_parts_W_E.dq, scalar_parts_B_H.dq, atol=1e-2):\n dq_W_E_vec_inlier.append(dq_W_E)\n dq_B_H_vec_inlier.append(dq_B_H)\n\n assert len(dq_W_E_vec_inlier) == len(dq_B_H_vec_inlier)\n\n if config.enable_exhaustive_search:\n has_the_most_inliers = (len(dq_W_E_vec_inlier) > best_num_inliers)\n if has_the_most_inliers:\n best_num_inliers = len(dq_W_E_vec_inlier)\n best_idx = j\n best_dq_W_E_vec_inlier = copy.deepcopy(dq_W_E_vec_inlier)\n best_dq_B_H_vec_inlier = copy.deepcopy(dq_B_H_vec_inlier)\n print(\"Found new best start idx: {} number of inliers: {}\".format(\n best_idx, best_num_inliers))\n else:\n has_enough_inliers = (len(dq_W_E_vec_inlier) > config.min_num_inliers)\n if has_enough_inliers:\n best_idx = j\n best_num_inliers = len(dq_W_E_vec_inlier)\n break\n\n assert (j + 1) < num_poses_after_filtering, (\n \"Reached over all filtered poses and couldn't find \"\n \"enough inliers. 
num_samples: {}, num_inliers: {}\".format(\n num_poses_after_filtering, len(dq_W_E_vec_inlier)))\n\n if config.enable_exhaustive_search:\n assert best_idx != -1, \"Not enough inliers found!\"\n dq_W_E_vec_inlier = best_dq_W_E_vec_filtered\n dq_B_H_vec_inlier = best_dq_B_H_vec_inlier\n\n aligned_dq_B_H = align_paths_at_index(dq_B_H_vec_inlier, best_idx)\n aligned_dq_W_E = align_paths_at_index(dq_W_E_vec_inlier, best_idx)\n\n print(\"Best start idx: {}\".format(best_idx))\n print(\"Removed {} outliers from the (prefiltered) poses.\".format(\n len(dq_B_H_vec_filtered) - len(dq_B_H_vec_inlier)))\n print(\"Running the hand-eye calibration with the remaining {} pairs of \"\n \"poses\".format(len(dq_B_H_vec_inlier)))\n\n try:\n # Compute hand-eye calibration on the inliers.\n (dq_H_E_estimated,\n singular_values,\n bad_singular_values) = compute_hand_eye_calibration(\n dq_B_H_vec_inlier, dq_W_E_vec_inlier,\n config.hand_eye_calibration_scalar_part_equality_tolerance)\n dq_H_E_estimated.normalize()\n except:\n print(\"\\n\\n Hand-eye calibration FAILED! \"\n \"algorithm_name: {} exception: \\n\\n\".format(\n config.algorithm_name, sys.exc_info()[0]))\n end_time = timeit.default_timer()\n runtime = end_time - start_time\n return (False, None, (None, None),\n None, num_poses_after_filtering, runtime, None, None)\n\n # Evaluate hand-eye calibration either on all poses aligned by the\n # sample index or only on the inliers.\n if config.ransac_evaluate_refined_model_on_inliers_only:\n (poses_B_H, poses_W_H) = get_aligned_poses(dq_B_H_vec_inlier,\n dq_W_E_vec_inlier,\n dq_H_E_estimated)\n else:\n # TODO(mfehr): There is some redundancy here, fix it!\n aligned_dq_B_H = align_paths_at_index(dq_B_H_vec, best_idx)\n aligned_dq_W_E = align_paths_at_index(dq_W_E_vec, best_idx)\n (poses_B_H, poses_W_H) = get_aligned_poses(aligned_dq_B_H,\n aligned_dq_W_E,\n dq_H_E_estimated)\n\n\n (rmse_position,\n rmse_orientation,\n inlier_flags) = evaluate_alignment(poses_B_H, poses_W_H, config, config.visualize)\n\n end_time = timeit.default_timer()\n runtime = end_time - start_time\n\n pose_vec = dq_H_E_estimated.to_pose()\n print(\"Solution found by aligned based on idx: {}\\n\"\n \"\\t\\tNumber of inliers: {}\\n\"\n \"\\t\\tRMSE position: {:10.4f}\\n\"\n \"\\t\\tRMSE orientation: {:10.4f}\\n\"\n \"\\t\\tdq_H_E: {}\\n\"\n \"\\t\\tpose_H_E: {}\\n\"\n \"\\t\\tTranslation norm: {:10.4f}\".format(\n best_idx, best_num_inliers, rmse_position,\n rmse_orientation, dq_H_E_estimated,\n pose_vec, np.linalg.norm(pose_vec[0:3])))\n\n return (True, dq_H_E_estimated,\n (rmse_position, rmse_orientation),\n best_num_inliers, num_poses_after_filtering, runtime, singular_values, bad_singular_values)", "def _extrapolate(self):\n maxrho = self.maxrho\n x = np.linspace(1.001, maxrho, int(self.nrho/5))\n rho1 = self.rho # rho up to 1\n dec_l = 0.01\n ni_ov = np.zeros((self.nion, len(x)), dtype=float)\n ninew = np.zeros((self.nion, self.nrho+len(x)),dtype=float)\n ne_ov1 = self.ne[self.nrho-1]*np.exp(-((x-1.)/dec_l))\n te_ov1 = self.te[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n ti_ov1 = self.ti[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n vt_ov1 = self.vt[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n for i in range(self.nion):\n ni_ov[i,:] = self.ni[i,self.nrho-1]*np.exp(-(x-1.)/dec_l)\n ninew[i,:] = np.concatenate([self.ni[i,:], ni_ov[i,:]])\n self.ni = ninew\n self.rho = np.concatenate([rho1, x])\n self.nrho = len(rho1)+len(x)\n self.ne = np.concatenate([self.ne, ne_ov1])\n self.te = np.concatenate([self.te, te_ov1])\n self.ti = np.concatenate([self.ti, ti_ov1])\n 
self.vt = np.concatenate([self.vt, vt_ov1])", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def log_operator(SE3): \n #print('SE3 log: ', SE3)\n R = SE3[:3,:3]\n t = SE3[:3,3]\n theta = arccos(0.5*(trace(R)-1)) # radians\n lnR = 0.5*(theta/sin(theta))*(R-R.T)\n omega = vee(lnR) # vee operator\n omega_skew_sym = lnR#skew_symmetric(omega.reshape(-1,))\n \n if theta <= 1e-10:\n V = eye(3)\n else:\n V = eye(3) + \\\n (theta**-2)*(1-cos(theta))*omega_skew_sym + \\\n (theta**-3)*(theta-sin(theta))*(omega_skew_sym @ omega_skew_sym)\n neu = inv(V) @ t\n\n # if theta <= 1e-10:\n # Vinv = eye(3)\n # else:\n # theta_half = 0.5*theta \n # Vinv = eye(3) - 0.5*omega_skew_sym + \\\n # (theta**-2)*(1- (theta_half*cos(theta_half)/sin(theta_half)))*(omega_skew_sym @ omega_skew_sym)\n # neu = Vinv @ t\n\n return np.hstack((neu, omega)).reshape(-1,1)", "def GetEigenvector(self, i):\n return _hypre.HypreAME_GetEigenvector(self, i)", "def _gv(self):\n return self.y - self.err_inf", "def xx(self):\n return self.exterior[:, 0]", "def _vce(self):\n sum = 0.0\n for sail in self.sails:\n cl2 = sail.cl(self.awa)**2\n cd2 = sail.cd(self.awa)**2\n sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)\n self._area()\n deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG\n Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH\n return (Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))", "def Q_e(params):\n return (params['Q_e_real'].value +\n 1j * params['Q_e_imag'].value)", "def partial_y(img):\n\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n kernel = np.array([[0.5], [0], [-0.5]])\n out = conv(img, kernel)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def calc_emi_dif(tgt_pt, src_pt, src_dir, coef=1):\r\n emi_params = [\r\n numpy.zeros(tgt_pt.shape[-1], tgt_pt.dtype),\r\n numpy.zeros((tgt_pt.shape[-1], tgt_pt.shape[-1]), tgt_pt.dtype)\r\n ]\r\n\r\n # 'r' vector\r\n r = tgt_pt - src_pt\r\n\r\n src_dir_len2 = src_dir.dot(src_dir)\r\n if not src_dir_len2:\r\n return emi_params # Zero length, return zero EMI params\r\n\r\n # Vector projections of \"r\" in the direction of \"src_dir\"\r\n l = src_dir.dot(src_dir.dot(r) / src_dir_len2)\r\n R = r - l\r\n\r\n r_len = numpy.sqrt(r.dot(r))\r\n if not r_len:\r\n return None # Target point coincides with \"src_pt\"\r\n\r\n # Calculate the differential Biot–Savart law (https://en.wikipedia.org/wiki/Biot–Savart_law):\r\n # dl x r / r^3\r\n B = numpy.cross(src_dir, r) / r_len ** 3\r\n\r\n # Scale by a coefficient, like current, magnetic constant and 1/(4*pi)\r\n B *= coef\r\n\r\n emi_params[0] = B\r\n\r\n # Calculate the partial derivatives from Biot–Savart law \"R/sqrt(l^2 + R^2)^3\" (see calc_emi())\r\n # along \"l\" and \"R\" axes.\r\n\r\n # Gradient component along 'l':\r\n # Use derivative calculator https://www.derivative-calculator.net/ (substitute l with x):\r\n # input: R / sqrt(x^2 + R^2)^3, result: -3Rx / (x^2 + R^2)^(5/2)\r\n # Substitute back x to l, then sqrt(l^2 + R^2) to r:\r\n # result: -3 * R * l / r^5\r\n R_len2 = R.dot(R)\r\n l_len2 = l.dot(l)\r\n R_len = numpy.sqrt(R_len2)\r\n l_len = numpy.sqrt(l_len2)\r\n if l.dot(src_dir) < 0:\r\n l_len = -l_len\r\n\r\n l_comp = -3 * R_len * l_len / r_len ** 5\r\n\r\n # Gradient component along 'R':\r\n # Use 
derivative calculator https://www.derivative-calculator.net/ (substitute R with x):\r\n # input: x / sqrt(x^2 + l^2)^3, result: - (2x^2 - l^2) / (x^2 + l^2)^(5/2)\r\n # Substitute back x to R, then sqrt(l^2 + R^2) to r:\r\n # result: (l^2 - 2R^2) / r^5\r\n\r\n R_comp = (l_len2 - 2 * R_len2) / r_len ** 5\r\n\r\n l_comp *= coef\r\n R_comp *= coef\r\n\r\n # Combine l_comp and R_comp into a Jacobian matrix\r\n emi_params[1] = build_jacobian(l_comp, R_comp, src_dir, R, B)\r\n\r\n return emi_params", "def compute_representers(V, inertia, rhs):\n\tM = inertia\n\n\tx = fem.Function(V)\n\tx2 = fem.Function(V)\n\n\tfem.solve(M, x2.vector(), rhs.vector())\n\n\t# H^2 metric\n\tv = fem.TestFunction(V)\n\tx3 = x2*v*dx()\n\tM3x = fem.assemble(x3)\n\tfem.solve(M,x.vector(),M3x)\n\n\n\t# Compute the norm\n\tH1 = x2.vector().inner(rhs.vector())\n\tH2 = x.vector().inner(rhs.vector())\n\n\treturn x2, x, H1, H2", "def propiosObservable(obs):\n for i in range(len(obs)):\n for j in range(len(obs[0])):\n obs[i][j]=complex(obs[i][j][0],obs[i][j][1])\n a=np.array(obs)\n x,v = np.linalg.eig(a)\n valPropios = [(c.real,c.imag) for c in x]\n vectPropios = [[(c.real,c.imag) for c in y]for y in v]\n return valPropios,vectPropios", "def get_mi_mvn(x, y):\n\n d = x.shape[1]\n\n # hx = 0.5 * log((2 * np.pi * np.e)**d * det(np.cov(x.T)))\n # hy = 0.5 * log((2 * np.pi * np.e)**d * det(np.cov(y.T)))\n # hxy = 0.5 * log((2 * np.pi * np.e)**(2*d) * det(np.cov(x.T, y=y.T)))\n # mi = hx + hy - hxy\n\n # hx = 0.5 * log(det(2*np.pi*np.e*np.cov(x.T)))\n # hy = 0.5 * log(det(2*np.pi*np.e*np.cov(y.T)))\n # hxy = 0.5 * log(det(2*np.pi*np.e*np.cov(np.c_[x,y].T)))\n hx = get_h_mvn(x)\n hy = get_h_mvn(y)\n hxy = get_h_mvn(np.c_[x,y])\n mi = hx + hy - hxy\n\n # mi = 0.5 * (log(det(np.cov(x.T))) + log(det(np.cov(y.T))) - log(det(np.cov(np.c_[x,y].T))))\n\n return mi", "def innovation(observation: np.ndarray, observation_predicted: np.ndarray) -> np.ndarray:\n return observation - observation_predicted", "def ivp(self):\n if self.__ivp is None:\n self.__ivp = ivp.IVP(self.evaluate_rhs, self.evaluate_jacobian)\n return self.__ivp", "def obj(k_next) : \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec", "def Iq(q, lorentz_scale, porod_scale, cor_length, porod_exp, lorentz_exp):\n with errstate(divide='ignore'):\n porod = porod_scale / q**porod_exp\n lorentz = lorentz_scale / (1.0 + (q * cor_length)**lorentz_exp)\n inten = porod + lorentz\n return inten", "def E(q, r0, x, y):\n den = np.hypot(x - r0[0], y - r0[1]) ** 3\n return q * (x - r0[0]) / den, q * (y - r0[1]) / den", "def localized_E(E1, i, j, x, y):\n oldval = x[i, j]\n newval = oldval * -1 # flip\n # local computations\n E2 = E1 - (h * oldval) + (h * newval)\n E2 = E2 + (eta * y[i, j] * oldval) - (eta * y[i, j] * newval)\n adjacent = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n neighbors = [x[i + di, j + dj] for di, dj in adjacent\n if is_valid(i + di, j + dj, x.shape)]\n E2 = E2 + beta * sum(a * oldval for a in neighbors)\n E2 = E2 - beta * sum(a * newval for a in neighbors)\n return oldval, newval, E1, E2", "def getEta(self, pose):\n vector_x = np.cos(self.ori) * (pose.x - self.pos.x) + np.sin(self.ori) * (pose.y - self.pos.y)\n vector_y = -np.sin(self.ori) * (pose.x - 
self.pos.x) + np.cos(self.ori) * (pose.y - self.pos.y)\n eta = math.atan2(vector_y, vector_x)\n return eta", "def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=np.multiply(0.75,np.sum(v4))-0.25\n v2 = [ v[i]*v[i] for i in range(3) ]\n v4 = [ v2[i]*v2[i] for i in range(3) ]\n fact2 = 0.25*( 3.0*(v4[0]+v4[1]+v4[2])-1.0)\n fact3 = 1.0/12.0*(delta[0]*(3*v4[0]+6*v2[1]*v2[2]-1) + delta[1]*(3*v4[1]+6*v2[0]*v2[2]-1) + delta[2]*(3*v4[2]+6*v2[0]*v2[1]-1))\n A=np.zeros(5)\n A[0]= 3*v2[1]*v2[2]\n A[1]= 3*v2[0]*v2[2]\n A[2]= 3*v2[0]*v2[1]\n A[3]= fact2-fact3\n A[4]= fact2+fact3\n return A", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def get_pose_estimation(self, img):\n\n # Convert image to a 1D numpy array\n input_data = np.expand_dims(img.copy(), axis=0)\n\n # check the type of the input tensor\n floating_model = self.input_details[0]['dtype'] == np.float32\n if floating_model:\n input_data = (np.float32(input_data) - 127.5) / 127.5\n\n # Setting the value of the input tensor\n self.interpreter.set_tensor(self.input_details[0]['index'], input_data)\n\n # Run the computation\n self.interpreter.invoke()\n\n # Extract output data from the interpreter\n output_data = self.interpreter.get_tensor(self.output_details[0]['index'])\n offset_data = self.interpreter.get_tensor(self.output_details[1]['index'])\n\n # Heatmaps contains the coincidence of keypoint, it can be used to locate the approximate location.\n heatmaps = np.squeeze(output_data)\n # Offset Vectors contains the exact position of each keypoint. 
First 17 layers correspond to the x\n # coordinates and the last 17 correspond to the y coordinates\n offsets = np.squeeze(offset_data)\n\n pose = get_keypoints_positions(heatmaps, offsets)\n # Show image with pose\n #cv2.imshow(\"frame\", cv2.resize(self.draw_kps(img, pose), (500, 500)))\n return pose", "def value_inv(self, theta):\n # diag_gamma = np.dot(theta.T, self.X.T)\n # logistic_term = self.logistic_fn(diag_gamma)\n # diag_gamma = logistic_term * (1.0 - logistic_term)\n # diag_gamma_inv = 1.0 / diag_gamma\n # gamma_inv = np.diag(diag_gamma_inv)\n # inv_mat = np.linalg.pinv(gamma_inv + self.XXt)\n # return self.alpha*(np.eye(self.dim) - np.dot(np.dot(self.X.T, inv_mat), self.X))\n G = self.value(theta) # d*d matrix inversion\n return np.linalg.pinv(G)", "def like_one(theta,dt,dmag,sigma):\n\n gamma, A = theta\n aux=(1/np.sqrt(2*np.pi*Veff2(dt,sigma,A,gamma)))*np.exp(-1.0*(dmag**2)/(2.0*Veff2(dt,sigma,A,gamma)))\n\n return aux", "def eclogite_foliated():\n\n rho = 3300.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 203.45; C[0,1] = 67.76; C[0,2] = 64.47; C[0,3] = 0.08; C[0,4] = 1.9; C[0,5] = -0.4\n C[1,0] = C[0,1]; C[1,1] = 220.58; C[1,2] = 63.65; C[1,3] = 0.46; C[1,4] = 0.59; C[1,5] = 0.06\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 189.75; C[2,3] = 0.13; C[2,4] = 0.95; C[2,5] = -0.2\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 66.32; C[3,4] = -0.27; C[3,5] = 0.73\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.77; C[4,5] = -0.02\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 70.75\n\n return C, rho", "def _measmod_ekf0(ivp, prior, evlvar):\n spatialdim = prior.spatialdim\n h0 = prior.proj2coord(coord=0)\n h1 = prior.proj2coord(coord=1)\n\n def dyna(t, x, **kwargs):\n return h1 @ x - ivp.rhs(t, h0 @ x)\n\n def diff(t, **kwargs):\n return evlvar * np.eye(spatialdim)\n\n def jaco(t, x, **kwargs):\n return h1\n\n return DiscreteGaussianModel(dyna, diff, jaco)", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def equilibrium_ionization(self):\n # Solve system of equations using singular value decomposition\n _, _, V = np.linalg.svd(self._rate_matrix.value)\n # Select columns of V with smallest eigenvalues (returned in descending order)\n # NOTE: must take the absolute value as the SVD solution is only accurate up\n # to the sign. 
We require that the solutions must be positive.\n ioneq = np.fabs(V[:, -1, :])\n ioneq /= ioneq.sum(axis=1)[:, np.newaxis]\n\n return u.Quantity(ioneq)", "def posdef_inv_eig(tensor, identity, damping):\n eigenvalues, eigenvectors = tf.self_adjoint_eig(tensor + damping * identity)\n return tf.matmul(eigenvectors / eigenvalues, eigenvectors, transpose_b=True)", "def get_x(EQ, M):\n return (EQ[0] * (EQ[0] * M[0] - EQ[1] * M[1]) - EQ[1] * EQ[2]) / (EQ[1] ** 2 + EQ[0] ** 2)", "def find_position(self, xv, yv):\n # Convert position in spheric coord\n phi = xv*self.FOV_img/360/self.img_res\n theta = yv*self.FOV_img_Y/180/self.img_res_Y\n phi2 = phi+(360-self.FOV_img)/2\n theta2 = theta+(180-self.FOV_img_Y)/2\n\n u, v, w = spheric2cart(np.radians(theta2), np.radians(phi2)) # give cartesian coord of pixel\n\n # ignore errors due to /0 -> inf, -inf\n # divide (w/v) and invalid arctan2()\n with np.errstate(all='ignore'): # OPTIMIZE: see comment about pi = -pi and don't matter if -0 or 0 -> just replace by pi\n beta = -np.arctan(w/v)\n# beta2 = -np.arctan2(w, v)\n\n# v2 = np.dot(rotation_matrix(beta), [u, v, w]) # take 3*3 created matrix and aplly to vector\n matrix = rotation_matrix(beta)\n u2 = matrix[0, 0]*u\n v2 = matrix[1, 1]*v+matrix[1, 2]*w\n w2 = matrix[2, 1]*v+matrix[2, 2]*w\n _, seen_angle = cart2spheric(u2, v2, w2) # return phi in equator \"projection\"\n\n seen_angle = np.degrees(seen_angle)\n seen_angle = np.mod(seen_angle, 360) # define phi [0, 360]\n\n# seen_angle[seen_angle > 360] -= 360\n deviated_angle = np.zeros(seen_angle.shape)\n deviated_angle[seen_angle < 180] = self.interpolation(seen_angle[seen_angle < 180])\n deviated_angle[seen_angle >= 180] = 360 - self.interpolation(360-seen_angle[seen_angle >= 180])\n# np.flip(deviated_angle, 1) \" mais probleme overlap entre left et right\n\n theta = pi/2# *np.ones(deviated_angle.shape)\n phi = np.radians(deviated_angle)\n u3, v3, w3 = spheric2cart(theta, phi) #get cart coord of deviated pixel\n\n matrix = rotation_matrix(-beta)\n u4 = matrix[0, 0]*u3\n v4 = matrix[1, 1]*v3+matrix[1, 2]*w3\n w4 = matrix[2, 1]*v3+matrix[2, 2]*w3\n\n theta, phi = cart2spheric(u4, v4, w4) #give spheric coord of deviated pixel\n\n theta, phi = np.degrees(theta), np.degrees(phi)\n\n phi -= (360-self.FOV_img)/2\n theta -= (180-self.FOV_img_Y)/2\n\n with np.errstate(all='ignore'): # OPTIMIZE\n phi = np.mod(phi, 360) # define phi [0, 360]\n theta = np.mod(theta, 180) # define phi [0, 360]\n\n phi[phi == 360] = 0\n xv2 = phi*360/self.FOV_img*self.img_res\n yv2 = theta*180/self.FOV_img_Y*self.img_res_Y #give deviated angle pixel position\n\n xv2[np.isnan(xv2)] = -1\n yv2[np.isnan(yv2)] = -1\n\n xv2 = np.array(xv2, dtype=int)\n yv2 = np.array(yv2, dtype=int)\n\n return xv2, yv2", "def value(self):\n updets = self._dets[0][:, :, self._det_map[0]]\n dndets = self._dets[1][:, :, self._det_map[1]]\n upref = np.amax(self._dets[0][1])\n dnref = np.amax(self._dets[1][1])\n phases = updets[0] * dndets[0]\n logvals = updets[1] - upref + dndets[1] - dnref\n\n wf_val = np.einsum(\n \"d,id->i\", self.parameters[\"det_coeff\"], phases * np.exp(logvals)\n )\n\n wf_sign = self.get_phase(wf_val)\n wf_logval = np.log(np.abs(wf_val)) + upref + dnref\n return wf_sign, wf_logval", "def _affine_coordinates(self, Vrep_object):\n if '_affine_coordinates_pivots' not in self.__dict__:\n v_list = [ vector(v) for v in self.Vrepresentation() ]\n if len(v_list)>0:\n origin = v_list[0]\n v_list = [ v - origin for v in v_list ]\n coordinates = matrix(v_list)\n self._affine_coordinates_pivots = 
coordinates.pivots()\n \n v = list(Vrep_object)\n if len(v) != self.ambient_dim():\n raise ValueError('Incorrect dimension: '+str(v))\n\n return vector(self.field(), [ v[i] for i in self._affine_coordinates_pivots ])", "def epidote():\n\n rho = 3465.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 211.5; C[0,1] = 65.6; C[0,2] = 43.2; C[0,3] = 0.; C[0,4] = -6.5; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 239.; C[1,2] = 43.6; C[1,3] = 0.; C[1,4] = -10.4; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 202.1; C[2,3] = 0.; C[2,4] = -20.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.1; C[3,4] = 0.; C[3,5] = -2.3\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 43.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.5\n\n return C, rho", "def imwofz_nonvector(x, y):\n ncut=27\n xy=x*y \n xyp=2.0*xy/jnp.pi \n exx=jnp.exp(-x*x) \n f=-exx*erfcx(y)*jnp.sin(2.0*xy)+x/jnp.pi*exx*jnp.sinc(xyp) \n n=jnp.arange(1,ncut+1) \n n2=n*n \n vec0=0.5*n/(0.25*n2+ y*y) \n vec1=jnp.exp(-(0.25*n2+x*x)) \n vec4=jnp.exp(-(0.5*n+x)*(0.5*n+x)) \n vec5=jnp.exp(-(0.5*n-x)*(0.5*n-x)) \n Sigma1=jnp.dot(vec0,vec1)\n Sigma4=jnp.dot(vec0,vec4)\n Sigma5=jnp.dot(vec0,vec5)\n f = f + 1.0/jnp.pi*(y*jnp.sin(2.0*xy)*Sigma1 + 0.5*(Sigma5-Sigma4))\n \n return f", "def end_effectors_pos(self):\n def relative_pos_in_egocentric_frame(physics):\n end_effector = physics.bind(self._entity.end_effectors).xpos\n torso = physics.bind(self._entity.root_body).xpos\n xmat = np.reshape(physics.bind(self._entity.root_body).xmat, (3, 3))\n return np.reshape(np.dot(end_effector - torso, xmat), -1)\n return observable.Generic(relative_pos_in_egocentric_frame)", "def each_evidence(y_, f, fh, v, s, vh, N, D):\n epsilon = 1e-5\n alpha = 1.0\n beta = 1.0\n lam = alpha / beta\n tmp = (vh @ (f @ np.ascontiguousarray(y_)))\n for _ in range(11):\n # should converge after at most 10 steps\n # typically converge after two or three steps\n gamma = (s / (s + lam)).sum()\n # A = v @ np.diag(alpha + beta * s) @ v.transpose() # no need to compute A\n # A_inv = v @ np.diag(1.0 / (alpha + beta * s)) @ v.transpose() # no need to compute A_inv\n m = v @ (tmp * beta / (alpha + beta * s))\n alpha_de = (m * m).sum()\n alpha = gamma / (alpha_de + epsilon)\n beta_de = ((y_ - fh @ m) ** 2).sum()\n beta = (N - gamma) / (beta_de + epsilon)\n new_lam = alpha / beta\n if np.abs(new_lam - lam) / lam < 0.01:\n break\n lam = new_lam\n evidence = D / 2.0 * np.log(alpha) \\\n + N / 2.0 * np.log(beta) \\\n - 0.5 * np.sum(np.log(alpha + beta * s)) \\\n - beta / 2.0 * (beta_de + epsilon) \\\n - alpha / 2.0 * (alpha_de + epsilon) \\\n - N / 2.0 * np.log(2 * np.pi)\n return evidence / N, alpha, beta, m", "def _measmod_ekf1(ivp, prior, evlvar):\n spatialdim = prior.spatialdim\n h0 = prior.proj2coord(coord=0)\n h1 = prior.proj2coord(coord=1)\n\n def dyna(t, x, **kwargs):\n return h1 @ x - ivp.rhs(t, h0 @ x)\n\n def diff(t, **kwargs):\n return evlvar * np.eye(spatialdim)\n\n def jaco(t, x, **kwargs):\n return h1 - ivp.jacobian(t, h0 @ x) @ h0\n\n return DiscreteGaussianModel(dyna, diff, jaco)", "def yy(self):\n return self.exterior[:, 1]", "def solutionCovariance(self):\n return self.standardError2()*self.AtAinv", "def get_ivar(data, s):\n return data.ivar.value / (1 + s**2 * data.ivar.value)", "def impurity(x,iw):\n Vi = 0\n ai = 2*a0+iw*1.5*a0+0.5*(iw-1)*a0\n \n #Impurity size\n size = 0.25*1.5*a0\n if (x > ai-size and x < ai):\n Vi = 300/Eh\n elif(x == ai or x == 
ai-size):\n Vi = 150/Eh\n \n return Vi", "def compute_hand_eye_calibration(dq_B_H_vec_inliers, dq_W_E_vec_inliers,\n scalar_part_tolerance=1e-2,\n enforce_same_non_dual_scalar_sign=True):\n n_quaternions = len(dq_B_H_vec_inliers)\n\n # Verify that the first pose is at the origin.\n assert np.allclose(dq_B_H_vec_inliers[0].dq,\n [0., 0., 0., 1.0, 0., 0., 0., 0.],\n atol=1.e-8), dq_B_H_vec_inliers[0]\n assert np.allclose(dq_W_E_vec_inliers[0].dq,\n [0., 0., 0., 1.0, 0., 0., 0., 0.],\n atol=1.e-8), dq_W_E_vec_inliers[0]\n\n if enforce_same_non_dual_scalar_sign:\n for i in range(n_quaternions):\n dq_W_E = dq_W_E_vec_inliers[i]\n dq_B_H = dq_B_H_vec_inliers[i]\n if ((dq_W_E.q_rot.w < 0. and dq_B_H.q_rot.w > 0.) or\n (dq_W_E.q_rot.w > 0. and dq_B_H.q_rot.w < 0.)):\n dq_W_E_vec_inliers[i].dq = -dq_W_E_vec_inliers[i].dq.copy()\n\n # 0. Stop alignment if there are still pairs that do not have matching\n # scalar parts.\n for j in range(n_quaternions):\n dq_B_H = dq_W_E_vec_inliers[j]\n dq_W_E = dq_B_H_vec_inliers[j]\n\n scalar_parts_B_H = dq_B_H.scalar()\n scalar_parts_W_E = dq_W_E.scalar()\n\n assert np.allclose(scalar_parts_B_H.dq, scalar_parts_W_E.dq,\n atol=scalar_part_tolerance), (\n \"Mismatch of scalar parts of dual quaternion at idx {}:\"\n \" dq_B_H: {} dq_W_E: {}\".format(j, dq_B_H, dq_W_E))\n\n # 1.\n # Construct 6n x 8 matrix T\n t_matrix = setup_t_matrix(dq_B_H_vec_inliers, dq_W_E_vec_inliers)\n\n # 2.\n # Compute SVD of T and check if only two singular values are almost equal to\n # zero. Take the corresponding right-singular vectors (v_7 and v_8)\n U, s, V = np.linalg.svd(t_matrix)\n\n # Check if only the last two singular values are almost zero.\n bad_singular_values = False\n for i, singular_value in enumerate(s):\n if i < 6:\n if singular_value < 5e-1:\n bad_singular_values = True\n else:\n if singular_value > 5e-1:\n bad_singular_values = True\n v_7 = V[6, :].copy()\n v_8 = V[7, :].copy()\n # print(\"v_7: {}\".format(v_7))\n # print(\"v_8: {}\".format(v_8))\n\n # 3.\n # Compute the coefficients of (35) and solve it, finding two solutions for s.\n u_1 = v_7[0:4].copy()\n u_2 = v_8[0:4].copy()\n v_1 = v_7[4:8].copy()\n v_2 = v_8[4:8].copy()\n # print(\"u_1: {}, \\nu_2: {}, \\nv_1: {}, \\nv_2: {}\".format(u_1, u_2, v_1, v_2))\n\n a = np.dot(u_1.T, v_1)\n assert a != 0.0, \"This would involve division by zero.\"\n b = np.dot(u_1.T, v_2) + np.dot(u_2.T, v_1)\n c = np.dot(u_2.T, v_2)\n # print(\"a: {}, b: {}, c: {}\".format(a, b, c))\n square_root_term = b * b - 4.0 * a * c\n\n if square_root_term < -1e-2:\n assert False, \"square_root_term is too negative: {}\".format(\n square_root_term)\n if square_root_term < 0.0:\n square_root_term = 0.0\n s_1 = (-b + np.sqrt(square_root_term)) / (2.0 * a)\n s_2 = (-b - np.sqrt(square_root_term)) / (2.0 * a)\n # print(\"s_1: {}, s_2: {}\".format(s_1, s_2))\n\n # 4.\n # For these two s values, compute s^2*u_1^T*u_1 + 2*s*u_1^T*u_2 + u_2^T*u_2\n # From these choose the largest to compute lambda_2 and then lambda_1\n solution_1 = s_1 * s_1 * np.dot(u_1.T, u_1) + 2.0 * \\\n s_1 * np.dot(u_1.T, u_2) + np.dot(u_2.T, u_2)\n solution_2 = s_2 * s_2 * np.dot(u_1.T, u_1) + 2.0 * \\\n s_2 * np.dot(u_1.T, u_2) + np.dot(u_2.T, u_2)\n\n if solution_1 > solution_2:\n assert solution_1 > 0.0, solution_1\n lambda_2 = np.sqrt(1.0 / solution_1)\n lambda_1 = s_1 * lambda_2\n else:\n assert solution_2 > 0.0, solution_2\n lambda_2 = np.sqrt(1.0 / solution_2)\n lambda_1 = s_2 * lambda_2\n # print(\"lambda_1: {}, lambda_2: {}\".format(lambda_1, lambda_2))\n\n # 5.\n # The 
result is lambda_1*v_7 + lambda_2*v_8\n dq_H_E = DualQuaternion.from_vector(lambda_1 * v_7 + lambda_2 * v_8)\n # Normalize the output, to get rid of numerical errors.\n dq_H_E.normalize()\n\n if (dq_H_E.q_rot.w < 0.):\n dq_H_E.dq = -dq_H_E.dq.copy()\n return (dq_H_E, s, bad_singular_values)", "def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)", "def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))", "def test_y_generate():\n a = Attractor()\n #say x, y, z = [0.1, 0.0, 0.0]\n\n dx = (10.0 * (0.0 - 0.1)) * (80.0-0.0)/10000 + 0.1\n dy = (0.1 * (28 - 0.0) - 0.0) * (80.0-0.0)/10000 + 0.0\n dz = ((0.1 * 0.0) - (8/3 * 0.0)) * (80.0-0.0)/10000 + 0.0\n ex_1 = np.array([dx, dy, dz])\n\n dx2 = (10.0 * (dy - dx)) * (80.0-0.0)/10000.0 + dx \n dy2 = (dx * (28.0 - dz) - dy) * (80.0-0.0)/10000.0 + dy\n dz2 = ((dx * dy) - (8/3 * dz)) * (80.0-0.0)/10000.0 + dz\n ex_2 = np.array([dx2, dy2, dz2])\n\n dx3 = (10.0 * (dy2 - dx2)) * (80.0-0.0)/10000.0 + dx2\n dy3 = (dx2 * (28.0 - dz2) - dy2) * (80.0-0.0)/10000.0 + dy2\n dz3 = ((dx2 * dy2) - (8/3 * dz2)) * (80.0-0.0)/10000.0 + dz2\n ex_3 = np.array([dx3, dy3, dz3])\n\n dx4 = (10.0 * (dy3 - dx3)) * (80.0-0.0)/10000.0 + dx3\n dy4 = (dx3 * (28 - dz3) - dy3) * (80.0-0.0)/10000.0 + dy3\n dz4 = ((dx3 * dy3) - (8/3 * dz3)) * (80.0-0.0)/10000.0 + dz3\n ex_4 = np.array([dx4, dy4, dz4])\n\n dx5 = (10.0 * (dy4 - dx4)) * (80.0-0.0)/10000.0 + dx4\n dy5 = (dx4 * (28 - dz4) - dy4) * (80.0-0.0)/10000.0 + dy4\n dz5 = ((dx4 * dy4) - (8/3 * dz4)) * (80.0-0.0)/10000.0 + dz4\n ex_5 = np.array([dx5, dy5, dz5])\n\n \n a.evolve(order = 4)\n y_list = a.solution['y'].tolist()\n \n for i in y_list[:6]:\n yy = round(i, 2)\n for j in [0.0, dy, dy2, dy3, dy4, dy5]:\n yyy = round(j, 2)\n \n print (\"Actual increments: \", yy)#str(a.solution()['x']).strip('[]'))\n print (\"Expected increments: \", yyy)\n assert yy == yyy", "def A_calc(self, x, y, theta, v, omega, dt):\n # Initialize 5x5 A matrix\n A = np.zeros((5,5))\n A[0,0] = 1\n A[1,1] = 1\n A[2,2] = 1\n A[3,3] = 1\n A[4,4] = 1\n \n A[0,2] = -1 * v * np.sin(theta) * dt\n A[0,3] = np.cos(theta) * dt\n A[1,2] = v * np.cos(theta) * dt\n A[1,3] = np.sin(theta) * dt\n A[2,4] = dt\n \n return(A)", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = [email protected]\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( [email protected]/self.alpha))", "def evaluate(t, x, y):\n r = np.sqrt(x**2 + y**2)\n return contrast * np.cos(kx_g*x + ky_g*y - w_g*t) * (1 - heaviside(r - patch_diameter*0.5))", "def irs_method(state):\n\n # First, importing all variables from the 
dictionary 'state'\n theta_ein2cm = state['theta_ein2cm']\n beta_boundary = state['beta_boundary']\n beta_res = state['beta_res']\n epsilon = state['epsilon']\n mu_h = state['mu_h']\n mu_v = state['mu_v']\n m = state['m']\n zeta = state['zeta']\n max_memory = state['max_memory']\n rays_per_pixel = state['rays_per_pixel']\n\n pixel2cm = theta_ein2cm * beta_boundary / beta_res # size of 1 pixel in cm in the source plane\n print('The physical size of 1 pixel is ' + str(beta_boundary / beta_res) + ' Einstein radii\\nor ' + str(\n np.format_float_scientific(pixel2cm, 2)) + ' cm in the source plane\\n')\n\n theta_boundaries = [epsilon * mu_h * beta_boundary / 2,\n epsilon * mu_v * beta_boundary / 2]\n # The number of images to draw in IRS method, assuming an ellipse in the image plane\n num_of_img = int((beta_res * epsilon) ** 2 * mu_v * mu_h * rays_per_pixel)\n print('A total of ' + str(num_of_img) + ' images for IRS method')\n state['num_of_img'] = num_of_img\n print(str(num_of_img / beta_res ** 2) + ' rays per source plane pixels')\n # The area in (Einstein-radii)^2 that each ray uniquely occupies\n s_ray = (epsilon ** 2 * mu_h * mu_v * beta_boundary ** 2) / num_of_img\n\n l_tmp = int(max_memory / m.shape[0] * 10 ** 9 / 8) # the maximum number of images to vector-compute\n n_runs = max(int(num_of_img / l_tmp), 1) # the number of sub arrays to vector-compute\n print('Max memory for array: ' + str(l_tmp * m.shape[0] * 8 / 10 ** 9) + 'GB')\n mu_grid = np.zeros((beta_res, beta_res)) # this will save the total number of rays per cell in the source plane\n start_time = time.time()\n theta = []\n beta = []\n num_cores = multiprocessing.cpu_count()\n print(str(num_cores) + ' active CPU cores')\n # starting the parallel routine, the variable mu_grid_temp_array is just a placeholder.\n mu_grid_temp_array = Parallel(n_jobs=num_cores, require='sharedmem')\\\n (delayed(parallel_irs)(i,mu_grid,l_tmp,n_runs,s_ray,theta_boundaries,start_time,state) for i in range(n_runs))\n\n if n_runs * l_tmp < num_of_img: # if some values are left\n # Drawing images locations\n theta = random_image_draw(int(num_of_img - n_runs * l_tmp), theta_boundaries[0], theta_boundaries[1])\n # Calculating locations of sources and corresponding magnitudes\n beta = af.img2src(theta, m, zeta, state)\n # Binning sources magnification\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n mu_grid += mu_grid_temp\n print('Finished shooting in ' + str(time.time() - start_time) + 's')\n else:\n print('Finished shooting in ' + str(time.time() - start_time) + 's')\n beta = np.ones(2, 2) # Just so that the next line can run smoothly and return beta_grid_h and beta_grid_v\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n\n return beta_grid_h, beta_grid_v, mu_grid" ]
[ "0.60023314", "0.5932807", "0.5920514", "0.5843368", "0.58112794", "0.57958204", "0.5776637", "0.573947", "0.5736349", "0.5707544", "0.5702388", "0.5691359", "0.5680941", "0.5660144", "0.5651713", "0.5644874", "0.56244063", "0.55701184", "0.55645734", "0.5539961", "0.5539685", "0.5495305", "0.5480792", "0.54703736", "0.5468112", "0.5449851", "0.5422867", "0.54145455", "0.5400252", "0.53898436", "0.5377388", "0.53694016", "0.5368866", "0.5355502", "0.535047", "0.5349858", "0.53485346", "0.533997", "0.53322953", "0.53255296", "0.5322177", "0.5315177", "0.5313431", "0.5304882", "0.5298799", "0.52937895", "0.5264389", "0.5243359", "0.5239744", "0.52378064", "0.52301145", "0.5228335", "0.5225897", "0.52181727", "0.5216479", "0.5214301", "0.52109027", "0.5205788", "0.5203815", "0.5201643", "0.5200467", "0.5199439", "0.5199331", "0.5197516", "0.51918834", "0.518826", "0.51871747", "0.5185232", "0.51818717", "0.51814896", "0.5177565", "0.5177557", "0.5177501", "0.51770025", "0.5172452", "0.5169073", "0.5157938", "0.51573503", "0.51524395", "0.5151175", "0.515006", "0.5145997", "0.51452535", "0.51401967", "0.51401097", "0.51382023", "0.513805", "0.51359165", "0.5134984", "0.5133788", "0.51334655", "0.5128334", "0.51269686", "0.51264095", "0.5119112", "0.5116034", "0.51139724", "0.5110187", "0.5108023", "0.5103466" ]
0.56527215
14
Compute observation vector for solving the exterior orientation parameters of a single image based on their approximate values
def __ComputeObservationVector_RzRyRz(self, groundPoints): n = groundPoints.shape[0] # number of points # Coordinates subtraction dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0] dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1] dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2] dXYZ = np.vstack([dX, dY, dZ]) rotated_XYZ = np.dot(self.rotationMatrix_RzRyRz.T, dXYZ).T l0 = np.empty(n * 2) # Computation of the observation vector based on approximate exterior orientation parameters: l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2] l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2] return l0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ev2vi(eV,mu): \n return cv*np.sqrt( eV*(eV+2.e0*mu*mpc2))/(eV+mu*mpc2)", "def ComputeInnerOrientation(self, imagePoints):\n # implementing observation vectors\n imagePoints = imagePoints.reshape(np.size(imagePoints), 1)\n\n fMarks = self.camera.fiducialMarks.reshape(np.size(self.camera.fiducialMarks), 1)\n\n n = int(len(imagePoints)) # number of observations\n u = 6 # 6 orientation parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(imagePoints)):\n if i % 2 == 0:\n A[i, 0] = 1;\n A[i, 1] = 0;\n A[i, 2] = fMarks[j];\n A[i, 3] = fMarks[j + 1];\n A[i, 4] = 0\n A[i, 5] = 0\n else:\n A[i, 0] = 0;\n A[i, 1] = 1;\n A[i, 2] = 0;\n A[i, 3] = 0;\n A[i, 4] = fMarks[j];\n A[i, 5] = fMarks[j + 1]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), imagePoints))\n v = np.dot(A, X) - imagePoints\n\n adjustment_results = {\"params\": X, \"residuals\": v, \"N\": np.dot(np.transpose(A), A)}\n\n self.__innerOrientationParameters = X # updating the inner orientation params\n\n return adjustment_results", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def get_Objective(self):\r\n \r\n ans = 0\r\n for i in range(len(self.X.shape)):\r\n ans += self.parameter['phi'][i] * self.parameter['lambda1'] *norm(tl.unfold(self.Wlist[i], i), 'nuc') + (1 / self.parameter['m']) * norm(tl.unfold(self.X - self.Wlist[i] - self.V, i))\r\n\r\n # Augmented part is calculated seperately. 
\r\n augment_part1 = 0.5 * self.parameter['rho1'] * norm(self.V - self.T + self.F1)\r\n augment_part2 = 0.5 * self.parameter['rho2'] * norm(tl.fold(np.dot(self.Dmatrix, tl.unfold(self.T, 0)), 0, self.T.shape) - self.S + self.F2)\r\n\r\n # Combine the result for final objective function\r\n ans += self.parameter['beta1'] * norm(self.V.reshape(self.totaldim), 1) + self.parameter['beta2'] * norm(self.S.reshape(self.totaldim), 1) + augment_part1 + augment_part2 \r\n return ans", "def compute_observation(self):\n robotPos, robotOrn = p.getBasePositionAndOrientation(self.botId)\n robotEuler = p.getEulerFromQuaternion(robotOrn)\n linear, angular = p.getBaseVelocity(self.botId)\n return (np.array([robotEuler[0],angular[0],self.vt], dtype='float32'))", "def diriv(x, params):\n return np.array([x,1])", "def ienkf(A0,x,x0,yobs,T,H,R):\n nens = A0.shape[1]\n Iens = np.matrix(np.identity(nens))\n \n #Anomalies in state space\n A = A0 * T\n \n #Ensemble in state space\n E = x + A\n \n #Ensemble in observation space\n Ey = np.matrix(H(E))\n \n #Ensemle mean in observation space\n y = np.mean(Ey,axis=1)\n \n #Anomaies in observation space\n Ay = Ey - y\n Ay = Ay*np.linalg.inv(T)\n \n #Innovation vector\n dy = yobs - y\n \n \n Rmsq = np.linalg.inv(scipy.linalg.sqrtm(R))\n s = Rmsq*dy/np.sqrt(nens-1)\n S = Rmsq*Ay/np.sqrt(nens-1)\n V = np.linalg.inv(Iens + S.T*S)\n b = V*S.T*s\n dx = A0*b + A0 * V * np.linalg.pinv(A0.T*A0) * A0.T * (x-x0)\n T = scipy.linalg.sqrtm(V)\n return (dx,T)", "def EI(x, gp, ndim,fMax, epsilon=0.1):\n\t#epsilon = 0.1\n\tx1=np.array(x).reshape(-1,ndim)\n\tmuNew, stdNew = gp.predict(x1, return_std=True)\n\t#fMax=max(Y_init)\n\tZ = (muNew - fMax - epsilon)/stdNew\n\treturn -((muNew - fMax - epsilon)* scipy.stats.norm.cdf(Z) + stdNew*scipy.stats.norm.pdf(Z))", "def HOG(img, x, y):\n #TODO: write a HOG descriptor here\n des=[]\n row=0\n sub_image = img[x-8:x+8,y-8:y+8]\n while row < len(sub_image):\n col=0\n while col < len(sub_image[0]):\n temp_vector = [0 for i in range(8)]\n new_subimage = sub_image[row:row+4,col:col+4]\n x_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=1,dy=0)\n y_gradient = cv2.Sobel(new_subimage,ddepth=-1,dx=0,dy=1)\n theta = np.empty([x_gradient.shape[0],x_gradient.shape[1]])\n for i in range(len(x_gradient)):\n for j in range(len(x_gradient[0])):\n if x_gradient[i,j] == 0:\n theta[i,j] = 90\n else:\n theta[i,j] = np.arctan(y_gradient[i,j]/x_gradient[i,j])*(180/np.pi)\n theta_iter = theta.flatten() #To avoid nested for loops for 4x4 theta\n for i in range(len(theta_iter)):\n if theta_iter[i] < 45:\n temp_vector[0]=temp_vector[0]+1\n elif theta_iter[i] >= 45 and theta_iter[i] < 90:\n temp_vector[1]=temp_vector[1]+1\n elif theta_iter[i] >= 90 and theta_iter[i] < 135:\n temp_vector[2]=temp_vector[2]+1\n elif theta_iter[i] >= 135 and theta_iter[i] < 180:\n temp_vector[3]=temp_vector[3]+1\n elif theta_iter[i] >= 180 and theta_iter[i] < 225:\n temp_vector[4]=temp_vector[4]+1\n elif theta_iter[i] >= 225 and theta_iter[i] < 270:\n temp_vector[5]=temp_vector[5]+1\n elif theta_iter[i] >= 270 and theta_iter[i] < 315:\n temp_vector[6]=temp_vector[6]+1\n elif theta_iter[i] >= 315 and theta_iter[i] < 360:\n temp_vector[7]=temp_vector[7]+1\n des.extend(temp_vector)\n col=col+4\n row=row+4\n return des", "def erode_pvm(image, selem):\n assert len(image.shape) == len(selem.shape), \"Image and region must have \" \\\n \"identical dimensionality.\"\n eroded = ndi.correlate(1 - image.astype(np.float64), selem.astype(np.float64),\n mode='reflect')\n eroded = 1. 
- eroded # Invert\n\n return np.maximum(eroded, 0.0)", "def gyroi(E, B, mu, Zi,pitch):\n V = ev2vi(E, mu);\n Vperp = V*np.sqrt(1-pitch);\n return mu * mp * Vperp / Zi / eV2J / B;", "def calc_emi(tgt_pt, src_pt, src_dir, coef=1):\r\n emi_params = [\r\n numpy.zeros(tgt_pt.shape[-1], tgt_pt.dtype),\r\n numpy.zeros((tgt_pt.shape[-1], tgt_pt.shape[-1]), tgt_pt.dtype)\r\n ]\r\n\r\n # Start and end 'r' vectors\r\n r0 = tgt_pt - src_pt\r\n r1 = r0 - src_dir\r\n\r\n # Calculate the integral from Biot–Savart law (https://en.wikipedia.org/wiki/Biot–Savart_law):\r\n # dl x r / sqrt(l^2 + R^2)^3\r\n #\r\n # The \"l\" origin is selected at the closest point to the target to simplify calculations.\r\n # Thus \"r = l^2 + R^2\" and \"|dl x r| = |dl|.R\", where R is distance between the target and origin.\r\n #\r\n # Use integral calculator https://www.integral-calculator.com/ (substitute l with x):\r\n # int[ R/sqrt(x^2 + R^2)^3 dx ] = x / (R * sqrt(x^2 + R^2)) + C\r\n src_dir_len2 = src_dir.dot(src_dir)\r\n if not src_dir_len2:\r\n return emi_params # Zero length, return zero EMI params\r\n\r\n # Vector projections of \"r0\" and \"r1\" in the direction of \"src_dir\"\r\n # The '-' is to set the origin at the projected point, instead of at src_pt\r\n l0 = -src_dir.dot(src_dir.dot(r0) / src_dir_len2)\r\n l1 = l0 + src_dir\r\n R = l0 + r0\r\n\r\n #\r\n # Integral at the start of interval\r\n #\r\n # Start with l0 x R to get a direction vector with length of |l0|.|R|\r\n vect0 = numpy.cross(l0, R)\r\n\r\n # Divide by 'r0'\r\n r0_len = numpy.sqrt(r0.dot(r0))\r\n if not r0_len:\r\n return None # Target point coincides with \"src_pt\"\r\n vect0 /= r0_len\r\n\r\n #\r\n # Integral at the end of interval\r\n #\r\n # Start with l1 x R to get a direction vector with length of |l1|.|R|\r\n vect1 = numpy.cross(l1, R)\r\n\r\n # Divide by 'r1'\r\n r1_len = numpy.sqrt(r1.dot(r1))\r\n if not r1_len:\r\n return None # Target point coincides with \"src_pt + src_dir\"\r\n vect1 /= r1_len\r\n\r\n #\r\n # Combine both integrals\r\n #\r\n # Divide result by 'R^2', resulting:\r\n # |l|.|R| / |r| / |R|^2 = |l| / (|R|.|r|)\r\n R_len2 = R.dot(R)\r\n if not R_len2:\r\n return None # Target point lies on the source line\r\n\r\n B = (vect1 - vect0) / R_len2\r\n\r\n # Scale by a coefficient, like current, magnetic constant and 1/(4*pi)\r\n B *= coef\r\n\r\n emi_params[0] = B\r\n\r\n # Calculate the partial derivatives from Biot–Savart law \"R/sqrt(l^2 + R^2)^3\" (see above)\r\n # along \"l\" and \"R\" axes, then integrate each of them along 'l'.\r\n #\r\n # The individual gradient vector components are the values of these integrals. 
The 'l'\r\n # component is along the 'src_dir' direction and 'R' component is to the direction of its\r\n # perpendicular through 'tgt_pt'.\r\n\r\n # Gradient component along 'l' (substitute l with x):\r\n # int[ dF(x)/dx dx] = F(x) => gradBx = R/sqrt(x^2 + R^2)^3 - R/sqrt(x^2 + R^2)^3 + C\r\n # Finally:\r\n # R * (1/r1^3 - 1/r0^3)\r\n R_len = numpy.sqrt(R_len2)\r\n\r\n l_comp = R_len * ( 1 / r1_len ** 3 - 1 / r0_len ** 3)\r\n\r\n # Gradient component along 'R':\r\n # Use derivative calculator https://www.derivative-calculator.net/ (substitute R with x):\r\n # input: x / sqrt(x^2 + l^2)^3, result: - (2x^2 - l^2) / (x^2 + l^2)^(5/2)\r\n # Substitute back x to R, then l with x:\r\n # result: (x^2 - 2R^2) / sqrt(x^2 + R^2)^5\r\n # Use integral calculator https://www.integral-calculator.com/ (back R and x):\r\n # input: (x^2 - 2R^2) / sqrt(x^2 + R^2)^5, result: - (x^3 + 2xR^2) / ( R^2(x^2 + R^2)^(3/2) ) + C\r\n # Simplify (substitute back x to l):\r\n # - (l^3 + 2*l*R^2) / ( R^2(l^2 + R^2)^(3/2) ) = - l(l^2 + R^2 + R^2) / ( R^2 * r^3 ) =\r\n # = - l(r^2 + R^2) / ( R^2 * r^3 )\r\n # Finally:\r\n # - l1(r1^2 + R^2) / ( R^2 * r1^3 ) + l1(r1^2 + R^2) / ( R^2 * r0^3 )\r\n l0_len = numpy.sqrt(l0.dot(l0))\r\n if l0.dot(src_dir) < 0:\r\n l0_len = -l0_len\r\n l1_len = numpy.sqrt(l1.dot(l1))\r\n if l1.dot(src_dir) < 0:\r\n l1_len = -l1_len\r\n\r\n R_comp = -l1_len*(r1_len ** 2 + R_len2) / (R_len2 * r1_len ** 3)\r\n R_comp -= -l0_len*(r0_len ** 2 + R_len2) / (R_len2 * r0_len ** 3)\r\n\r\n # The '-' is to flip direction to point toward field magnitude increase\r\n l_comp *= -coef\r\n R_comp *= coef\r\n\r\n # Combine l_comp and R_comp into a Jacobian matrix\r\n emi_params[1] = build_jacobian(l_comp, R_comp, src_dir, R, B)\r\n\r\n return emi_params", "def approximate_nonlinear_vector_field(dataset_path):\n\n file_X0 = \"nonlinear_vectorfield_data_x0.txt\"\n names_X0 = ['X0_x', 'X0_y']\n data_X0 = pd.read_csv(dataset_path / file_X0, sep=' ', names=names_X0).to_numpy()\n plt.scatter(data_X0[:, 0], data_X0[:, 1])\n\n names_X1 = ['X1_x', 'X1_y']\n file_X1 = \"nonlinear_vectorfield_data_x1.txt\"\n data_X1 = pd.read_csv(dataset_path / file_X1, sep=' ', names=names_X1).to_numpy()\n plt.scatter(data_X1[:, 0], data_X1[:, 1])\n plt.title(\"Given data set X0 and X1\")\n plt.show()\n\n \"\"\"\n Following block calculates the approximate values using differential\n solver solve_ivp\n \"\"\"\n V = (data_X1 - data_X0) / 0.1\n approx_func_At = np.linalg.inv(data_X0.T @ data_X0) @ data_X0.T @ V\n approx_values = []\n for i in range(data_X0.shape[0]):\n sol = solve_ivp(fun=derivative_func, t_span=[0, 10], t_eval=[0.1],\n y0=data_X0[i, :], args=(approx_func_At,))\n approx_values.append(sol.y)\n approx_values = np.array(approx_values)\n approx_values = approx_values.reshape((2000, 2))\n\n \"\"\"\n We now plot the original data of X1 and the newly approximated data.\n \"\"\"\n plt.scatter(data_X1[:, 0], data_X1[:, 1])\n plt.scatter(approx_values[:, 0], approx_values[:, 1], c='green')\n plt.title(\"Given X1 and approximated values\")\n plt.title(\"Approximated vector field\")\n plt.show()\n\n \"\"\"\n We now plot the vector filed and the phase portrait.\n \"\"\"\n x, y = np.meshgrid(np.linspace(-5, 5, 10), np.linspace(-5, 5, 10))\n u, v = np.zeros((10, 10)), np.zeros((10, 10))\n for i in range(0, 10):\n for j in range(0, 10):\n u[i, j] = approx_values.T[0, i]\n v[i, j] = approx_values.T[1, j]\n plt.quiver(x, y, u, v)\n plt.streamplot(x, y, u, v)\n plt.title(\"Approximated Vector field\")\n plt.show()\n\n \"\"\"\n Following block 
calculates the mean squared error of the X1 and calculate\n approximated values.\n \"\"\"\n MSE = np.square(data_X1 - approx_values).mean()\n print(MSE)", "def el2rv(mu,a,e,i,capom,om,f):\n\n prec = 1.0e-13 #user can change this if more precision needed (just runs slower)\n\n #compute the unit vector\n u = om + f\n xhat = np.cos(u)*np.cos(capom) - np.cos(i)*np.sin(capom)*np.sin(u)\n yhat = np.cos(u)*np.sin(capom) + np.cos(i)*np.cos(capom)*np.sin(u)\n zhat = np.sin(i)*np.sin(u)\n\n #compute the angular momentum vector (unit vector)\n hx = np.sin(capom)*np.sin(i)\n hy = -np.cos(capom)*np.sin(i)\n hz = np.cos(i)\n\n #assuming not parabolic, here the magnitudes of the vectors\n r = a * (1.0 - e*e) / (1.0 + e*np.cos(f))\n h = ( mu*a*(1.0 - e*e) )**0.5\n\n #position vectors\n x = r * xhat\n y = r * yhat\n z = r * zhat\n\n #compute components of vector theta hat\n thx = hy * zhat - hz * yhat\n thy = hz * xhat - hx * zhat\n thz = hx * yhat - hy * xhat\n\n #obtain the velocity vector's components and calculate v\n thdot = h/(r*r)\n rdot = e*mu*np.sin(f)/h\n\n vx = r * thdot * thx + rdot * xhat\n vy = r * thdot * thy + rdot * yhat\n vz = r * thdot * thz + rdot * zhat\n\n return x,y,z", "def __ComputeObservationVector(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def get_correction(d, a, hfov, img_x):\n\n width = 2 * d*math.tan((hfov/2)*math.pi/180) # in meters\n one_meter = img_x / width\n return int(a*one_meter)", "def epipoles_location(f_mat):\r\n u, s, vh = np.linalg.svd(f_mat)\r\n e_l = vh[-1, :]\r\n e_r = u[:, -1]\r\n # get x, y by dividing by w\r\n e_l = (e_l[0] / e_l[2], e_l[1] / e_l[2])\r\n e_r = (e_r[0] / e_r[2], e_r[1] / e_r[2])\r\n return e_l, e_r", "def photometric_calibration():\n pass", "def orientation(cnt):\n\t(x,y), (MA, ma), angle = cv2.fitEllipse(cnt)\n\treturn angle", "def I_int(expt, z):\n u = kperp * r(z) / (2. * np.pi) # UV plane: |u| = d / lambda\n nu = expt['nu_line'] / (1. + z)\n fov = (1.02 / (nu * expt['Ddish']) * (3e8 / 1e6))**2.\n \n l = 3e8 / (nu * 1e6) # Wavelength (m)\n u_min = expt['Dmin'] / l\n u_max = expt['Dmax'] / l\n \n # New calc.\n n_u = expt['Ndish']*(expt['Ndish'] - 1.) * l**2. * np.ones(u.shape) \\\n / (2. * np.pi * (expt['Dmax']**2. - expt['Dmin']**2.) )\n n_u[np.where(u < u_min)] = 1. / INF_NOISE\n n_u[np.where(u > u_max)] = 1. / INF_NOISE\n \n # Interferometer multiplicity factor, /I/\n I = 4./9. 
* fov / n_u\n return I", "def e(self):\n return np.matrix([self.y - self.arg[0,0]*self.x**3 - self.arg[1,0]*self.x**2 - self.arg[2,0]*self.x**1 - self.arg[3,0]])", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def detect_velocity(image):\n nonlocal prev, v_last\n curr_bgr = cv.warpPerspective(image, M, (160, 120))\n curr = cv.cvtColor(curr_bgr, cv.COLOR_BGR2GRAY)\n\n if prev is None:\n prev = curr\n v_last = 0.0\n return v_last, curr_bgr, np.zeros_like(image)\n\n flow = cv.calcOpticalFlowFarneback(\n prev, # Previous image\n curr, # Current image\n None, # Computed flow image that has the same size oas prev and type CV_32FC2.\n 0.5, # Specifies the image scale (<1) to build pyramids for each image.\n 3, # Number of pyramid layers including the initial image.\n 15, # winsize, averaging windows size.\n 3, # iterations, number of iterations the algorithm does at each pyramid level.\n 5, # standard deviation of the Gaussian that is used to smooth derivative\n 1.5,\n 0)\n\n mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1])\n\n v = mag * np.sin(ang)\n\n ######################\n ## Histogram for mag\n ar = np.arange(-20.0, 20.0, 0.50, dtype=np.float)\n his = np.histogram(v, bins=ar)\n\n for i, n in enumerate(his[0]):\n bgr = (255, 255, 0)\n if his[1][i] < 0:\n bgr = (0, 255, 255)\n\n #print('[{}] {} - {}'.format(i, n, his[1][i]))\n cv.rectangle( image, #curr_bgr,\n (i*2, HEIGHT),\n (i*2, HEIGHT - int(n / 10)),\n bgr, #(0, 255, 255),\n cv.FILLED)\n\n hsv = np.zeros_like(image)\n hsv[..., 0] = ang * 180 / np.pi / 2\n hsv[..., 1] = 255\n hsv[..., 2] = cv.normalize(np.abs(v), None, 0, 255, cv.NORM_MINMAX)\n hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)\n ##\n ######################\n\n v_abs = np.absolute(v)\n v = v[v_abs >= np.percentile(v_abs, VELOCITY_CUTOFF_PCT)]\n\n v_max = v_last + MAX_ACC\n v_min = v_last - MAX_ACC\n v = np.clip(v, v_min, v_max)\n if v.size > 0:\n v_avg = v.mean()\n else:\n if v_last > 0:\n v_avg = max(v_last - MAX_ACC, 0)\n elif v_last < 0:\n v_avg = min(v_last + MAX_ACC, 0)\n else:\n v_avg = 0\n\n prev = curr\n v_last = v_avg\n return v_last, curr_bgr, hsv_bgr", "def __v(pk: float, pna: float, pcl: float, pca: float) -> float:\n ex_ion = pk * ex_k + pna * ex_na + pcl * in_cl + pca * ex_ca\n in_ion = pk * in_k + pna * in_na + pcl * ex_cl + pca * in_ca\n v = r * t / f * np.log(ex_ion/in_ion) * 1000\n return v", "def calcEVals(self):\n self.eVals,self.eVecs = np.linalg.eigh(self.rhoOp)", "def prediction(self, v, imu_meas):\n # YOUR CODE HERE\n pass", "def _save_parameters(self):\n\n # eigenvectors are the coefficients of an ellipse in general form\n # a*x^2 + 2*b*x*y + c*y^2 + 2*d*x + 2*f*y + g = 0 [eqn. 15) from (**) or (***)\n a = self.coef[0, 0]\n b = self.coef[1, 0]/2.\n c = self.coef[2, 0]\n d = self.coef[3, 0]/2.\n f = self.coef[4, 0]/2.\n g = self.coef[5, 0]\n\n # finding center of ellipse [eqn.19 and 20] from (**)\n x0 = (c*d-b*f)/(b**2.-a*c)\n y0 = (a*f-b*d)/(b**2.-a*c)\n\n # Find the semi-axes lengths [eqn. 
21 and 22] from (**)\n numerator = 2*(a*f*f+c*d*d+g*b*b-2*b*d*f-a*c*g)\n denominator1 = (b*b-a*c) * \\\n ((c-a)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n denominator2 = (b*b-a*c) * \\\n ((a-c)*np.sqrt(1+4*b*b/((a-c)*(a-c)))-(c+a))\n width = np.sqrt(numerator/denominator1)\n height = np.sqrt(numerator/denominator2)\n\n # angle of counterclockwise rotation of major-axis of ellipse to x-axis [eqn. 23] from (**)\n # or [eqn. 26] from (***).\n phi = .5*np.arctan((2.*b)/(a-c))\n\n self._center = [x0, y0]\n self._width = width\n self._height = height\n self._phi = phi", "def x(self) -> float:\n return self._ohms.imag", "def get_vertical_vector(q):\n P0, P1, P2, P3 = q\n P0_up = copy.deepcopy(P0)\n P0_up.depth = P0_up.depth - 1.0\n p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF\n p1 = Vector.fromPoint(P0_up)\n v1 = (p1 - p0).norm()\n return v1", "def _r_inv(self):\n # [output_dim, output_dim]\n return tf.linalg.cholesky_solve(\n self._chol_obs_covariance,\n tf.eye(self.emission.output_dim, dtype=self._chol_obs_covariance.dtype),\n )", "def imu_get_euler(self):\n return self.imu.get_euler()", "def isi_calc(self):\n arg = erfinv(0.8)*1.0E6/(self.speedup*self.br_nominal)\n print('arg: ', arg)\n\n # calculate center eye opening with no additional impairments\n self.isi_center = 2.0*erf(arg/self.tc) - self.l_1 # column Z\n\n # calculate center eye opening with residual DJ (DJ - DCD)\n self.isi_dj_center = (erf(arg*(1.0+self.dj_ui)/self.tc) + erf(arg*(1.0-self.dj_ui)/self.tc) - self.l_1) # column AD\n\n # calculate eye closing induced by interferometric effects from link end reflections\n mean_reflection = math.pow(10.0,0.05*(self.rx_reflection + self.tx_reflection)) # cell AB5\n er_lin = math.pow(10.0,0.1*self.er_dB_min) # cell AB7\n\n\n arg1 = np.sqrt(2.0*er_lin*self.isi_dj_center*(er_lin-1.0) + (er_lin+1.0)*self.l_1)\n print('arg1: ', arg1)\n arg2 = np.divide(arg1,self.isi_dj_center)\n arg3 = (2.0*self.ref_nf*np.power(10.0,-0.1*self.chil)*mean_reflection)\n self.isi_reflection = self.l_1-np.multiply(arg2,arg3)\n\n # calculate center eye opening with both residual DJ and reflection degradations included\n self.isi_dj_refl_closed = np.multiply(self.isi_dj_center, self.isi_reflection) # column AA\n print('isi_dj_refl_closed (AA) : ', self.isi_dj_refl_closed)\n \n # calculate eye opening at the corners with no additional impairments\n eff_rx_eye = 2.0*(0.5-self.X2)*self.speedup\n self.isi_corners = (erf(arg*(1.0+eff_rx_eye)/self.tc) + erf(arg*(1.0-eff_rx_eye)/self.tc) - self.l_1) # column AB\n\n # calculate eye opening at the corners with residual DJ impairment\n self.isi_dj_corners = (erf(arg*(1.0+eff_rx_eye+self.dj_ui)/self.tc) + erf(arg*(1.0-eff_rx_eye-self.dj_ui)/self.tc) - self.l_1) # column AC\n self.isi_tp4_rx = (erf(arg*(1.0+eff_rx_eye)/self.rx_1090_rise) + erf(arg*(1.0-eff_rx_eye)/self.rx_1090_rise) - 1) # cell AG5\n\n # end of GbE10.isi_calcdef isi_calc(self):", "def calculate_ic(self):\n # dt:\n dt = self.E\n\n # dr:\n dr = np.sqrt(self.E ** 2 - (self.Q + self.L ** 2) / self.r ** 2)\n #print(dr)\n if np.isnan(dr):\n dr = 0\n #dr = self._check_dr_sign(self.alpha)\n\n # dtheta:\n omega = self.Q - self.L ** 2 * (np.cos(self.theta) / np.sin(self.theta)) ** 2\n if omega < 0:\n omega = np.abs(omega)\n dtheta = np.sqrt(omega) / self.r**2\n if self.eta < np.pi / 2:\n dtheta *= -1\n\n # dphi:\n dphi = self.L / (self.r * np.sin(self.theta)) ** 2\n\n return dt, dr, dtheta, dphi", "def vi2ev(v,mu):\n return 0.5*mu*mp*v**2/eV2J", "def cur_approx(self):\n return invert_normal_params(self.Q, self.r)", 
"def ellipse_orientation(S):\n return 1/2 * np.arctan2(S[..., 2], S[..., 1])", "def expected_improvement(ymin, mu, sig):\n p_imp = norm.cdf((ymin-mu)/sig)\n p_ymin = norm.pdf((ymin-mu)/sig)\n ei = (ymin-mu)*p_imp + sig*p_ymin\n return ei", "def get_evaporation_latent_heat() -> float:\n theta = 28.0\n return 2500.8 - 2.3668 * theta", "def I(x, y, l, p):\n \n return 0.5 / (mu * c) * A0**2 * ( u(x, y, l, p) )**2", "def dilate_pvm(image, selem):\n assert len(image.shape) == len(selem.shape), \"Image and region must have \" \\\n \"identical dimensionality.\"\n dilated = ndi.correlate(image.astype(np.float64), selem.astype(np.float64),\n mode='reflect')\n return np.minimum(dilated, 1.0)", "def get_y(EQ, M):\n return (EQ[1] * ((-1) * EQ[0] * M[0] + EQ[1] * M[1]) - EQ[0] * EQ[2]) / (EQ[1] ** 2 + EQ[0] ** 2)", "def __ComputeApproximateVals(self, cameraPoints, groundPoints):\n\n # Find approximate values\n cameraPoints = cameraPoints.reshape(np.size(cameraPoints), 1)\n groundPointsXY = groundPoints[0:2, :].T\n groundPointsXY = groundPointsXY.reshape(np.size(groundPointsXY), 1)\n groundPointsZ = groundPoints[2, :].T\n\n n = int(len(cameraPoints)) # number of observations\n u = 4 # 4 conform parameters\n\n A = np.zeros((n, u)) # A matrix (n,u)\n\n j = 0\n for i in range(len(cameraPoints)):\n if i % 2 == 0:\n A[i, 0] = 1\n A[i, 1] = 0\n A[i, 2] = cameraPoints[j]\n A[i, 3] = cameraPoints[j + 1]\n else:\n A[i, 0] = 0\n A[i, 1] = 1\n A[i, 2] = cameraPoints[j + 1]\n A[i, 3] = -cameraPoints[j]\n j += 2\n\n X = np.dot(la.inv(np.dot(np.transpose(A), A)), np.dot(np.transpose(A), groundPointsXY))\n\n # now we can compute the rest of the params\n X0 = X[0]\n Y0 = X[1]\n kappa = np.arctan2(-X[3], X[2])\n lam = np.sqrt(X[2] ** 2 + X[3] ** 2)\n Z0 = np.average(groundPointsZ) + (lam) * self.camera.focalLength\n\n adjustment_results = {\"X0\": X0[0], \"Y0\": Y0[0], \"Z0\": Z0[0], \"omega\": 0, \"phi\": 0,\n \"kappa\": np.rad2deg(kappa[0])}\n\n self.__exteriorOrientationParameters = np.array(\n [X0[0], Y0[0], Z0[0], 0, 0, kappa[0]]).T # updating the exterior orientation params\n # self.__exteriorOrientationParameters = np.array([202225, 742447, 657.81, 0, 0, kappa[0]]).T\n #return adjustment_results", "def IK_geometric(dh_params, pose):\n pass", "def compute_desired_velocity(self):\n mask_red = (self.image_red == 255) \\\n *(self.image_green == 0) \\\n *(self.image_blue == 0)\n ind_red = sp.where( mask_red )\n phi = sp.ones(self.image_red.shape)\n phi[ind_red] = 0\n phi = sp.ma.MaskedArray(phi, mask=self.mask)\n numpy.set_printoptions(threshold=sys.maxsize)\n self.door_distance = skfmm.distance(phi, dx=self.pixel_size)\n tmp_dist = self.door_distance.filled(9999)\n grad = sp.gradient(tmp_dist,edge_order=2)\n grad_X = -grad[1]/self.pixel_size\n grad_Y = -grad[0]/self.pixel_size\n norm = sp.sqrt(grad_X**2+grad_Y**2)\n norm = (norm>0)*norm+(norm==0)*0.001\n self.desired_velocity_X = self.vmax * (grad_X/norm)\n self.desired_velocity_Y = self.vmax * (grad_Y/norm)\n '''plt.subplot(1,2,1)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.subplot(1,2,2)\n plt.imshow(self.desired_velocity_X, cmap='hot', interpolation='nearest')\n plt.gca().invert_yaxis()\n plt.colorbar()\n plt.show()'''\n return self.door_distance, self.desired_velocity_X, self.desired_velocity_Y", "def ev2ve(eV): \n return cv*np.sqrt( eV*(eV+2.e0*mec2))/(eV+mec2)", "def ComputeExteriorOrientation(self, imagePoints, groundPoints, epsilon):\n # cameraPoints = 
self.ImageToCamera(imagePoints)\n cameraPoints = imagePoints\n self.__ComputeApproximateVals(cameraPoints, groundPoints)\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n while la.norm(deltaX) > epsilon:\n l0 = self.__ComputeObservationVector(groundPoints.T)\n l0 = np.reshape(l0, (-1, 1))\n l = cameraPoints.reshape(np.size(cameraPoints), 1) - l0\n A = self.__ComputeDesignMatrix(groundPoints.T)\n N = np.dot(A.T, A)\n u = np.dot(A.T, l)\n deltaX = np.dot(la.inv(N), u)\n # update orientation pars\n self.__exteriorOrientationParameters = np.add(self.__exteriorOrientationParameters, np.reshape(deltaX, 6))\n\n # compute residuals\n l_a = np.reshape(self.__ComputeObservationVector(groundPoints.T), (-1, 1))\n v = l_a - cameraPoints.reshape(np.size(cameraPoints), 1)\n if (np.size(A, 0) - np.size(deltaX)) != 0:\n sig = np.dot(v.T, v) / (np.size(A, 0) - np.size(deltaX))\n sigmaX = sig[0] * la.inv(N)\n else:\n sigmaX = None\n\n return [self.exteriorOrientationParameters, sigmaX, v]", "def emissivity_calc (pv, ndvi):\n ndvi_dest = ndvi.copy()\n ndvi_dest[np.where(ndvi < 0)] = 0.991\n ndvi_dest[np.where((0 <= ndvi) & (ndvi < 0.2)) ] = 0.966\n ndvi_dest[np.where((0.2 <= ndvi) & (ndvi < 0.5)) ] = (0.973 * pv[np.where((0.2 <= ndvi) & (ndvi < 0.5)) ]) + (0.966 * (1 - pv[np.where((0.2 <= ndvi) & (ndvi < 0.5)) ]) + 0.005)\n ndvi_dest[np.where(ndvi >= 0.5)] = 0.973\n return ndvi_dest", "def equation(self):\n mat = np.zeros((self.nunknowns, self.model.neq))\n rhs = np.zeros(self.nunknowns) # Needs to be initialized to zero\n rhs[0:self.nlayers - 1] = 0.0\n rhs[self.nlayers - 1] = self.Qc\n ieq = 0\n for e in self.model.elementlist:\n if e.nunknowns > 0:\n head = e.potinflayers(self.xc, self.yc, self.layers) / self.aq.Tcol[self.layers, :]\n mat[0:self.nlayers - 1, ieq:ieq + e.nunknowns] = head[:-1] - head[1:]\n if e == self:\n for i in range(self.nlayers - 1):\n mat[i, ieq + i] -= self.resfac[i]\n mat[i, ieq + i + 1] += self.resfac[i + 1]\n mat[self.nlayers - 1, ieq:ieq + self.nlayers] = 1.0\n ieq += e.nunknowns\n else:\n head = e.potentiallayers(self.xc, self.yc, self.layers) / self.aq.T[self.layers]\n rhs[0:self.nlayers - 1] -= head[:-1] - head[1:]\n return mat, rhs", "def sepinsky_A_parameter(eccentricity=0.0, angular_velocity_ratio=1.0, true_anomaly=numpy.pi):\n numerator = angular_velocity_ratio**2 * (1.0 + eccentricity)**4\n denominator = (1.0 + eccentricity * numpy.cos(true_anomaly))**3\n return numerator / denominator", "def compute_hand_eye_calibration_BASELINE(dq_B_H_vec, dq_W_E_vec, config):\n assert len(dq_W_E_vec) == len(dq_B_H_vec)\n num_poses = len(dq_W_E_vec)\n\n start_time = timeit.default_timer()\n\n # Enforce the same sign of the rotation quaternion.\n for i in range(num_poses):\n dq_B_H = dq_B_H_vec[i]\n dq_W_E = dq_W_E_vec[i]\n if ((dq_W_E.q_rot.w < 0. and dq_B_H.q_rot.w > 0.) or\n (dq_W_E.q_rot.w > 0. and dq_B_H.q_rot.w < 0.)):\n dq_W_E_vec[i].dq = -dq_W_E_vec[i].dq.copy()\n\n # 0.0 Reject pairs whose motion is not informative,\n # i.e. 
their screw axis dot product is large\n if config.prefilter_poses_enabled:\n dq_B_H_vec_filtered, dq_W_E_vec_filtered = prefilter_using_screw_axis(\n dq_B_H_vec, dq_W_E_vec, config.prefilter_dot_product_threshold)\n else:\n dq_B_H_vec_filtered = dq_B_H_vec\n dq_W_E_vec_filtered = dq_W_E_vec\n num_poses_after_filtering = len(dq_W_E_vec_filtered)\n\n best_idx = -1\n best_num_inliers = config.min_num_inliers - 1\n best_dq_W_E_vec_inlier = []\n best_dq_B_H_vec_inlier = []\n\n if config.enable_exhaustive_search:\n print(\"Do exhaustive search to find biggest subset of inliers...\")\n else:\n print(\"Search for first set of inliers bigger than {}...\".format(\n config.min_num_inliers))\n\n # 0.1 Reject pairs where scalar parts of dual quaternions do not match.\n # Loop over all the indices to find an index of a pose pair.\n for j in range(num_poses_after_filtering):\n # Re-align all dual quaternion to the j-th dual quaternion.\n dq_W_E_vec_aligned = align_paths_at_index(dq_W_E_vec_filtered, j)\n dq_B_H_vec_aligned = align_paths_at_index(dq_B_H_vec_filtered, j)\n\n dq_W_E_vec_inlier = []\n dq_B_H_vec_inlier = []\n\n # Loop over the indices again starting at the first index to find either:\n # - The first set of inliers of at least size min_num_inliers\n # OR\n # - The largest set of inliers using an exhaustive search\n for i in range(0, num_poses_after_filtering):\n dq_W_E = dq_W_E_vec_aligned[i]\n dq_B_H = dq_B_H_vec_aligned[i]\n scalar_parts_W_E = dq_W_E.scalar()\n scalar_parts_B_H = dq_B_H.scalar()\n # Append the inliers to the filtered dual quaternion vectors.\n if np.allclose(scalar_parts_W_E.dq, scalar_parts_B_H.dq, atol=1e-2):\n dq_W_E_vec_inlier.append(dq_W_E)\n dq_B_H_vec_inlier.append(dq_B_H)\n\n assert len(dq_W_E_vec_inlier) == len(dq_B_H_vec_inlier)\n\n if config.enable_exhaustive_search:\n has_the_most_inliers = (len(dq_W_E_vec_inlier) > best_num_inliers)\n if has_the_most_inliers:\n best_num_inliers = len(dq_W_E_vec_inlier)\n best_idx = j\n best_dq_W_E_vec_inlier = copy.deepcopy(dq_W_E_vec_inlier)\n best_dq_B_H_vec_inlier = copy.deepcopy(dq_B_H_vec_inlier)\n print(\"Found new best start idx: {} number of inliers: {}\".format(\n best_idx, best_num_inliers))\n else:\n has_enough_inliers = (len(dq_W_E_vec_inlier) > config.min_num_inliers)\n if has_enough_inliers:\n best_idx = j\n best_num_inliers = len(dq_W_E_vec_inlier)\n break\n\n assert (j + 1) < num_poses_after_filtering, (\n \"Reached over all filtered poses and couldn't find \"\n \"enough inliers. 
num_samples: {}, num_inliers: {}\".format(\n num_poses_after_filtering, len(dq_W_E_vec_inlier)))\n\n if config.enable_exhaustive_search:\n assert best_idx != -1, \"Not enough inliers found!\"\n dq_W_E_vec_inlier = best_dq_W_E_vec_filtered\n dq_B_H_vec_inlier = best_dq_B_H_vec_inlier\n\n aligned_dq_B_H = align_paths_at_index(dq_B_H_vec_inlier, best_idx)\n aligned_dq_W_E = align_paths_at_index(dq_W_E_vec_inlier, best_idx)\n\n print(\"Best start idx: {}\".format(best_idx))\n print(\"Removed {} outliers from the (prefiltered) poses.\".format(\n len(dq_B_H_vec_filtered) - len(dq_B_H_vec_inlier)))\n print(\"Running the hand-eye calibration with the remaining {} pairs of \"\n \"poses\".format(len(dq_B_H_vec_inlier)))\n\n try:\n # Compute hand-eye calibration on the inliers.\n (dq_H_E_estimated,\n singular_values,\n bad_singular_values) = compute_hand_eye_calibration(\n dq_B_H_vec_inlier, dq_W_E_vec_inlier,\n config.hand_eye_calibration_scalar_part_equality_tolerance)\n dq_H_E_estimated.normalize()\n except:\n print(\"\\n\\n Hand-eye calibration FAILED! \"\n \"algorithm_name: {} exception: \\n\\n\".format(\n config.algorithm_name, sys.exc_info()[0]))\n end_time = timeit.default_timer()\n runtime = end_time - start_time\n return (False, None, (None, None),\n None, num_poses_after_filtering, runtime, None, None)\n\n # Evaluate hand-eye calibration either on all poses aligned by the\n # sample index or only on the inliers.\n if config.ransac_evaluate_refined_model_on_inliers_only:\n (poses_B_H, poses_W_H) = get_aligned_poses(dq_B_H_vec_inlier,\n dq_W_E_vec_inlier,\n dq_H_E_estimated)\n else:\n # TODO(mfehr): There is some redundancy here, fix it!\n aligned_dq_B_H = align_paths_at_index(dq_B_H_vec, best_idx)\n aligned_dq_W_E = align_paths_at_index(dq_W_E_vec, best_idx)\n (poses_B_H, poses_W_H) = get_aligned_poses(aligned_dq_B_H,\n aligned_dq_W_E,\n dq_H_E_estimated)\n\n\n (rmse_position,\n rmse_orientation,\n inlier_flags) = evaluate_alignment(poses_B_H, poses_W_H, config, config.visualize)\n\n end_time = timeit.default_timer()\n runtime = end_time - start_time\n\n pose_vec = dq_H_E_estimated.to_pose()\n print(\"Solution found by aligned based on idx: {}\\n\"\n \"\\t\\tNumber of inliers: {}\\n\"\n \"\\t\\tRMSE position: {:10.4f}\\n\"\n \"\\t\\tRMSE orientation: {:10.4f}\\n\"\n \"\\t\\tdq_H_E: {}\\n\"\n \"\\t\\tpose_H_E: {}\\n\"\n \"\\t\\tTranslation norm: {:10.4f}\".format(\n best_idx, best_num_inliers, rmse_position,\n rmse_orientation, dq_H_E_estimated,\n pose_vec, np.linalg.norm(pose_vec[0:3])))\n\n return (True, dq_H_E_estimated,\n (rmse_position, rmse_orientation),\n best_num_inliers, num_poses_after_filtering, runtime, singular_values, bad_singular_values)", "def _extrapolate(self):\n maxrho = self.maxrho\n x = np.linspace(1.001, maxrho, int(self.nrho/5))\n rho1 = self.rho # rho up to 1\n dec_l = 0.01\n ni_ov = np.zeros((self.nion, len(x)), dtype=float)\n ninew = np.zeros((self.nion, self.nrho+len(x)),dtype=float)\n ne_ov1 = self.ne[self.nrho-1]*np.exp(-((x-1.)/dec_l))\n te_ov1 = self.te[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n ti_ov1 = self.ti[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n vt_ov1 = self.vt[self.nrho-1]*np.exp(-(x-1.)/dec_l)\n for i in range(self.nion):\n ni_ov[i,:] = self.ni[i,self.nrho-1]*np.exp(-(x-1.)/dec_l)\n ninew[i,:] = np.concatenate([self.ni[i,:], ni_ov[i,:]])\n self.ni = ninew\n self.rho = np.concatenate([rho1, x])\n self.nrho = len(rho1)+len(x)\n self.ne = np.concatenate([self.ne, ne_ov1])\n self.te = np.concatenate([self.te, te_ov1])\n self.ti = np.concatenate([self.ti, ti_ov1])\n 
self.vt = np.concatenate([self.vt, vt_ov1])", "def derive(params):\n x, y, dx, dy = params\n r = (x ** 2 + y ** 2) ** 0.5\n return np.array([dx, dy, -G * M * x / (r ** 3), -G * M * y / (r ** 3)])", "def log_operator(SE3): \n #print('SE3 log: ', SE3)\n R = SE3[:3,:3]\n t = SE3[:3,3]\n theta = arccos(0.5*(trace(R)-1)) # radians\n lnR = 0.5*(theta/sin(theta))*(R-R.T)\n omega = vee(lnR) # vee operator\n omega_skew_sym = lnR#skew_symmetric(omega.reshape(-1,))\n \n if theta <= 1e-10:\n V = eye(3)\n else:\n V = eye(3) + \\\n (theta**-2)*(1-cos(theta))*omega_skew_sym + \\\n (theta**-3)*(theta-sin(theta))*(omega_skew_sym @ omega_skew_sym)\n neu = inv(V) @ t\n\n # if theta <= 1e-10:\n # Vinv = eye(3)\n # else:\n # theta_half = 0.5*theta \n # Vinv = eye(3) - 0.5*omega_skew_sym + \\\n # (theta**-2)*(1- (theta_half*cos(theta_half)/sin(theta_half)))*(omega_skew_sym @ omega_skew_sym)\n # neu = Vinv @ t\n\n return np.hstack((neu, omega)).reshape(-1,1)", "def GetEigenvector(self, i):\n return _hypre.HypreAME_GetEigenvector(self, i)", "def _gv(self):\n return self.y - self.err_inf", "def xx(self):\n return self.exterior[:, 0]", "def _vce(self):\n sum = 0.0\n for sail in self.sails:\n cl2 = sail.cl(self.awa)**2\n cd2 = sail.cd(self.awa)**2\n sum += sail.area * sail.vce * sail.bk * np.sqrt(cl2+cd2)\n self._area()\n deltaCH = 0 if self.sails[1].up!=True else (1-self.ftj)*0.05*self.sails[1].IG\n Zce = sum/(self.area*np.sqrt(self.cl**2+self.cd**2)) - deltaCH\n return (Zce*(1-0.203*(1-self.flat)-0.451*(1-self.flat)*(1-self.fractionality)))", "def Q_e(params):\n return (params['Q_e_real'].value +\n 1j * params['Q_e_imag'].value)", "def partial_y(img):\n\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n kernel = np.array([[0.5], [0], [-0.5]])\n out = conv(img, kernel)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def calc_emi_dif(tgt_pt, src_pt, src_dir, coef=1):\r\n emi_params = [\r\n numpy.zeros(tgt_pt.shape[-1], tgt_pt.dtype),\r\n numpy.zeros((tgt_pt.shape[-1], tgt_pt.shape[-1]), tgt_pt.dtype)\r\n ]\r\n\r\n # 'r' vector\r\n r = tgt_pt - src_pt\r\n\r\n src_dir_len2 = src_dir.dot(src_dir)\r\n if not src_dir_len2:\r\n return emi_params # Zero length, return zero EMI params\r\n\r\n # Vector projections of \"r\" in the direction of \"src_dir\"\r\n l = src_dir.dot(src_dir.dot(r) / src_dir_len2)\r\n R = r - l\r\n\r\n r_len = numpy.sqrt(r.dot(r))\r\n if not r_len:\r\n return None # Target point coincides with \"src_pt\"\r\n\r\n # Calculate the differential Biot–Savart law (https://en.wikipedia.org/wiki/Biot–Savart_law):\r\n # dl x r / r^3\r\n B = numpy.cross(src_dir, r) / r_len ** 3\r\n\r\n # Scale by a coefficient, like current, magnetic constant and 1/(4*pi)\r\n B *= coef\r\n\r\n emi_params[0] = B\r\n\r\n # Calculate the partial derivatives from Biot–Savart law \"R/sqrt(l^2 + R^2)^3\" (see calc_emi())\r\n # along \"l\" and \"R\" axes.\r\n\r\n # Gradient component along 'l':\r\n # Use derivative calculator https://www.derivative-calculator.net/ (substitute l with x):\r\n # input: R / sqrt(x^2 + R^2)^3, result: -3Rx / (x^2 + R^2)^(5/2)\r\n # Substitute back x to l, then sqrt(l^2 + R^2) to r:\r\n # result: -3 * R * l / r^5\r\n R_len2 = R.dot(R)\r\n l_len2 = l.dot(l)\r\n R_len = numpy.sqrt(R_len2)\r\n l_len = numpy.sqrt(l_len2)\r\n if l.dot(src_dir) < 0:\r\n l_len = -l_len\r\n\r\n l_comp = -3 * R_len * l_len / r_len ** 5\r\n\r\n # Gradient component along 'R':\r\n # Use 
derivative calculator https://www.derivative-calculator.net/ (substitute R with x):\r\n # input: x / sqrt(x^2 + l^2)^3, result: - (2x^2 - l^2) / (x^2 + l^2)^(5/2)\r\n # Substitute back x to R, then sqrt(l^2 + R^2) to r:\r\n # result: (l^2 - 2R^2) / r^5\r\n\r\n R_comp = (l_len2 - 2 * R_len2) / r_len ** 5\r\n\r\n l_comp *= coef\r\n R_comp *= coef\r\n\r\n # Combine l_comp and R_comp into a Jacobian matrix\r\n emi_params[1] = build_jacobian(l_comp, R_comp, src_dir, R, B)\r\n\r\n return emi_params", "def compute_representers(V, inertia, rhs):\n\tM = inertia\n\n\tx = fem.Function(V)\n\tx2 = fem.Function(V)\n\n\tfem.solve(M, x2.vector(), rhs.vector())\n\n\t# H^2 metric\n\tv = fem.TestFunction(V)\n\tx3 = x2*v*dx()\n\tM3x = fem.assemble(x3)\n\tfem.solve(M,x.vector(),M3x)\n\n\n\t# Compute the norm\n\tH1 = x2.vector().inner(rhs.vector())\n\tH2 = x.vector().inner(rhs.vector())\n\n\treturn x2, x, H1, H2", "def get_mi_mvn(x, y):\n\n d = x.shape[1]\n\n # hx = 0.5 * log((2 * np.pi * np.e)**d * det(np.cov(x.T)))\n # hy = 0.5 * log((2 * np.pi * np.e)**d * det(np.cov(y.T)))\n # hxy = 0.5 * log((2 * np.pi * np.e)**(2*d) * det(np.cov(x.T, y=y.T)))\n # mi = hx + hy - hxy\n\n # hx = 0.5 * log(det(2*np.pi*np.e*np.cov(x.T)))\n # hy = 0.5 * log(det(2*np.pi*np.e*np.cov(y.T)))\n # hxy = 0.5 * log(det(2*np.pi*np.e*np.cov(np.c_[x,y].T)))\n hx = get_h_mvn(x)\n hy = get_h_mvn(y)\n hxy = get_h_mvn(np.c_[x,y])\n mi = hx + hy - hxy\n\n # mi = 0.5 * (log(det(np.cov(x.T))) + log(det(np.cov(y.T))) - log(det(np.cov(np.c_[x,y].T))))\n\n return mi", "def propiosObservable(obs):\n for i in range(len(obs)):\n for j in range(len(obs[0])):\n obs[i][j]=complex(obs[i][j][0],obs[i][j][1])\n a=np.array(obs)\n x,v = np.linalg.eig(a)\n valPropios = [(c.real,c.imag) for c in x]\n vectPropios = [[(c.real,c.imag) for c in y]for y in v]\n return valPropios,vectPropios", "def innovation(observation: np.ndarray, observation_predicted: np.ndarray) -> np.ndarray:\n return observation - observation_predicted", "def ivp(self):\n if self.__ivp is None:\n self.__ivp = ivp.IVP(self.evaluate_rhs, self.evaluate_jacobian)\n return self.__ivp", "def obj(k_next) : \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec", "def Iq(q, lorentz_scale, porod_scale, cor_length, porod_exp, lorentz_exp):\n with errstate(divide='ignore'):\n porod = porod_scale / q**porod_exp\n lorentz = lorentz_scale / (1.0 + (q * cor_length)**lorentz_exp)\n inten = porod + lorentz\n return inten", "def E(q, r0, x, y):\n den = np.hypot(x - r0[0], y - r0[1]) ** 3\n return q * (x - r0[0]) / den, q * (y - r0[1]) / den", "def localized_E(E1, i, j, x, y):\n oldval = x[i, j]\n newval = oldval * -1 # flip\n # local computations\n E2 = E1 - (h * oldval) + (h * newval)\n E2 = E2 + (eta * y[i, j] * oldval) - (eta * y[i, j] * newval)\n adjacent = [(0, 1), (0, -1), (1, 0), (-1, 0)]\n neighbors = [x[i + di, j + dj] for di, dj in adjacent\n if is_valid(i + di, j + dj, x.shape)]\n E2 = E2 + beta * sum(a * oldval for a in neighbors)\n E2 = E2 - beta * sum(a * newval for a in neighbors)\n return oldval, newval, E1, E2", "def getEta(self, pose):\n vector_x = np.cos(self.ori) * (pose.x - self.pos.x) + np.sin(self.ori) * (pose.y - self.pos.y)\n vector_y = -np.sin(self.ori) * (pose.x - 
self.pos.x) + np.cos(self.ori) * (pose.y - self.pos.y)\n eta = math.atan2(vector_y, vector_x)\n return eta", "def A_coefficients_ellipsoid(v, DD, bDDisDelta=False):\n #v can be given as an array with X/Y/Z cartesian dimensions being the last.\n #\"\"\"\n if bDDisDelta:\n delta=DD\n else:\n delta=Ddelta_ellipsoid(dd)\n #v=_sanitise_v(v)\n #v2=np.square(v)\n #v4=np.square(v2)\n #fact2=np.multiply(0.75,np.sum(v4))-0.25\n v2 = [ v[i]*v[i] for i in range(3) ]\n v4 = [ v2[i]*v2[i] for i in range(3) ]\n fact2 = 0.25*( 3.0*(v4[0]+v4[1]+v4[2])-1.0)\n fact3 = 1.0/12.0*(delta[0]*(3*v4[0]+6*v2[1]*v2[2]-1) + delta[1]*(3*v4[1]+6*v2[0]*v2[2]-1) + delta[2]*(3*v4[2]+6*v2[0]*v2[1]-1))\n A=np.zeros(5)\n A[0]= 3*v2[1]*v2[2]\n A[1]= 3*v2[0]*v2[2]\n A[2]= 3*v2[0]*v2[1]\n A[3]= fact2-fact3\n A[4]= fact2+fact3\n return A", "def value_inv(self, theta):\n # diag_gamma = np.dot(theta.T, self.X.T)\n # logistic_term = self.logistic_fn(diag_gamma)\n # diag_gamma = logistic_term * (1.0 - logistic_term)\n # diag_gamma_inv = 1.0 / diag_gamma\n # gamma_inv = np.diag(diag_gamma_inv)\n # inv_mat = np.linalg.pinv(gamma_inv + self.XXt)\n # return self.alpha*(np.eye(self.dim) - np.dot(np.dot(self.X.T, inv_mat), self.X))\n G = self.value(theta) # d*d matrix inversion\n return np.linalg.pinv(G)", "def homogenous_transformation(x,y):\n y_start = 0.3\n y_stop = 1\n src = np.float32([[0.45,y_start],[0.55, y_start],[0.1,y_stop],[0.9, y_stop]])\n dst = np.float32([[0.45, y_start],[0.55, y_start],[0.45, y_stop],[0.55,y_stop]])\n M_inv = cv2.getPerspectiveTransform(dst,src)\n \n ones = np.ones((1,len(y)))\n coordinates = np.vstack((x, y, ones))\n trans = np.matmul(M_inv, coordinates)\n \n x_vals = trans[0,:]/trans[2,:]\n y_vals = trans[1,:]/trans[2,:]\n return x_vals, y_vals", "def get_pose_estimation(self, img):\n\n # Convert image to a 1D numpy array\n input_data = np.expand_dims(img.copy(), axis=0)\n\n # check the type of the input tensor\n floating_model = self.input_details[0]['dtype'] == np.float32\n if floating_model:\n input_data = (np.float32(input_data) - 127.5) / 127.5\n\n # Setting the value of the input tensor\n self.interpreter.set_tensor(self.input_details[0]['index'], input_data)\n\n # Run the computation\n self.interpreter.invoke()\n\n # Extract output data from the interpreter\n output_data = self.interpreter.get_tensor(self.output_details[0]['index'])\n offset_data = self.interpreter.get_tensor(self.output_details[1]['index'])\n\n # Heatmaps contains the coincidence of keypoint, it can be used to locate the approximate location.\n heatmaps = np.squeeze(output_data)\n # Offset Vectors contains the exact position of each keypoint. 
First 17 layers correspond to the x\n # coordinates and the last 17 correspond to the y coordinates\n offsets = np.squeeze(offset_data)\n\n pose = get_keypoints_positions(heatmaps, offsets)\n # Show image with pose\n #cv2.imshow(\"frame\", cv2.resize(self.draw_kps(img, pose), (500, 500)))\n return pose", "def like_one(theta,dt,dmag,sigma):\n\n gamma, A = theta\n aux=(1/np.sqrt(2*np.pi*Veff2(dt,sigma,A,gamma)))*np.exp(-1.0*(dmag**2)/(2.0*Veff2(dt,sigma,A,gamma)))\n\n return aux", "def eclogite_foliated():\n\n rho = 3300.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 203.45; C[0,1] = 67.76; C[0,2] = 64.47; C[0,3] = 0.08; C[0,4] = 1.9; C[0,5] = -0.4\n C[1,0] = C[0,1]; C[1,1] = 220.58; C[1,2] = 63.65; C[1,3] = 0.46; C[1,4] = 0.59; C[1,5] = 0.06\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 189.75; C[2,3] = 0.13; C[2,4] = 0.95; C[2,5] = -0.2\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 66.32; C[3,4] = -0.27; C[3,5] = 0.73\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 65.77; C[4,5] = -0.02\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 70.75\n\n return C, rho", "def _measmod_ekf0(ivp, prior, evlvar):\n spatialdim = prior.spatialdim\n h0 = prior.proj2coord(coord=0)\n h1 = prior.proj2coord(coord=1)\n\n def dyna(t, x, **kwargs):\n return h1 @ x - ivp.rhs(t, h0 @ x)\n\n def diff(t, **kwargs):\n return evlvar * np.eye(spatialdim)\n\n def jaco(t, x, **kwargs):\n return h1\n\n return DiscreteGaussianModel(dyna, diff, jaco)", "def equilibrium_ionization(self):\n # Solve system of equations using singular value decomposition\n _, _, V = np.linalg.svd(self._rate_matrix.value)\n # Select columns of V with smallest eigenvalues (returned in descending order)\n # NOTE: must take the absolute value as the SVD solution is only accurate up\n # to the sign. 
We require that the solutions must be positive.\n ioneq = np.fabs(V[:, -1, :])\n ioneq /= ioneq.sum(axis=1)[:, np.newaxis]\n\n return u.Quantity(ioneq)", "def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)", "def posdef_inv_eig(tensor, identity, damping):\n eigenvalues, eigenvectors = tf.self_adjoint_eig(tensor + damping * identity)\n return tf.matmul(eigenvectors / eigenvalues, eigenvectors, transpose_b=True)", "def get_x(EQ, M):\n return (EQ[0] * (EQ[0] * M[0] - EQ[1] * M[1]) - EQ[1] * EQ[2]) / (EQ[1] ** 2 + EQ[0] ** 2)", "def find_position(self, xv, yv):\n # Convert position in spheric coord\n phi = xv*self.FOV_img/360/self.img_res\n theta = yv*self.FOV_img_Y/180/self.img_res_Y\n phi2 = phi+(360-self.FOV_img)/2\n theta2 = theta+(180-self.FOV_img_Y)/2\n\n u, v, w = spheric2cart(np.radians(theta2), np.radians(phi2)) # give cartesian coord of pixel\n\n # ignore errors due to /0 -> inf, -inf\n # divide (w/v) and invalid arctan2()\n with np.errstate(all='ignore'): # OPTIMIZE: see comment about pi = -pi and don't matter if -0 or 0 -> just replace by pi\n beta = -np.arctan(w/v)\n# beta2 = -np.arctan2(w, v)\n\n# v2 = np.dot(rotation_matrix(beta), [u, v, w]) # take 3*3 created matrix and aplly to vector\n matrix = rotation_matrix(beta)\n u2 = matrix[0, 0]*u\n v2 = matrix[1, 1]*v+matrix[1, 2]*w\n w2 = matrix[2, 1]*v+matrix[2, 2]*w\n _, seen_angle = cart2spheric(u2, v2, w2) # return phi in equator \"projection\"\n\n seen_angle = np.degrees(seen_angle)\n seen_angle = np.mod(seen_angle, 360) # define phi [0, 360]\n\n# seen_angle[seen_angle > 360] -= 360\n deviated_angle = np.zeros(seen_angle.shape)\n deviated_angle[seen_angle < 180] = self.interpolation(seen_angle[seen_angle < 180])\n deviated_angle[seen_angle >= 180] = 360 - self.interpolation(360-seen_angle[seen_angle >= 180])\n# np.flip(deviated_angle, 1) \" mais probleme overlap entre left et right\n\n theta = pi/2# *np.ones(deviated_angle.shape)\n phi = np.radians(deviated_angle)\n u3, v3, w3 = spheric2cart(theta, phi) #get cart coord of deviated pixel\n\n matrix = rotation_matrix(-beta)\n u4 = matrix[0, 0]*u3\n v4 = matrix[1, 1]*v3+matrix[1, 2]*w3\n w4 = matrix[2, 1]*v3+matrix[2, 2]*w3\n\n theta, phi = cart2spheric(u4, v4, w4) #give spheric coord of deviated pixel\n\n theta, phi = np.degrees(theta), np.degrees(phi)\n\n phi -= (360-self.FOV_img)/2\n theta -= (180-self.FOV_img_Y)/2\n\n with np.errstate(all='ignore'): # OPTIMIZE\n phi = np.mod(phi, 360) # define phi [0, 360]\n theta = np.mod(theta, 180) # define phi [0, 360]\n\n phi[phi == 360] = 0\n xv2 = phi*360/self.FOV_img*self.img_res\n yv2 = theta*180/self.FOV_img_Y*self.img_res_Y #give deviated angle pixel position\n\n xv2[np.isnan(xv2)] = -1\n yv2[np.isnan(yv2)] = -1\n\n xv2 = np.array(xv2, dtype=int)\n yv2 = np.array(yv2, dtype=int)\n\n return xv2, yv2", "def value(self):\n updets = self._dets[0][:, :, self._det_map[0]]\n dndets = self._dets[1][:, :, self._det_map[1]]\n upref = np.amax(self._dets[0][1])\n dnref = np.amax(self._dets[1][1])\n phases = updets[0] * dndets[0]\n logvals = updets[1] - upref + dndets[1] - dnref\n\n wf_val = np.einsum(\n \"d,id->i\", self.parameters[\"det_coeff\"], phases * np.exp(logvals)\n )\n\n wf_sign = self.get_phase(wf_val)\n wf_logval = np.log(np.abs(wf_val)) + upref + dnref\n return wf_sign, wf_logval", "def _affine_coordinates(self, Vrep_object):\n if '_affine_coordinates_pivots' not in self.__dict__:\n v_list = [ vector(v) for v in self.Vrepresentation() ]\n if len(v_list)>0:\n origin = v_list[0]\n 
v_list = [ v - origin for v in v_list ]\n coordinates = matrix(v_list)\n self._affine_coordinates_pivots = coordinates.pivots()\n \n v = list(Vrep_object)\n if len(v) != self.ambient_dim():\n raise ValueError('Incorrect dimension: '+str(v))\n\n return vector(self.field(), [ v[i] for i in self._affine_coordinates_pivots ])", "def imwofz_nonvector(x, y):\n ncut=27\n xy=x*y \n xyp=2.0*xy/jnp.pi \n exx=jnp.exp(-x*x) \n f=-exx*erfcx(y)*jnp.sin(2.0*xy)+x/jnp.pi*exx*jnp.sinc(xyp) \n n=jnp.arange(1,ncut+1) \n n2=n*n \n vec0=0.5*n/(0.25*n2+ y*y) \n vec1=jnp.exp(-(0.25*n2+x*x)) \n vec4=jnp.exp(-(0.5*n+x)*(0.5*n+x)) \n vec5=jnp.exp(-(0.5*n-x)*(0.5*n-x)) \n Sigma1=jnp.dot(vec0,vec1)\n Sigma4=jnp.dot(vec0,vec4)\n Sigma5=jnp.dot(vec0,vec5)\n f = f + 1.0/jnp.pi*(y*jnp.sin(2.0*xy)*Sigma1 + 0.5*(Sigma5-Sigma4))\n \n return f", "def epidote():\n\n rho = 3465.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 211.5; C[0,1] = 65.6; C[0,2] = 43.2; C[0,3] = 0.; C[0,4] = -6.5; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 239.; C[1,2] = 43.6; C[1,3] = 0.; C[1,4] = -10.4; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 202.1; C[2,3] = 0.; C[2,4] = -20.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 39.1; C[3,4] = 0.; C[3,5] = -2.3\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 43.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 79.5\n\n return C, rho", "def each_evidence(y_, f, fh, v, s, vh, N, D):\n epsilon = 1e-5\n alpha = 1.0\n beta = 1.0\n lam = alpha / beta\n tmp = (vh @ (f @ np.ascontiguousarray(y_)))\n for _ in range(11):\n # should converge after at most 10 steps\n # typically converge after two or three steps\n gamma = (s / (s + lam)).sum()\n # A = v @ np.diag(alpha + beta * s) @ v.transpose() # no need to compute A\n # A_inv = v @ np.diag(1.0 / (alpha + beta * s)) @ v.transpose() # no need to compute A_inv\n m = v @ (tmp * beta / (alpha + beta * s))\n alpha_de = (m * m).sum()\n alpha = gamma / (alpha_de + epsilon)\n beta_de = ((y_ - fh @ m) ** 2).sum()\n beta = (N - gamma) / (beta_de + epsilon)\n new_lam = alpha / beta\n if np.abs(new_lam - lam) / lam < 0.01:\n break\n lam = new_lam\n evidence = D / 2.0 * np.log(alpha) \\\n + N / 2.0 * np.log(beta) \\\n - 0.5 * np.sum(np.log(alpha + beta * s)) \\\n - beta / 2.0 * (beta_de + epsilon) \\\n - alpha / 2.0 * (alpha_de + epsilon) \\\n - N / 2.0 * np.log(2 * np.pi)\n return evidence / N, alpha, beta, m", "def end_effectors_pos(self):\n def relative_pos_in_egocentric_frame(physics):\n end_effector = physics.bind(self._entity.end_effectors).xpos\n torso = physics.bind(self._entity.root_body).xpos\n xmat = np.reshape(physics.bind(self._entity.root_body).xmat, (3, 3))\n return np.reshape(np.dot(end_effector - torso, xmat), -1)\n return observable.Generic(relative_pos_in_egocentric_frame)", "def solutionCovariance(self):\n return self.standardError2()*self.AtAinv", "def yy(self):\n return self.exterior[:, 1]", "def _measmod_ekf1(ivp, prior, evlvar):\n spatialdim = prior.spatialdim\n h0 = prior.proj2coord(coord=0)\n h1 = prior.proj2coord(coord=1)\n\n def dyna(t, x, **kwargs):\n return h1 @ x - ivp.rhs(t, h0 @ x)\n\n def diff(t, **kwargs):\n return evlvar * np.eye(spatialdim)\n\n def jaco(t, x, **kwargs):\n return h1 - ivp.jacobian(t, h0 @ x) @ h0\n\n return DiscreteGaussianModel(dyna, diff, jaco)", "def get_ivar(data, s):\n return data.ivar.value / (1 + s**2 * data.ivar.value)", "def impurity(x,iw):\n Vi = 0\n ai = 2*a0+iw*1.5*a0+0.5*(iw-1)*a0\n 
\n #Impurity size\n size = 0.25*1.5*a0\n if (x > ai-size and x < ai):\n Vi = 300/Eh\n elif(x == ai or x == ai-size):\n Vi = 150/Eh\n \n return Vi", "def compute_hand_eye_calibration(dq_B_H_vec_inliers, dq_W_E_vec_inliers,\n scalar_part_tolerance=1e-2,\n enforce_same_non_dual_scalar_sign=True):\n n_quaternions = len(dq_B_H_vec_inliers)\n\n # Verify that the first pose is at the origin.\n assert np.allclose(dq_B_H_vec_inliers[0].dq,\n [0., 0., 0., 1.0, 0., 0., 0., 0.],\n atol=1.e-8), dq_B_H_vec_inliers[0]\n assert np.allclose(dq_W_E_vec_inliers[0].dq,\n [0., 0., 0., 1.0, 0., 0., 0., 0.],\n atol=1.e-8), dq_W_E_vec_inliers[0]\n\n if enforce_same_non_dual_scalar_sign:\n for i in range(n_quaternions):\n dq_W_E = dq_W_E_vec_inliers[i]\n dq_B_H = dq_B_H_vec_inliers[i]\n if ((dq_W_E.q_rot.w < 0. and dq_B_H.q_rot.w > 0.) or\n (dq_W_E.q_rot.w > 0. and dq_B_H.q_rot.w < 0.)):\n dq_W_E_vec_inliers[i].dq = -dq_W_E_vec_inliers[i].dq.copy()\n\n # 0. Stop alignment if there are still pairs that do not have matching\n # scalar parts.\n for j in range(n_quaternions):\n dq_B_H = dq_W_E_vec_inliers[j]\n dq_W_E = dq_B_H_vec_inliers[j]\n\n scalar_parts_B_H = dq_B_H.scalar()\n scalar_parts_W_E = dq_W_E.scalar()\n\n assert np.allclose(scalar_parts_B_H.dq, scalar_parts_W_E.dq,\n atol=scalar_part_tolerance), (\n \"Mismatch of scalar parts of dual quaternion at idx {}:\"\n \" dq_B_H: {} dq_W_E: {}\".format(j, dq_B_H, dq_W_E))\n\n # 1.\n # Construct 6n x 8 matrix T\n t_matrix = setup_t_matrix(dq_B_H_vec_inliers, dq_W_E_vec_inliers)\n\n # 2.\n # Compute SVD of T and check if only two singular values are almost equal to\n # zero. Take the corresponding right-singular vectors (v_7 and v_8)\n U, s, V = np.linalg.svd(t_matrix)\n\n # Check if only the last two singular values are almost zero.\n bad_singular_values = False\n for i, singular_value in enumerate(s):\n if i < 6:\n if singular_value < 5e-1:\n bad_singular_values = True\n else:\n if singular_value > 5e-1:\n bad_singular_values = True\n v_7 = V[6, :].copy()\n v_8 = V[7, :].copy()\n # print(\"v_7: {}\".format(v_7))\n # print(\"v_8: {}\".format(v_8))\n\n # 3.\n # Compute the coefficients of (35) and solve it, finding two solutions for s.\n u_1 = v_7[0:4].copy()\n u_2 = v_8[0:4].copy()\n v_1 = v_7[4:8].copy()\n v_2 = v_8[4:8].copy()\n # print(\"u_1: {}, \\nu_2: {}, \\nv_1: {}, \\nv_2: {}\".format(u_1, u_2, v_1, v_2))\n\n a = np.dot(u_1.T, v_1)\n assert a != 0.0, \"This would involve division by zero.\"\n b = np.dot(u_1.T, v_2) + np.dot(u_2.T, v_1)\n c = np.dot(u_2.T, v_2)\n # print(\"a: {}, b: {}, c: {}\".format(a, b, c))\n square_root_term = b * b - 4.0 * a * c\n\n if square_root_term < -1e-2:\n assert False, \"square_root_term is too negative: {}\".format(\n square_root_term)\n if square_root_term < 0.0:\n square_root_term = 0.0\n s_1 = (-b + np.sqrt(square_root_term)) / (2.0 * a)\n s_2 = (-b - np.sqrt(square_root_term)) / (2.0 * a)\n # print(\"s_1: {}, s_2: {}\".format(s_1, s_2))\n\n # 4.\n # For these two s values, compute s^2*u_1^T*u_1 + 2*s*u_1^T*u_2 + u_2^T*u_2\n # From these choose the largest to compute lambda_2 and then lambda_1\n solution_1 = s_1 * s_1 * np.dot(u_1.T, u_1) + 2.0 * \\\n s_1 * np.dot(u_1.T, u_2) + np.dot(u_2.T, u_2)\n solution_2 = s_2 * s_2 * np.dot(u_1.T, u_1) + 2.0 * \\\n s_2 * np.dot(u_1.T, u_2) + np.dot(u_2.T, u_2)\n\n if solution_1 > solution_2:\n assert solution_1 > 0.0, solution_1\n lambda_2 = np.sqrt(1.0 / solution_1)\n lambda_1 = s_1 * lambda_2\n else:\n assert solution_2 > 0.0, solution_2\n lambda_2 = np.sqrt(1.0 / solution_2)\n 
lambda_1 = s_2 * lambda_2\n # print(\"lambda_1: {}, lambda_2: {}\".format(lambda_1, lambda_2))\n\n # 5.\n # The result is lambda_1*v_7 + lambda_2*v_8\n dq_H_E = DualQuaternion.from_vector(lambda_1 * v_7 + lambda_2 * v_8)\n # Normalize the output, to get rid of numerical errors.\n dq_H_E.normalize()\n\n if (dq_H_E.q_rot.w < 0.):\n dq_H_E.dq = -dq_H_E.dq.copy()\n return (dq_H_E, s, bad_singular_values)", "def F_trans(self):\n common_scale = self.edp_par['common_scale'].value\n R_HM = self.edp_par['R_HM'].value\n X_h = self.edp_par['X_h'].value\n psi = self.edp_par['psi'].value \n arg = self.qz*X_h*np.cos(psi) - self.qx*X_h*np.sin(psi)\n return common_scale * (R_HM*np.cos(arg) - 1)", "def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))", "def A_calc(self, x, y, theta, v, omega, dt):\n # Initialize 5x5 A matrix\n A = np.zeros((5,5))\n A[0,0] = 1\n A[1,1] = 1\n A[2,2] = 1\n A[3,3] = 1\n A[4,4] = 1\n \n A[0,2] = -1 * v * np.sin(theta) * dt\n A[0,3] = np.cos(theta) * dt\n A[1,2] = v * np.cos(theta) * dt\n A[1,3] = np.sin(theta) * dt\n A[2,4] = dt\n \n return(A)", "def test_y_generate():\n a = Attractor()\n #say x, y, z = [0.1, 0.0, 0.0]\n\n dx = (10.0 * (0.0 - 0.1)) * (80.0-0.0)/10000 + 0.1\n dy = (0.1 * (28 - 0.0) - 0.0) * (80.0-0.0)/10000 + 0.0\n dz = ((0.1 * 0.0) - (8/3 * 0.0)) * (80.0-0.0)/10000 + 0.0\n ex_1 = np.array([dx, dy, dz])\n\n dx2 = (10.0 * (dy - dx)) * (80.0-0.0)/10000.0 + dx \n dy2 = (dx * (28.0 - dz) - dy) * (80.0-0.0)/10000.0 + dy\n dz2 = ((dx * dy) - (8/3 * dz)) * (80.0-0.0)/10000.0 + dz\n ex_2 = np.array([dx2, dy2, dz2])\n\n dx3 = (10.0 * (dy2 - dx2)) * (80.0-0.0)/10000.0 + dx2\n dy3 = (dx2 * (28.0 - dz2) - dy2) * (80.0-0.0)/10000.0 + dy2\n dz3 = ((dx2 * dy2) - (8/3 * dz2)) * (80.0-0.0)/10000.0 + dz2\n ex_3 = np.array([dx3, dy3, dz3])\n\n dx4 = (10.0 * (dy3 - dx3)) * (80.0-0.0)/10000.0 + dx3\n dy4 = (dx3 * (28 - dz3) - dy3) * (80.0-0.0)/10000.0 + dy3\n dz4 = ((dx3 * dy3) - (8/3 * dz3)) * (80.0-0.0)/10000.0 + dz3\n ex_4 = np.array([dx4, dy4, dz4])\n\n dx5 = (10.0 * (dy4 - dx4)) * (80.0-0.0)/10000.0 + dx4\n dy5 = (dx4 * (28 - dz4) - dy4) * (80.0-0.0)/10000.0 + dy4\n dz5 = ((dx4 * dy4) - (8/3 * dz4)) * (80.0-0.0)/10000.0 + dz4\n ex_5 = np.array([dx5, dy5, dz5])\n\n \n a.evolve(order = 4)\n y_list = a.solution['y'].tolist()\n \n for i in y_list[:6]:\n yy = round(i, 2)\n for j in [0.0, dy, dy2, dy3, dy4, dy5]:\n yyy = round(j, 2)\n \n print (\"Actual increments: \", yy)#str(a.solution()['x']).strip('[]'))\n print (\"Expected increments: \", yyy)\n assert yy == yyy", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = [email protected]\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( [email protected]/self.alpha))", "def evaluate(t, x, y):\n r = np.sqrt(x**2 + y**2)\n return contrast * np.cos(kx_g*x + ky_g*y - w_g*t) * (1 - 
heaviside(r - patch_diameter*0.5))", "def irs_method(state):\n\n # First, importing all variables from the dictionary 'state'\n theta_ein2cm = state['theta_ein2cm']\n beta_boundary = state['beta_boundary']\n beta_res = state['beta_res']\n epsilon = state['epsilon']\n mu_h = state['mu_h']\n mu_v = state['mu_v']\n m = state['m']\n zeta = state['zeta']\n max_memory = state['max_memory']\n rays_per_pixel = state['rays_per_pixel']\n\n pixel2cm = theta_ein2cm * beta_boundary / beta_res # size of 1 pixel in cm in the source plane\n print('The physical size of 1 pixel is ' + str(beta_boundary / beta_res) + ' Einstein radii\\nor ' + str(\n np.format_float_scientific(pixel2cm, 2)) + ' cm in the source plane\\n')\n\n theta_boundaries = [epsilon * mu_h * beta_boundary / 2,\n epsilon * mu_v * beta_boundary / 2]\n # The number of images to draw in IRS method, assuming an ellipse in the image plane\n num_of_img = int((beta_res * epsilon) ** 2 * mu_v * mu_h * rays_per_pixel)\n print('A total of ' + str(num_of_img) + ' images for IRS method')\n state['num_of_img'] = num_of_img\n print(str(num_of_img / beta_res ** 2) + ' rays per source plane pixels')\n # The area in (Einstein-radii)^2 that each ray uniquely occupies\n s_ray = (epsilon ** 2 * mu_h * mu_v * beta_boundary ** 2) / num_of_img\n\n l_tmp = int(max_memory / m.shape[0] * 10 ** 9 / 8) # the maximum number of images to vector-compute\n n_runs = max(int(num_of_img / l_tmp), 1) # the number of sub arrays to vector-compute\n print('Max memory for array: ' + str(l_tmp * m.shape[0] * 8 / 10 ** 9) + 'GB')\n mu_grid = np.zeros((beta_res, beta_res)) # this will save the total number of rays per cell in the source plane\n start_time = time.time()\n theta = []\n beta = []\n num_cores = multiprocessing.cpu_count()\n print(str(num_cores) + ' active CPU cores')\n # starting the parallel routine, the variable mu_grid_temp_array is just a placeholder.\n mu_grid_temp_array = Parallel(n_jobs=num_cores, require='sharedmem')\\\n (delayed(parallel_irs)(i,mu_grid,l_tmp,n_runs,s_ray,theta_boundaries,start_time,state) for i in range(n_runs))\n\n if n_runs * l_tmp < num_of_img: # if some values are left\n # Drawing images locations\n theta = random_image_draw(int(num_of_img - n_runs * l_tmp), theta_boundaries[0], theta_boundaries[1])\n # Calculating locations of sources and corresponding magnitudes\n beta = af.img2src(theta, m, zeta, state)\n # Binning sources magnification\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n mu_grid += mu_grid_temp\n print('Finished shooting in ' + str(time.time() - start_time) + 's')\n else:\n print('Finished shooting in ' + str(time.time() - start_time) + 's')\n beta = np.ones(2, 2) # Just so that the next line can run smoothly and return beta_grid_h and beta_grid_v\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n\n return beta_grid_h, beta_grid_v, mu_grid" ]
[ "0.600269", "0.5934908", "0.5921452", "0.58419925", "0.58109325", "0.5797084", "0.5777378", "0.57385683", "0.57354975", "0.57078993", "0.5702315", "0.5689945", "0.5679907", "0.5660313", "0.5653539", "0.5650662", "0.56459534", "0.5623185", "0.55718786", "0.55647194", "0.55390006", "0.5538064", "0.54955274", "0.548023", "0.5469401", "0.54666287", "0.54486394", "0.54232156", "0.5415341", "0.54014677", "0.5392137", "0.53773916", "0.5369663", "0.5368328", "0.53543854", "0.53529143", "0.53505147", "0.53494364", "0.5340691", "0.5332554", "0.53259116", "0.53206164", "0.5315668", "0.531169", "0.5304329", "0.5299408", "0.5293038", "0.52636075", "0.5244854", "0.52376854", "0.52365893", "0.52290154", "0.52287257", "0.52275896", "0.5217535", "0.5216218", "0.5215106", "0.52100164", "0.5204103", "0.52015334", "0.5201182", "0.51998436", "0.5199727", "0.5199405", "0.5198784", "0.519039", "0.5187672", "0.51851255", "0.518501", "0.5183025", "0.5180838", "0.51796824", "0.5177448", "0.51769173", "0.51768607", "0.5172329", "0.51681495", "0.5158851", "0.51565474", "0.5154552", "0.5151373", "0.51505286", "0.5146045", "0.5145", "0.5140818", "0.5139329", "0.5137672", "0.5136683", "0.5134959", "0.51349235", "0.51349026", "0.51336896", "0.5129331", "0.5126193", "0.5125388", "0.51217484", "0.5114913", "0.51144594", "0.51117384", "0.5105873", "0.51027834" ]
0.0
-1
Compute the derivatives of the collinearity law (design matrix)
def __ComputeDesignMatrix(self, groundPoints): # initialization for readability omega = self.exteriorOrientationParameters[3] phi = self.exteriorOrientationParameters[4] kappa = self.exteriorOrientationParameters[5] # Coordinates subtraction dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0] dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1] dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2] dXYZ = np.vstack([dX, dY, dZ]) rotationMatrixT = self.rotationMatrix.T rotatedG = rotationMatrixT.dot(dXYZ) rT1g = rotatedG[0, :] rT2g = rotatedG[1, :] rT3g = rotatedG[2, :] focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2 dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :] dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :] dgdX0 = np.array([-1, 0, 0], 'f') dgdY0 = np.array([0, -1, 0], 'f') dgdZ0 = np.array([0, 0, -1], 'f') # Derivatives with respect to X0 dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0) dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0) # Derivatives with respect to Y0 dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0) dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0) # Derivatives with respect to Z0 dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0) dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0) dRTdOmega = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'omega').T dRTdPhi = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'phi').T dRTdKappa = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'kappa').T gRT3g = dXYZ * rT3g # Derivatives with respect to Omega dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) - rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0] dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) - rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0] # Derivatives with respect to Phi dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) - rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0] dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) - rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0] # Derivatives with respect to Kappa dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) - rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0] dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) - rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0] # all derivatives of x and y dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T, np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T]) a = np.zeros((2 * dd[0].shape[0], 6)) a[0::2] = dd[0] a[1::2] = dd[1] return a
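For reference, a minimal sketch of the model this design matrix linearizes, assuming the usual collinearity equations with focal length f and rotation matrix R(omega, phi, kappa); this is inferred from the code above, and the symbols below are not defined in the record itself:

x = -f\,\frac{\mathbf{r}_1^{\top}\Delta\mathbf{X}}{\mathbf{r}_3^{\top}\Delta\mathbf{X}},\qquad
y = -f\,\frac{\mathbf{r}_2^{\top}\Delta\mathbf{X}}{\mathbf{r}_3^{\top}\Delta\mathbf{X}},\qquad
\Delta\mathbf{X}=\begin{pmatrix}X-X_0\\ Y-Y_0\\ Z-Z_0\end{pmatrix}

where r_i^T denotes the i-th row of R^T. The returned array a stacks, for every ground point, the partial derivatives of x and y with respect to the six exterior orientation parameters (X0, Y0, Z0, omega, phi, kappa), interleaving the x-row and y-row per point, so its shape is (2 * n_points, 6).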
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def derivatives(x_p, y_p):\r\n # set up the matrix equation\r\n n = x_p.shape[0]\r\n M = np.zeros( [n,n] )\r\n d = np.zeros( [n,1] )\r\n \r\n # fill in the constants where they can be\r\n for i in np.arange(1,n-1 ): # for all but the first and last row\r\n M[i,i-1 ] = ( x_p[i] - x_p[i-1] ) / 6.\r\n M[i,i] = ( x_p[i+1] - x_p[i-1] ) / 3.\r\n M[i,i+1] = ( x_p[i+1] - x_p[i] ) /6.\r\n d[i,0 ] = ( y_p[i+1] - y_p[i] ) / ( x_p[i+1] - x_p[i] ) - ( y_p[i] - y_p[i-1] ) / ( x_p[i] - x_p[i-1] )\r\n \r\n M[0,0],M[-1,-1] = 1.,1. # compactly sets the BCs\r\n \r\n LU = lu.LU_decomp(M) # solves the matrix equations\r\n return lu.FB_sub(LU.Low, LU.Upp, d) # find and return 2nd derivatives\r", "def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad", "def det(self):\n if self.x == 0 or self.y == 0:\n return None\n elif self.x == 1 or self.y == 1:\n return self.retrieve(0,0)\n else:\n out = 0.0\n for x in xrange(0, self.x):\n out += self.retrieve(0,x)*self.C(0,x)\n return out", "def det(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square\")\n\n if self.rows == 1:\n return self.row(1)[0]\n\n if self.rows == 2:\n return self.entry(1,1) * self.entry(2,2) - self.entry(1,2) * self.entry(2,1)\n\n det = 0\n row_to_expand = 1\n\n for i in range(1, self.columns + 1):\n det += self.entry(row_to_expand, i) * self._cofactor(row_to_expand, i)\n\n return det", "def compute_grad(beta, lambdat, X, y):\n return -2/len(y)*(np.maximum(0, 1-(\n (y[:, np.newaxis]*X).dot(beta)))).dot(\n y[:, np.newaxis]*X) + 2 * lambdat * beta", "def jacobian(self,x,p,fun):\n n = self.n\n y = fun(x,p)\n h = 1e-4\n nout = np.size(y)\n dfdx = np.zeros((nout,n))\n for j in range(n):\n dx1 = np.zeros(n)\n dx2 = np.zeros(n)\n dx1[j] = -h\n dx2[j] = h\n dfdx[:,j] = (fun(x+dx2,p)-fun(x+dx1,p))/(2*h)\n return dfdx", "def d_dx(self, points):\n dk_dx = np.zeros((points.shape[0] + 3, # i\n self.source.n_points, # k\n self.source.n_dims)) # l\n dk_dx[:-3, :] = self.kernel.d_dl(points)\n\n affine_derivative = np.array([[0, 0],\n [1, 0],\n [0, 1]])\n dk_dx[-3:, :] = affine_derivative[:, None]\n\n return np.einsum('ij, ikl -> klj', self.coefficients, dk_dx)", "def derivative_matrix(g):\n\n def _(g):\n B = g.B[0].grad\n N = g.N[0]\n P = g.dec.P(1)\n H = np.vstack(P(B(i)) for i in range(N)).T\n return H\n\n return _(g), _(g.dual)", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n \n for nl in self.nlists: \n nl.separations()\n \n for force in self.forces:\n force.apply()\n\n # Controllers is the new implementation of forces\n for controller in self.controllers:\n controller.apply()", "def getDerivativeSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n \n # Derivation\n xi = sy.symbols('xi')\n self.dudx_xyPlane = sy.diff(self.u_xyPlane, xi) / L\n \n # Then calculate the derivation equation on x-z plane\n self.dudx_xzPlane = 
sy.diff(self.u_xzPlane, xi) / L", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def determinant(self) -> float:\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Determinant must be for a square matrix; this one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n # Note: this one should be recursive....\n if num_R == 1:\n return self.mat[0][0]\n det =0\n for i in range(num_R):\n det += self.mat[0][i] * self.get_minor(0,i).determinant() * (-1)**i\n return det\n pass # remove this when you add your code.\n # -------------------------------------------------------", "def _compute_derivatives(image, mode=\"constant\", cval=0):\n\n derivatives = [\n ndi.sobel(image, axis=i, mode=mode, cval=cval)\n for i in range(image.ndim)\n ]\n\n return derivatives", "def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... 
product of diagonals is determinant\n\n return product", "def fluid_deriv(self):\n deriv = np.zeros((self.fluid_constraints['num_eq'],\n 2 * self.num_i + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n for j in range(self.num_nw_fluids):\n deriv[i * self.num_nw_fluids + j, i, j + 3] = 1\n deriv[i * self.num_nw_fluids + j, self.num_i + i, j + 3] = -1\n return deriv", "def _derX(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c], y[c], z[c])\n return dfdx", "def det(a, b, c):\n d = (b[0]*c[1]-c[0]*b[1])+(c[0]*a[1]-a[0]*c[1])+(a[0]*b[1]-a[1]*b[0])\n return d", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif 
k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def eval_Dxy(self):\n\n return self.Xf - self.Yf", "def _derX(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c], y[c])\n return dfdx", "def compute_derivs_matrices(vecs, adv_vecs, dt):\n return (adv_vecs - vecs)/(1.*dt)", "def _gradients(self, partial):\r\n dL_dfhat, I_KW_i = self._shared_gradients_components()\r\n dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = self.noise_model._laplace_gradients(self.f_hat, self.data, extra_data=self.extra_data)\r\n\r\n #len(dlik_dthetaL)\r\n num_params = len(self._get_param_names())\r\n # make space for one derivative for each likelihood parameter\r\n dL_dthetaL = np.zeros(num_params)\r\n for thetaL_i in range(num_params):\r\n #Explicit\r\n dL_dthetaL_exp = ( np.sum(dlik_dthetaL[:, thetaL_i])\r\n #- 0.5*np.trace(mdot(self.Ki_W_i, (self.K, np.diagflat(dlik_hess_dthetaL[thetaL_i]))))\r\n + np.dot(0.5*np.diag(self.Ki_W_i)[:,None].T, dlik_hess_dthetaL[:, thetaL_i])\r\n )\r\n\r\n #Implicit\r\n dfhat_dthetaL = mdot(I_KW_i, self.K, dlik_grad_dthetaL[:, thetaL_i])\r\n dL_dthetaL_imp = np.dot(dL_dfhat, dfhat_dthetaL)\r\n dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp\r\n\r\n return dL_dthetaL", "def dgdy(self, X):\n \n return 3*X[1]**2", "def determinant(self):\n d1 = self._row_1[0] * (self._row_2[1] * self._row_3[2] - self._row_2[2] * self._row_3[1])\n d2 = self._row_1[1] * (self._row_2[0] * self._row_3[2] - self._row_2[2] * self._row_3[0])\n d3 = self._row_1[2] * (self._row_2[0] * self._row_3[1] - self._row_2[1] * self._row_3[0])\n return d1 - d2 + d3", "def efSolver(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n\n #x-component#\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n\n #y-component\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n #z-component\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)", "def 
det_matrix_2x2(m: list):\n return m[0][0]*m[1][1] - m[0][1]*m[1][0]", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def df2dx3_func(self,X):\n result = (\n self.rj*self.rm*self.k_spr*self.b_spr * (\n np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0]))\n * ((self.rm*X[2] - self.rj*X[0])>=0)\n ) / self.Ij\n )\n return(result)", "def jacobian(self, dt):\n return self._F_cache", "def mass_flow_deriv(self):\n deriv = np.zeros((2, 4 + self.num_vars, self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 0] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 0] = -1\n return deriv", "def pgradient(self):\n d = {}\n\n # Det coeff\n det_coeff_grad = (\n self._dets[0][0, :, self._det_map[0]]\n * self._dets[1][0, :, self._det_map[1]]\n * np.exp(\n self._dets[0][1, :, self._det_map[0]]\n + self._dets[1][1, :, self._det_map[1]]\n )\n )\n\n curr_val = self.value()\n d[\"det_coeff\"] = (\n det_coeff_grad.T / (curr_val[0] * np.exp(curr_val[1]))[:, np.newaxis]\n )\n\n # Mo_coeff, adapted from SlaterUHF\n for parm in [\"mo_coeff_alpha\", \"mo_coeff_beta\"]:\n s = 0\n if \"beta\" in parm:\n s = 1\n\n ao = self._aovals[\n :, s * self._nelec[0] : self._nelec[s] + s * self._nelec[0], :\n ]\n pgrad_shape = (ao.shape[0],) + self.parameters[parm].shape\n pgrad = np.zeros(pgrad_shape)\n\n largest_mo = np.max(np.ravel(self._det_occup[s]))\n for i in range(largest_mo + 1): # MO loop\n if i not in self.freeze_orb[s]:\n for det in range(self.parameters[\"det_coeff\"].shape[0]): # Det loop\n if (\n i in self._det_occup[s][self._det_map[s][det]]\n ): # Check if MO in det\n col = self._det_occup[s][self._det_map[s][det]].index(i)\n pgrad[:, :, i] += (\n self.parameters[\"det_coeff\"][det]\n * d[\"det_coeff\"][:, det, np.newaxis]\n * self._testcol(self._det_map[s][det], col, s, ao)\n )\n d[parm] = np.array(pgrad)\n return d", "def darcy_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n visc_i = visc_mix_ph(i, T0=self.inl[0].T.val_SI)\n visc_o = visc_mix_ph(o, T0=self.outl[0].T.val_SI)\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n\n re = 4 * abs(i[0]) / (np.pi * self.D.val * (visc_i + visc_o) / 2)\n\n return ((i[1] - o[1]) - 8 * abs(i[0]) * i[0] * (v_i + v_o) / 2 *\n self.L.val * lamb(re, self.ks.val, self.D.val) /\n (np.pi ** 2 * self.D.val ** 5))", "def joint_model_derivative_z(response, design_matrix, a_cols, h_cols, param_vector_a, param_vector_h, int_a, int_h, indicators, weights, z, prior_means, prior_vars, home_points=\" Home Points\", away_points=\" Away Points\", MAP=False):\n\n # Calculating necessary elements for gradient calculation\n # REQUIRES AWAY AND HOME COEFFICIENTS TO BE IN PARAM_VECTOR[0] AND PARAM_VECTOR[1] RESPECTIVELY\n # REQUIRES MODEL PRECISION IN PARAM_VECTOR[-1]\n # OTHER DIMS OF PARAMETER VECTOR SHOULD MATCH DESIGN MATRIX COLUMNS\n K_teams = int(len(z) / 2)\n gradient = np.zeros(2 * K_teams).reshape((-1,1))\n second_gradient = np.zeros(2 * K_teams).reshape((-1,1))\n\n # Away Score-based derivatives\n away_predictions = design_matrix.loc[:, 
a_cols].dot(param_vector_a[:-1]) + int_a\n away_difference = response[away_points].reshape(-1,1) - away_predictions\n away_offense_derivatives = (param_vector_a[0] / param_vector_a[-1]) * away_difference\n home_defense_derivatives = (param_vector_a[1] / param_vector_a[-1]) * away_difference\n\n # Home Score-based derivatives\n home_predictions = design_matrix.loc[:, h_cols].dot(param_vector_h[:-1]) + int_h\n home_difference = response[home_points].reshape(-1,1) - home_predictions\n home_offense_derivatives = (param_vector_h[0] / param_vector_h[-1]) * home_difference\n away_defense_derivatives = (param_vector_h[1] / param_vector_h[-1]) * home_difference\n\n team_updates = [[] for _ in range(2 * K_teams)]\n\n # Summing gradient into respective team latent variables\n for i in range(design_matrix.shape[0]):\n away_offense_indicator_vector = create_one_hot(indicators[i][0], 2 * K_teams)\n away_defense_indicator_vector = create_one_hot(indicators[i][0] + K_teams, 2 * K_teams)\n home_offense_indicator_vector = create_one_hot(indicators[i][1], 2 * K_teams)\n home_defense_indicator_vector = create_one_hot(indicators[i][1] + K_teams, 2 * K_teams)\n\n # These do not need to be separated, just easier to read this way\n # Away score gradient adding\n gradient += weights[i] * (away_offense_indicator_vector * away_offense_derivatives.loc[i, 0] + home_defense_indicator_vector * home_defense_derivatives.loc[i, 0])\n second_gradient += weights[i] * (away_offense_indicator_vector * -1 * param_vector_a[0] ** 2 / param_vector_a[-1] + home_defense_indicator_vector * -1 * param_vector_a[1]**2 / param_vector_a[-1])\n\n team_updates[indicators[i][0]].append(weights[i] * away_offense_derivatives.loc[i,0])\n team_updates[indicators[i][1] + K_teams].append(weights[i] * home_defense_derivatives.loc[i,0])\n\n # Home score gradient adding\n gradient += weights[i] * (away_defense_indicator_vector * away_defense_derivatives.loc[i, 0] + home_offense_indicator_vector * home_offense_derivatives.loc[i, 0])\n second_gradient += weights[i] * (away_defense_indicator_vector * -1 * param_vector_h[1] ** 2 / param_vector_h[-1] + home_offense_indicator_vector * -1 * param_vector_h[0] ** 2 / param_vector_h[-1])\n\n team_updates[indicators[i][0] + K_teams].append(weights[i] * away_defense_derivatives.loc[i, 0])\n team_updates[indicators[i][1]].append(weights[i] * home_offense_derivatives.loc[i, 0])\n\n # Adjusting gradient for MAP estimate (prior over latent variables)\n if MAP:\n MAP_gradient = -1 * (z - prior_means) / prior_vars\n gradient += MAP_gradient\n second_gradient += -1 / prior_vars\n\n return gradient, second_gradient, np.array([np.std(team_game_updates) for team_game_updates in team_updates])", "def gradient(self):\n gradients = [func.gradient for func in self.functionals]\n return DiagonalOperator(*gradients)", "def derivative(self, theta):\n diag_gamma = np.dot(theta.T, self.X.T)\n logistic_term = self.logistic_fn(diag_gamma)\n diag_gamma = logistic_term * (1.0 - logistic_term)\n gamma = np.diag(diag_gamma)\n\n # v computation\n diags_v = 1.0 - 2*self.logistic_fn(np.dot(theta.T, self.X.T))\n diags_v = diags_v.reshape((-1, 1))\n diags_v = diags_v*self.X\n assert diags_v.shape == self.X.shape #N*d shape\n\n XtGamma = np.dot(self.X.T, gamma) # d*N shape\n\n # TODO: Verifier car pas sur de mon coup ... 
et surtout plus long...\n # id = np.eye(self.n_examples).reshape((self.n_examples, self.n_examples, 1))\n # diags_v = diags_v.reshape((self.n_examples, 1, self.dim))\n # v = id*diags_v # n*n*d tensor\n # left = np.tensordot(XtGamma, v, axes=(1, 0)) # shape d*N*d\n # assert left.shape == (self.dim, self.n_examples, self.dim)\n # dg = np.tensordot(left, self.X, axes=(1, 0))\n # dg = np.swapaxes(dg, axis1=-2, axis2=-1)\n\n dg = np.zeros((self.dim, self.dim, self.dim))\n for idx, v_i_diag in enumerate(diags_v.T):\n v_i = np.diag(v_i_diag)\n dg_di = np.dot(np.dot(XtGamma, v_i), self.X)\n dg[:, :, idx] = dg_di\n return dg", "def df2dx5_func(self,X):\n result = (\n -self.rj*self.rm*self.k_spr*self.b_spr * (\n np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0]))\n * ((self.rm*X[4] + self.rj*X[0])>=0)\n ) / self.Ij\n )\n return(result)", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def _cost_function_derivative(self, y_pred, y, X, m):\n\n derivatives= np.zeros((X.shape[0],1))\n for j in range(X.shape[0]):\n auxsum = 0\n for i in range(m):\n auxsum+=(y_pred[0][i] -y[0][i])*X[j][i]\n derivatives[j][0] = self.theta[j][0] - self.alpha * 1/m * auxsum\n\n #empty_derivatives = np.zeros((X.shape[0],1))\n return derivatives", "def _derY(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c], y[c], z[c])\n return dfdy", "def det2(m):\n\t(a,b), (c,d) = m\n\treturn a*d - b*c", "def compute_derivative(self, r, dr):\n\n out = None\n\n if r.requires_grad == False:\n r.requires_grad = True\n\n with torch.enable_grad():\n\n for jast in self.jastrow_functions:\n\n kernel = jast(r)\n ker_grad = self._grads(kernel, r)\n ker_grad = ker_grad.unsqueeze(1) * dr\n ker_grad = ker_grad.unsqueeze(0).detach().clone()\n\n if out is None:\n out = ker_grad\n else:\n out = torch.cat((out, ker_grad), axis=0)\n\n return out", "def d(self, df):\n # Get variable names\n var = [key for key, _ in self.marginals.items()]\n df_u = self.sample2pr(df)[var]\n # Evaluate copula density\n l_copula = self.copula.d(df_u.values)\n # Evaluate marginal densities\n L_marginals = zeros((df.shape[0], len(var)))\n for i, v in enumerate(var):\n L_marginals[:, i] = self.marginals[v].d(df[v])\n l_marginals = prod(L_marginals, axis=1)\n\n return l_copula * l_marginals", "def determinant(self):\n if self.cols != self.rows:\n raise Exception ('Matrix is not square!')\n for i in range(self.rows):\n if self.values[i][i] == 0:\n raise Exception ('There is zero on the main diagonal')\n #TODO: Rearrange the lines, that the main diagonal don't have a zero values \n\n arr = self.values[:]\n for i in range(self.rows):\n for j in range(self.cols):\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n if i > j :\n arr2 = arr[i][j]/diag[j]\n arr1 = [round(x * arr2, 4) for x in arr[i-i+j]]\n arr[i] = map(lambda x,y: round(x - y, 4) , arr[i], arr1 )\n\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n det = 1\n for i in range(len(diag)):\n det *= diag[i]\n if det != 0 :\n return True\n else:\n return False", "def jacobian(self, points):\n # check if re-computation of dW/dx can be avoided\n if not np.array_equal(self._cached_points, points):\n # recompute dW/dx, i.e. 
the relative weight of each point wrt\n # the source landmarks\n self.dW_dX = self.transform.weight_points(points)\n # cache points\n self._cached_points = points\n\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n\n # compute dX/dp\n\n # dX/dq is the Jacobian of the global transform evaluated at the\n # mean of the model.\n dX_dq = self._global_transform_jacobian(points)\n # dX_dq: n_points x n_global_params x n_dims\n\n # by application of the chain rule dX_db is the Jacobian of the\n # model transformed by the linear component of the global transform\n dS_db = model_jacobian\n dX_dS = self.pdm.global_transform.jacobian_points(points)\n dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db)\n # dS_db: n_points x n_weights x n_dims\n # dX_dS: n_points x n_dims x n_dims\n # dX_db: n_points x n_weights x n_dims\n\n # dX/dp is simply the concatenation of the previous two terms\n dX_dp = np.hstack((dX_dq, dX_db))\n\n # dW_dX: n_points x n_points x n_dims\n # dX_dp: n_points x n_params x n_dims\n dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)\n # dW_dp: n_points x n_params x n_dims\n\n return dW_dp", "def __call__ ( self , func , x , h , der = False ) :\n\n ## calculate differences \n imax = self.__order + 2 if der else self.__order + 1\n i = 0\n while i < imax : \n j = i + 1\n self.__df[i] = func ( x + j * h ) - func ( x - j * h )\n i += 1\n \n ## 1) calculate 1st derivative \n result = dot_fma ( self.__order + 1 , self.__df , self.__d1 ) / ( self.__sf1 * h ) \n if not der : return result \n \n ## 2) calculate Nth derivative \n dd = dot_fma ( self.__order + 2 , self.__df , self.__d2 ) / ( self.__sf2 * h**(self.__order*2+3) ) \n \n return result, dd", "def dfdx(x,t,dt):\n assert is1d(x)\n F = np.zeros((m,m))\n # X\n md = lambda i: np.mod(i,nX)\n for i in range(nX):\n # wrt. X\n F[i,i] = - dt + 1\n F[i,md(i-2)] = - dt * x[md(i-1)]\n F[i,md(i+1)] = + dt * x[md(i-1)]\n F[i,md(i-1)] = dt *(x[md(i+1)]-x[md(i-2)])\n # wrt. Y\n F[i,nX+iiY[i]] = dt * -h*c/b\n # Y\n md = lambda i: nX + np.mod(i-nX,nX*J)\n for i in range(nX,(J+1)*nX):\n # wrt. Y\n F[i,i] = -dt*c + 1\n F[i,md(i-1)] = +dt*c*b * x[md(i+1)]\n F[i,md(i+1)] = -dt*c*b * (x[md(i+2)]-x[md(i-1)])\n F[i,md(i+2)] = -dt*c*b * x[md(i+1)]\n # wrt. 
X\n F[i,iiX[i-nX]] = dt * h*c/b\n return F", "def dV(X):\n return -4 * a * np.power(X, 3) + 2 * b * X", "def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)\n u = F.softmax(pre_u, dim=3)\n # Perform computation\n pre_sigm = torch.sum(u * a * z, 3) + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = torch.sum(w * sigm, dim=3)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(pre_w, dim=3) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(a)\n # n, d, d2, dh\n logj = logj + F.log_softmax(pre_u, dim=3)\n # n, d, d2, dh, d1\n logj = torch.log(torch.sum(torch.exp(logj),3))\n # n, d, d2, d1\n logdet_ = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return logdet_", "def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps", "def det(self):\n\t\t\n\t\trows = self._rows\n\t\tsign = +1\n\t\tsumm = 0\n\n\t\tfor perm in permutations(range(rows), rows):\n\t\t\tmul = 1\n\t\t\tsign = SquareMatrix.__parity_of_permutation(perm)\n\n\t\t\tfor i in range(rows):\n\t\t\t\tmul *= self[i][perm[i]]\n\n\t\t\tsumm += sign * mul\n\t\treturn summ", "def compute_differential_operator(self):\n\n v_in, v_out, weights = self.get_edge_list()\n\n n = len(v_in)\n Dr = np.concatenate((np.arange(n), np.arange(n)))\n Dc = np.empty(2*n)\n Dc[:n] = v_in\n Dc[n:] = v_out\n Dv = np.empty(2*n)\n\n if self.lap_type == 'combinatorial':\n Dv[:n] = np.sqrt(weights)\n Dv[n:] = -Dv[:n]\n elif self.lap_type == 'normalized':\n Dv[:n] = np.sqrt(weights / self.dw[v_in])\n Dv[n:] = -np.sqrt(weights / self.dw[v_out])\n else:\n raise ValueError('Unknown lap_type {}'.format(self.lap_type))\n\n self._D = sparse.csc_matrix((Dv, (Dr, Dc)), shape=(n, self.N))", "def linear_backward_calculation(dZ, internal_params):\n\n A_prev, W, b = internal_params\n nb = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW =np.multiply((np.dot(dZ, A_prev.T)),1/nb)\n db = np.multiply ((np.sum(dZ, axis=1, keepdims=True),1/nb))\n dA_prev = np.dot(W.T, dZ)\n # raise NotImplementedError\n return dA_prev,dW,db", "def d_dl(self, points):\n n_centres = self.n_points\n n_points = points.shape[0]\n\n # TPS kernel (nonlinear + affine)\n\n # for each input, evaluate the rbf\n # (n_points, n_centres)\n k_points = self.kernel.apply(points)\n\n # k_points with (1, x, y) appended to each point\n # (n_points, n_centres+3) - 3 is (1, x, y) for affine component\n k = np.hstack([k_points, np.ones([n_points, 1]), points])\n\n # (n_centres+3, n_centres+3)\n try:\n inv_L = np.linalg.inv(self.l)\n except np.linalg.LinAlgError:\n # If two points are coincident, or very close to being so, then the\n # matrix is rank deficient and thus not-invertible. 
Therefore,\n # only take the inverse on the full-rank set of indices.\n _u, _s, _v = np.linalg.svd(self.l)\n keep = _s.shape[0] - sum(_s < self.min_singular_val)\n inv_L = _u[:, :keep].dot(1.0 / _s[:keep, None] * _v[:keep, :])\n\n\n # Taking the derivative of L for changes in l must yield an x,y change\n # for each centre.\n # (n_centres+3, n_centres+3, n_centres, n_dims)\n dL_dl = np.zeros(self.l.shape + (n_centres, 2))\n\n # take the derivative of the kernel wrt centres at the centres\n # SHOULD be (n_centres, n_dims, n_centres, n_dims)\n # IS (n_centres, n_centres, n_dims\n dK_dl_at_tgt = self.kernel.d_dl(self.source.points)\n\n # we want to build a tensor where for each slice where\n # dK_dl[i, j, k, l] is the derivative wrt the l'th dimension of the\n # i'th centre for L[j, k] -> first axis is just looping over centres\n # and last looping over dims\n # (n_centres, n_centres, n_centres, n_dims)\n dK_dl = np.zeros((n_centres, ) + dK_dl_at_tgt.shape)\n\n # make a linear iterator over the centres\n iter = np.arange(n_centres)\n\n # efficiently build the repeated pattern for dK_dl\n # note that the repetition over centres happens over axis 0\n # and the dims axis is the last\n # so dK_dl[0, ..., 0] corresponds to dK/dx0 in Joan's paper\n # dK_dl[3, ..., 1] corresponds to dK_dy3 in Joan's paper\n dK_dl[iter, iter] = dK_dl_at_tgt[iter]\n dK_dl[iter, :, iter] = dK_dl_at_tgt[:, iter]\n\n # prepare memory for the answer\n # SHOULD be (n_points, n_dims, n_centres, n_dims)\n # IS (n_points, , n_centres, n_dims)\n dW_dl = np.zeros((n_points, n_centres, 2))\n\n # pretend the target is equal to the source\n # (n_dims, n_centres+3)\n pseudo_target = np.hstack([self.source.points.T, np.zeros([2, 3])])\n\n for i in np.arange(n_centres):\n # dP_dli (n_centres, n_points, n_dims, n_dims)\n dP_dli = np.zeros(self.p.shape + (2,))\n dP_dli[i, 1, 0] = -1\n dP_dli[i, 2, 1] = -1\n\n dL_dl[:n_centres, :n_centres, i] = dK_dl[i]\n dL_dl[:n_centres, n_centres:, i] = dP_dli\n dL_dl[n_centres:, :n_centres, i] = np.swapaxes(dP_dli, 0, 1)\n\n omega_x = -inv_L.dot(dL_dl[..., i, 0].dot(inv_L))\n omega_y = -inv_L.dot(dL_dl[..., i, 1].dot(inv_L))\n dW_dl[:, i, 0] = k.dot(omega_x).dot(pseudo_target[0])\n dW_dl[:, i, 1] = k.dot(omega_y).dot(pseudo_target[1])\n\n return dW_dl", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= 
flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... = b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here", "def mass_flow_deriv(self):\n deriv = np.zeros((\n self.num_i,\n self.num_i + self.num_o + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 0] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 0] = -1\n return deriv", "def jacobian_d(self, x, out=None, **kwargs):\n return 
self._base_nlp.jacobian_d(x, out=out, **kwargs)", "def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)", "def dfdx(self, X):\n \n return 3*(X[0])**2", "def jacobian(self, dt):\n raise NotImplementedError", "def _derZ(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdz = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdz[c] = self.functions[j].derivativeZ(x[c], y[c], z[c])\n return dfdz", "def Cholesky_Derivs(InvV,m_points):\n #get some of the dimensions from the inputs\n m = InvV.n\n d = InvV.d\n\n #initialise the required array\n dM_dL = np.zeros([m,d,d,d,d])\n \n #firstly loop through each map/component\n for j in range(m):\n #now want to loop over each of the Cholesky components\n for k in range(d):\n for l in range(d):\n #take the l-th column of L and put it in row k\n dM_dL[j,k,l,k,:] += InvV.L[j,:,l]\n #take the k-th row of L and put it in row l\n dM_dL[j,k,l,l,:] += InvV.L[j,k,:] \n return dM_dL", "def pderiv2D(field, xld, dim = 0):\n n_x, n_y = field.shape\n dfield = np.zeros_like(field)\n if (dim not in [0, 1]): \n raise ValueError(\"2-D function, enter dim = 0 (df/dx) or dim = 1 (df/dy)\")\n if (dim == 0):\n # check if len(x) equals M\n if len(xld) != n_x : \n raise ValueError(\"x-direction lengths do not match\")\n for j in range(n_y):\n dfield[:, j] = deriv(field[:,j], np.array(xld))\n if (dim == 1):\n if len(xld) != n_y:\n raise ValueError('y-direction lengths do not match')\n for i in range(n_x):\n dfield[i,:] = deriv(field[i,:], np.array(xld))\n return dfield", "def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None", "def compute_gradient(c, x, y):\n\n vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])\n rows, cols = c.shape\n\n result = np.empty_like(x)\n\n for i in nb.prange(rows):\n for j in nb.prange(cols):\n c_remainder = c[i, j] % 4\n gradient_co = vectors[c_remainder]\n result[i, j] = gradient_co[0] * x[i, j] + gradient_co[1] * y[i, j]\n\n return result", "def _derY(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n y = 
temp[np.arange(m), i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c], y[c])\n return dfdy", "def _det(mat):\n\n return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])\n + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *\n mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -\n mat[1][1] * mat[2][0]))", "def LDL(A, d):\n n = shape(A)[0]\n L = array(eye(n))\n dg = zeros(n)\n dg[0] = A[0, 0]\n for k in range(1, n):\n m = reshape(array(A[:k, k].copy()), k)\n rforwardsolve(L[:k, :k], m, d)\n L[k, :k] = m/dg[:k]\n dg[k] = A[k, k] - dot(L[k, :k], m)\n return L, dg", "def beinflumat(x_axis, y_axis, e_eff):\n len_x = len(x_axis)\n len_y = len(y_axis)\n influence_matrix_complete = np.zeros((len_x, len_y, len_x, len_y))\n\n # generate coordinate grids\n a_factor = (x_axis[-1] - x_axis[0]) / (len_x - 1) / 2\n b_factor = (y_axis[-1] - y_axis[0]) / (len_y - 1) / 2\n x_grid = __beinflumatgrid(x_axis)\n y_grid = __beinflumatgrid(y_axis)\n\n # use numexpr to evaluate expressions\n xpa = ne.evaluate('x_grid + a_factor')\n xma = ne.evaluate('x_grid - a_factor')\n ypb = ne.evaluate('y_grid + b_factor')\n ymb = ne.evaluate('y_grid - b_factor')\n\n # calculate complete influence matrix\n for j in range(0, len_y):\n for j_prime in range(0, len_y):\n influence_matrix_complete[:, j, :, j_prime] = \\\n (np.multiply(xpa, np.log(\n np.divide(\n ((ypb[j, j_prime]) +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xpa, xpa))),\n ((ymb[j, j_prime]) +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xpa, xpa)))))) +\n (ypb[j, j_prime]) * np.log(\n np.divide(\n (xpa +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xpa, xpa))),\n (xma +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xma, xma))))) +\n np.multiply(xma, np.log(\n np.divide(\n ((ymb[j, j_prime]) +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xma, xma))),\n ((ypb[j, j_prime]) +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xma, xma)))))) +\n (ymb[j, j_prime]) * np.log(\n np.divide(\n (xma +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xma, xma))),\n (xpa +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xpa, xpa))))))\n\n return influence_matrix_complete * 1 / e_eff * 2 / pi", "def update(self, dLds, alpha, beta):\n T = len(self.x)\n self.nodes.reset_error()\n self.igate.reset_error()\n self.fgate.reset_error()\n self.ogate.reset_error()\n dLdx = np.zeros((T, self.input_size))\n dLdc = np.zeros(self.hidden_size)\n for t in xrange(T-1, -1, -1):\n dLdpo = dLds[t] * self.h[t] * self.gatefun.derivate(self.o[t])\n # parameters for output gate\n self.ogate.dLdu += np.outer(dLdpo, self.x[t])\n self.ogate.dLdw += np.outer(dLdpo, self.s[t-1])\n self.ogate.dLdv += np.outer(dLdpo, self.c[t-1])\n dLds[t-1] += np.dot(self.ogate.w.T, dLdpo)\n dLdx[t] += np.dot(self.ogate.u.T, dLdpo)\n dLdc += np.dot(self.ogate.v.T, dLdpo)\n\n dLdc += dLds[t] * self.o[t] * self.acfun.derivate(self.h[t])\n dLdpi = dLdc * self.g[t] * self.gatefun.derivate(self.i[t])\n dLdpf = dLdc * self.c[t-1] * self.gatefun.derivate(self.f[t])\n dLdpg = dLdc * self.i[t] * self.acfun.derivate(self.g[t])\n dLdc = dLdc * self.f[t]\n # parameters for nodes in hidden layer\n self.nodes.dLdu += np.outer(dLdpg, self.x[t])\n self.nodes.dLdw += np.outer(dLdpg, self.s[t-1])\n dLds[t-1] += 
np.dot(self.nodes.w.T, dLdpg)\n dLdx[t] += np.dot(self.nodes.u.T, dLdpg)\n # parameters for input gate\n self.igate.dLdu += np.outer(dLdpi, self.x[t])\n self.igate.dLdw += np.outer(dLdpi, self.s[t-1])\n self.igate.dLdv += np.outer(dLdpi, self.c[t-1])\n dLds[t-1] += np.dot(self.igate.w.T, dLdpi)\n dLdx[t] += np.dot(self.igate.u.T, dLdpi)\n dLdc += np.dot(self.igate.v.T, dLdpi)\n # parameters for forget gate\n self.fgate.dLdu += np.outer(dLdpf, self.x[t])\n self.fgate.dLdw += np.outer(dLdpf, self.s[t-1])\n self.fgate.dLdv += np.outer(dLdpf, self.c[t-1])\n dLds[t-1] += np.dot(self.fgate.w.T, dLdpf)\n dLdx[t] += np.dot(self.fgate.u.T, dLdpf)\n dLdc += np.dot(self.fgate.v.T, dLdpf)\n if self.en_bias:\n self.nodes.dLdb += dLdpg\n self.igate.dLdb += dLdpi\n self.fgate.dLdb += dLdpf\n self.ogate.dLdb += dLdpo\n # update weight matrix of current hidden node\n self.nodes.update(alpha, beta)\n self.igate.update(alpha, beta)\n self.fgate.update(alpha, beta)\n self.ogate.update(alpha, beta)\n return dLdx", "def ddx(field, method=None):\n if method == None or method == 'central':\n new_field = field-field\n\n # Apply central differencing in the 'core' region\n new_field[:,1:-1] = (field[:,2:]-field[:,:-2])/field.dL/2\n\n # Apply second order forward/backward differences at boundaries\n new_field[:,0] = (field[:,2] - 2*field[:,1] + field[:,0]) / \\\n field.dL**2\n new_field[:,-1] = (field[:,-3] - 2*field[:,-2] + field[:,-1]) / \\\n field.dL**2\n return new_field\n\n elif method == 'richardson':\n new_field = field[:,:-4,2:-2] - field[:,4:,2:-2] + \\\n 8*field[:,3:-1,2:-2] - 8*field[:,1:-3,2:-2]\n new_field = new_field/field.dL/12\n new_field.x = field.x[2:-2,2:-2]\n new_field.y = field.y[2:-2,2:-2]\n return new_field\n\n elif method == 'least_square':\n new_field = 2*field[:,4:,2:-2] - 2*field[:,:-4,2:-2] + \\\n field[:,3:-1,2:-2] - field[:,1:-3,2:-2]\n new_field = new_field/field.dL/10\n new_field.x = field.x[2:-2,2:-2]\n new_field.y = field.y[2:-2,2:-2]\n return new_field\n\n else:\n raise ValueError('method keyword argument was not valid.')", "def gradient(self):\n result = np.zeros(len(self.variables))\n result[self.bivariateGradInd] = (self.shape-1)/self.variable - self.rate\n return result", "def func_deriv(l, sign=1.0):\n\tx,y,z,xs,ys=l\n\n\tdf1dx = 0\n\tdf1dy = 0\n\tdf1dz = 0\n\tdf1dxs = 0\n\tdf1dys = 0\n\tfor i in range(n):\n\t \tdf1dx += (di+ci*ri[i])*(x-xi[i])*(1/ (((x-xi[i])**2+(y-yi[i])**2+(z-zi[i])**2 )**0.5) )\n\t \tdf1dy += (di+ci*ri[i])*(y-yi[i])*(1/ (((x-xi[i])**2+(y-yi[i])**2+(z-zi[i])**2 )**0.5) )\n\t \tdf1dz += (di+ci*ri[i])*(z-zi[i])*(1/ (((x-xi[i])**2+(y-yi[i])**2+(z-zi[i])**2 )**0.5) )\n\n\tdf2dx = 0\n\tdf2dy = 0\n\tdf2dz = 0\n\tdf2dxs = 0\n\tdf2dys = 0\n\tfor i in range(n): \n\t \tdf2dx += (d +cd*ri[i])*(x-xs)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\t \tdf2dy += (d +cd*ri[i])*(y-ys)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\t \tdf2dz += (d +cd*ri[i])*(z-zs)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\t\tdf2dxs += -(d +cd*ri[i])*(x-xs)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\t \tdf2dys += -(d +cd*ri[i])*(y-ys)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\n\tdf3dx = 0\n\tdf3dy = 0\n\tdf3dz = 0\n\tdf3dxs = 0\n\tdf3dys = 0\n\tfor i in range(n):\n\t \tdf3dxs += (s +cs*ri[i])*(xm-xs)*(1/ (((xm-xs)**2+(ym-ys)**2+(zm-zs)**2 )**0.5) )\n\t\tdf3dys += (s +cs*ri[i])*(ym-ys)*(1/ (((xm-xs)**2+(ym-ys)**2+(zm-zs)**2 )**0.5) )\n\n\tdfdx = sign*(df1dx + df2dx + df3dx)\n\tdfdy = sign*(df1dy + df2dy + df3dy)\n\tdfdz = sign*(df1dz + df2dz + df3dz)\n\tdfdxs = sign*(df1dxs 
+ df2dxs + df3dxs)\n\tdfdys = sign*(df1dys + df2dys + df3dys)\n\n\treturn np.array([ dfdx, dfdy, dfdz, dfdxs, dfdys ])", "def deriv(self, model):\n k1, k2, k3 = self.coefficients\n r = self.relation(model)\n dc_dm1 = k1 * r\n dc_dm2 = k2 * r\n\n result = np.r_[dc_dm1, dc_dm2]\n\n return result", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state.item(0)\n pe = state.item(1)\n pd = state.item(2)\n u = state.item(3)\n v = state.item(4)\n w = state.item(5)\n e0 = state.item(6)\n e1 = state.item(7)\n e2 = state.item(8)\n e3 = state.item(9)\n p = state.item(10)\n q = state.item(11)\n r = state.item(12)\n # extract forces/moments\n fx = forces_moments.item(0)\n fy = forces_moments.item(1)\n fz = forces_moments.item(2)\n l = forces_moments.item(3)\n m = forces_moments.item(4)\n n = forces_moments.item(5)\n\n # position kinematics\n pn_dot =\n pe_dot =\n pd_dot =\n\n # position dynamics\n u_dot =\n v_dot =\n w_dot =\n\n # rotational kinematics\n e0_dot =\n e1_dot =\n e2_dot =\n e3_dot =\n\n # rotatonal dynamics\n p_dot =\n q_dot =\n r_dot = \n\n # collect the derivative of the states\n x_dot = np.array([[pn_dot, pe_dot, pd_dot, u_dot, v_dot, w_dot,\n e0_dot, e1_dot, e2_dot, e3_dot, p_dot, q_dot, r_dot]]).T\n return x_dot", "def _2ndderiv_xyz(self,x,y,z,i,j):\n return -np.pi*self._rhoc_M*self.a**3*self._b*self._c *\\\n _2ndDerivInt(x,y,z,self._a2,self._b2*self._a2,self._c2*self._a2,self.n,i,j)", "def det_2x2(matrix: FieldMatrix) -> FlowFieldVal:\n _validate_matrix_shape(matrix, (2, 2))\n\n det = lambda a, b, c, d: a * d - b * c\n\n a, b = matrix[0]\n c, d = matrix[1]\n\n return tf.nest.map_structure(det, a, b, c, d)", "def compute_dz(self):\n el_geom_w = self.compute_geom_weights()\n el_geom_grad = self.compute_geom_grads()\n\n # Sum of weights coeffs\n w_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(el_geom_w))\n\n # Sum of weighted df = (dfx, dfy)\n dfx_el_w = np.empty_like(el_geom_w)\n dfy_el_w = np.empty_like(el_geom_w)\n for iapex in range(3):\n dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]\n dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]\n dfx_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(dfx_el_w))\n dfy_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(dfy_el_w))\n\n # Estimation of df\n dfx_estim = dfx_node_sum/w_node_sum\n dfy_estim = dfy_node_sum/w_node_sum\n return np.vstack([dfx_estim, dfy_estim]).T", "def compute_gradient(self):\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def Derivative(self, *args):\n return _Adaptor3d.Adaptor3d_InterFunc_Derivative(self, *args)", "def c(\n dp: np.ndarray,\n ddp: np.ndarray,\n ) -> np.ndarray:\n\n return \\\n np.abs(ddp[0, :]*dp[1, :] - dp[0, :]*ddp[1, :]) / \\\n (dp[0, :]**2 + dp[1, :]**2)**1.5", "def test_vic_dcor_nonlinear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0],\n [0.2, 0.5, 0.0],\n [0.2, 0.5, 1.0],\n [0.4, 1.0, 0.0],\n [0.4, 1.0, 1.0],\n [0.6, 1.0, 0.0],\n [0.6, 1.0, 1.0],\n [0.8, 0.5, 0.0],\n [0.8, 0.5, 1.0],\n [1.0, 0.0, 0.0],\n [1.0, 0.0, 1.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"VIC\", \"dCor\")\n expected_w_vector = np.array(\n [0.22633480, 0.27052183, 0.50314336],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def calculateElementCoefficients(self):\n 
#\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def jacobian(self, points):\n # check if re-computation of dW/dx can be avoided\n if not np.array_equal(self._cached_points, points):\n # recompute dW/dx, i.e. the relative weight of each point wrt\n # the source landmarks\n self.dW_dX = self.transform.weight_points(points)\n # cache points\n self._cached_points = points\n\n # dX/dp is simply the Jacobian of the model\n dX_dp = self.pdm.model.jacobian\n\n # dW_dX: n_points x n_points x n_dims\n # dX_dp: n_points x n_params x n_dims\n dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)\n # dW_dp: n_points x n_params x n_dims\n\n return dW_dp", "def dgdx(self, X):\n \n return 2*(X[0]) - 2", "def jacobian(self, x):\n x_ = np.atleast_2d(x)\n if self.normalize:\n x_ = (x_ - self.sample_mean) / self.sample_std\n s_ = (self.samples - self.sample_mean) / self.sample_std\n else:\n s_ = self.samples\n\n fx, jf = self.reg_model(x_)\n rx, drdx = self.corr_model(x=x_, s=s_, params=self.corr_model_params, dx=True)\n y_grad = np.einsum('ikj,jm->ik', jf, self.beta) + np.einsum('ijk,jm->ki', drdx.T, self.gamma)\n if self.normalize:\n y_grad = y_grad * self.value_std / self.sample_std\n if x_.shape[1] == 1:\n y_grad = y_grad.flatten()\n return y_grad", "def test_vic_dcor_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"VIC\", \"dCor\")\n expected_w_vector = np.array(\n [0.33817571, 0.33091215, 0.33091215],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def compute_det(self, log_progress=False):\n if not self.is_square():\n raise Exception(u\"Not a square matrix\")\n\n mat = clone_matrix(self.coefficients)\n size = self.get_size()[0]\n\n for i in range(size - 1):\n for j in range(i + 1, size):\n for k in range(i + 1, size):\n mat[j][k] = (mat[j][k] * mat[i][i]) - (mat[j][i] * mat[i][k])\n if i > 0:\n mat[j][k] //= mat[i - 1][i - 1]\n if log_progress:\n print(i)\n if i > 0:\n for j in 
range(size):\n mat[j][i - 1] = 0\n mat[i - 1][j] = 0\n\n return mat[size - 1][size - 1]", "def determinant(self):\n if self.L is None or self.U is None:\n self.decomposeLU()\n\n retval = 1.0\n for i in range(self.rows):\n retval *= self.L[i, i] * self.U[i, i]\n return retval", "def jacobian_ur5(q, delta=0.0001):\n # Alocacion de memoria\n J = np.zeros((3,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n # Iteracion para la derivada de cada columna\n for i in xrange(6):\n # Copiar la configuracion articular inicial\n dq = copy(q);\n # Incrementar la articulacion i-esima usando un delta\n dq[i] = dq[i] + delta \n dT = fkine_ur5(dq)\n \n J[:,i] = (dT[0:3, 3] - T[0:3, 3])/delta\n\n return J", "def adjoint_derivative(self, dx=None, dy_u=None, dy_l=None,\n P_idx=None, A_idx=None, eps_iter_ref=1e-04):\n\n P, q = self._derivative_cache['P'], self._derivative_cache['q']\n A = self._derivative_cache['A']\n l, u = self._derivative_cache['l'], self._derivative_cache['u']\n\n try:\n results = self._derivative_cache['results']\n except KeyError:\n raise ValueError(\"Problem has not been solved. \"\n \"You cannot take derivatives. \"\n \"Please call the solve function.\")\n\n if results.info.status != \"solved\":\n raise ValueError(\"Problem has not been solved to optimality. \"\n \"You cannot take derivatives\")\n\n m, n = A.shape\n x = results.x\n y = results.y\n y_u = np.maximum(y, 0)\n y_l = -np.minimum(y, 0)\n\n if A_idx is None:\n A_idx = A.nonzero()\n\n if P_idx is None:\n P_idx = P.nonzero()\n\n if dy_u is None:\n dy_u = np.zeros(m)\n if dy_l is None:\n dy_l = np.zeros(m)\n\n # Make sure M matrix exists\n if 'M' not in self._derivative_cache:\n # Multiply second-third row by diag(y_u)^-1 and diag(y_l)^-1\n # to make the matrix symmetric\n inv_dia_y_u = spa.diags(np.reciprocal(y_u + 1e-20))\n inv_dia_y_l = spa.diags(np.reciprocal(y_l + 1e-20))\n M = spa.bmat([\n [P, A.T, -A.T],\n [A, spa.diags(A @ x - u) @ inv_dia_y_u, None],\n [-A, None, spa.diags(l - A @ x) @ inv_dia_y_l]\n ], format='csc')\n delta = spa.bmat([[eps_iter_ref * spa.eye(n), None],\n [None, -eps_iter_ref * spa.eye(2 * m)]],\n format='csc')\n self._derivative_cache['M'] = M\n self._derivative_cache['solver'] = qdldl.Solver(M + delta)\n\n rhs = - np.concatenate([dx, dy_u, dy_l])\n\n r_sol = self.derivative_iterative_refinement(rhs)\n\n r_x, r_yu, r_yl = np.split(r_sol, [n, n+m])\n\n # Extract derivatives for the constraints\n rows, cols = A_idx\n dA_vals = (y_u[rows] - y_l[rows]) * r_x[cols] + \\\n (r_yu[rows] - r_yl[rows]) * x[cols]\n dA = spa.csc_matrix((dA_vals, (rows, cols)), shape=A.shape)\n du = - r_yu\n dl = r_yl\n\n # Extract derivatives for the cost (P, q)\n rows, cols = P_idx\n dP_vals = .5 * (r_x[rows] * x[cols] + r_x[cols] * x[rows])\n dP = spa.csc_matrix((dP_vals, P_idx), shape=P.shape)\n dq = r_x\n\n return (dP, dq, dA, dl, du)", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = 
(e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot", "def jacobian(self, dt):\n if dt not in self._F_cache:\n d = self._dimension\n with torch.no_grad():\n F = eye_like(self.sa2, d)\n F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)\n self._F_cache[dt] = F\n\n return self._F_cache[dt]", "def gradient_cf(self, potential, get_energy=True):\n xn, xe, lpn, lpe, alpha, o1, o2 = self(None)\n fn_, fe_ = potential((xn, xe))\n fn_ = (fn_ + self.tw * lpn) * self.wn\n fe_ = (fe_ - lpe) * self.we\n fn = fn_ * alpha\n fe = fe_ * alpha\n dmu = tf.math.divide_no_nan(tf.reduce_sum(fn * self.xn, axis=-1, keepdims=True), self.sigma)\n dsg = tf.math.divide_no_nan(tf.reduce_sum(fn * self.x22, axis=-1, keepdims=True), self.sigma)\n dmu1 = tf.math.divide_no_nan(tf.reduce_sum(fe * self.xi, -1, keepdims=True), o1)\n dmu2 = tf.reduce_sum(fe * self.xj, -1, keepdims=True) / o2\n dsg1 = tf.math.divide_no_nan(tf.reduce_sum(fe * self.xi22, -1, keepdims=True), o1)\n dsg2 = tf.reduce_sum(fe * self.xj22, -1, keepdims=True) / o2\n\n dmu += (tf.concat([dmu1, self.top0], 2) + tf.concat(\n [self.btm0, tf.reshape(tf.reduce_sum(tf.reshape(dmu2[:, :, :729, ...], self.s1), [3, 5]), self.s3),\n tf.reshape(tf.reduce_sum(tf.reshape(dmu2[:, :, 729:810, ...], self.s2), [3, 5]), self.s3),\n tf.reduce_sum(dmu2[:, :, 810:, ...], 2, True)], 2))\n\n dsg += (tf.concat([dsg1, self.top0], 2) + tf.concat(\n [self.btm0, tf.reshape(tf.reduce_sum(tf.reshape(dsg2[:, :, :729, ...], self.s1), [3, 5]), self.s3),\n tf.reshape(tf.reduce_sum(tf.reshape(dsg2[:, :, 729:810, ...], self.s2), [3, 5]), self.s3),\n tf.reduce_sum(dsg2[:, :, 810:, ...], 2, True)], 2))\n\n dalpha = (tf.reduce_sum(fn_, [2, 4], keepdims=True) + tf.reduce_sum(fe_, [2, 4], keepdims=True))\n dw = alpha * (dalpha - tf.reduce_sum(dalpha * alpha, 3, keepdims=True))\n energy = tf.zeros(fn.shape[:2], tf.float64) if not get_energy else \\\n -(tf.reduce_sum(fn, [2, 3, 4]) + tf.reduce_sum(fe, [2, 3, 4]))\n return (-dmu * sqrt2, -dsg, -dw), energy", "def ddy(field, method=None):\n if method == None or method == 'central':\n new_field = field-field\n\n # Apply central differencing in the 'core' region\n new_field[:,:,1:-1] = (field[:,:,2:]-field[:,:,:-2])/field.dL/2\n\n # Apply second order forward/backward differences at boundaries\n new_field[:,:,0] = (field[:,:,2] - 2*field[:,:,1] + field[:,:,0]) / \\\n field.dL**2\n 
new_field[:,:,-1] = (field[:,:,-3] - 2*field[:,:,-2] + field[:,:,-1]) / \\\n field.dL**2\n return new_field\n\n elif method == 'richardson':\n new_field = field[:,2:-2,4:] - 8*field[:,2:-2,3:-1] + 8*field[:,2:-2,1:-3] - field[:,2:-2,:-4]\n new_field = new_field/field.dL/12\n new_field.x = field.x[2:-2,2:-2]\n new_field.y = field.y[2:-2,2:-2]\n return new_field\n\n elif method == 'least_square':\n new_field = 2*field[:,2:-2,:-4] + field[:,2:-2,1:-3] - field[:,2:-2,3:-1] - 2*field[:,2:-2,4:]\n new_field = new_field/field.dL/10\n new_field.x = field.x[2:-2,2:-2]\n new_field.y = field.y[2:-2,2:-2]\n return new_field\n\n else:\n raise ValueError('method keyword argument was not valid.')", "def local_det_chol(node):\r\n if node.op == det:\r\n x, = node.inputs\r\n for (cl, xpos) in x.clients:\r\n if isinstance(cl.op, Cholesky):\r\n L = cl.outputs[0]\r\n return [tensor.prod(extract_diag(L) ** 2)]", "def gradient(init_par, alpha, delta, obs, sigma_obs, ccoef, N):\n\n\n\t## Initial parameters\n\n\tparallax, v, sigma_v = init_par[:-4], init_par[-4:-1], init_par[-1] \n\tplx_obs, mualpha_obs, mudelta_obs = obs[:, 0], obs[:, 1], obs[:, 2]\n\n\t### Define normal triad and proper motions\n\tp, q, r = normalTriad(alpha, delta)\n\tmualpha_mod = np.dot(np.transpose(p),v)*parallax/_A\n\tmudelta_mod = np.dot(np.transpose(q),v)*parallax/_A\n\t\n\tplx_mod, mualpha_mod, mudelta_mod = parallax, mualpha_mod, mudelta_mod\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\n\ta,like, expo, detD = np.ones(N),np.ones(N),np.ones(N), np.ones(N) \n\n\t### Eq. 8 in Lindegren+2000 (Covariance Matrix)\n\tC = np.zeros((3,3,N),dtype=np.float64)\n\tC[0,0,:],C[1,1,:],C[2,2,:] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\n\tcorr_coefficient_plx_mualpha, corr_coefficient_plx_mudelta, corr_coefficient_mualpha_mudelta = np.zeros(N), np.zeros(N), np.zeros(N)\n\tcorr_coefficient_plx_mualpha[:], corr_coefficient_plx_mudelta[:], corr_coefficient_mualpha_mudelta[:] = ccoef[:, 0], ccoef[:, 1], ccoef[:, 2] \n\t\n\tC[0,1,:], C[0,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta\n\tC[1,0,:], C[1,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tC[2,0,:], C[2,1,:] = corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\n\t### Eq. 16 in Lindegren+2000 (Definition of D matrix)\t\n\tE = np.zeros((3,3,N),dtype=np.float64)\n\tE[1,1,:],E[2,2,:] = (sigma_v*parallax[:]/_A)**2., (sigma_v*parallax[:]/_A)**2.\n\tD,invD = np.zeros((3,3,N),dtype=np.float64),np.zeros((3,3,N),dtype=np.float64)\n\tD = np.add(E,C)\n\tfor i in range(N):\n\t\tdetD[i] = matrix_det(D[:,:,i]) \n\t\tinvD[:,:,i] = matrix_inv(D[:,:,i])\n\t\t\n\t\n\ta_c = np.ones((3,N))\n\ta_c = [plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod]\n\t\n\t### First derivatives in Eq. A3 \n\tcprime_pi, cprime_vx, cprime_vy, cprime_vz, = np.ones((3,N)), np.ones((3,N)), \\\n\t\t\t\t\t\t\tnp.ones((3,N)), np.ones((3,N)), \n\tcprime_pi[0,:] = 1.\n\tcprime_pi[1,:] = np.dot(np.transpose(p),v)/_A\n\tcprime_pi[2,:] = np.dot(np.transpose(q),v)/_A\n\t\n\tcprime_vx[0,:] = 0.\n\tcprime_vx[1,:] = -np.sin(alpha)*plx_mod/_A \n\tcprime_vx[2,:] = -np.sin(delta)*np.cos(alpha)*plx_mod/_A\n\n\t\n\tcprime_vy[0,:] = 0.\n\tcprime_vy[1,:] = np.cos(alpha)*plx_mod/_A \n\tcprime_vy[2,:] = -np.sin(delta)*np.sin(alpha)*plx_mod/_A\n\n\tcprime_vz[0,:] = 0.\n\tcprime_vz[1,:] = 0. 
\n\tcprime_vz[2,:] = np.cos(delta)*plx_mod/_A\n\n\tdlnd_dpi, dlnd_dsigmav = np.zeros(N), np.zeros(N)\n\tde_dpi, de_dsigmav = np.zeros(N), np.zeros(N)\n\t\n\n\t### See Eq. A5 \n\tde_dpi[:] = ((sigma_v/_A)**2.)*2.*plx_mod[:]\n\tde_dsigmav[:] = ((plx_mod[:]/_A)**2.)*2.*sigma_v\n\t\n\tdlnd_dpi[:] = (invD[1,1,:] + invD[2,2,:])*de_dpi[:] \n\tdlnd_dsigmav[:] = (invD[1,1,:] + invD[2,2,:])*de_dsigmav[:]\n\t\n\t\n\t\n\t### See Eq. A6\n\tdG_dpi, dG_dsigmav = np.zeros((3,3,N)), np.zeros((3,3,N)) \n\t\n\tdG_dpi[0,0,:], dG_dpi[0,1,:], dG_dpi[0,2,:] = (-invD[0,1,:]*invD[1, 0, :] - invD[0, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[0,1,:]*invD[1, 1, :] - invD[0,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[0,1,:]*invD[1,2,:] - invD[0,2,:]*invD[2,2,:])*de_dpi[:]\n\tdG_dpi[1,0,:], dG_dpi[1,1,:], dG_dpi[1,2,:] = (-invD[1,1,:]*invD[1, 0, :] - invD[1, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[1,1,:]*invD[1, 1, :] - invD[1,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[1,1,:]*invD[1,2,:] - invD[1,2,:]*invD[2,2,:])*de_dpi[:]\n\tdG_dpi[2,0,:], dG_dpi[2,1,:], dG_dpi[2,2,:] = (-invD[2,1,:]*invD[1, 0, :] - invD[2, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[2,1,:]*invD[1, 1, :] - invD[2,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[2,1,:]*invD[1,2,:] - invD[2,2,:]*invD[2,2,:])*de_dpi[:]\n\t\n\n\tdG_dsigmav[0,0,:], dG_dsigmav[0,1,:], dG_dsigmav[0,2,:] = (-invD[0,1,:]*invD[1, 0, :] - invD[0, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[0,1,:]*invD[1, 1, :] - invD[0,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[0,1,:]*invD[1,2,:] - invD[0,2,:]*invD[2,2,:])*de_dsigmav[:]\n\tdG_dsigmav[1,0,:], dG_dsigmav[1,1,:], dG_dsigmav[1,2,:] = (-invD[1,1,:]*invD[1, 0, :] - invD[1, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[1,1,:]*invD[1, 1, :] - invD[1,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[1,1,:]*invD[1,2,:] - invD[1,2,:]*invD[2,2,:])*de_dsigmav[:]\n\tdG_dsigmav[2,0,:], dG_dsigmav[2,1,:], dG_dsigmav[2,2,:] = (-invD[2,1,:]*invD[1, 0, :] - invD[2, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[2,1,:]*invD[1, 1, :] - invD[2,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[2,1,:]*invD[1,2,:] - invD[2,2,:]*invD[2,2,:])*de_dsigmav[:]\n\n\tf_dpi = np.zeros((N), dtype=np.float64) \n\t\n\t\n\tfor i in range(N):\n\t\tf_dpi_1, f_dpi_3 = 0., 0.0 \n\t\tfor ia in range(3):\n\t\t\tfor ib in range(3):\n\t\t\t\tf_dpi_1 += invD[ia,ib,i]*cprime_pi[ia,i]*a_c[ib][i]\n\t\t\t\tf_dpi_3 += (-0.5)*(dG_dpi[ia,ib,i]*a_c[ia][i]*a_c[ib][i])\n\t\t\t\t\t\n\t\tf_dpi_2 = (-0.5)*dlnd_dpi[i]\n\t\tf_dpi[i] = f_dpi_1 + f_dpi_2 + f_dpi_3\n\t\t\n\n\tf_vx, f_vy, f_vz, f_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) \n\n\tf_vx = np.sum(invD[0,0,:]*cprime_vx[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vx[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vx[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vx[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vx[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vx[1,:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vx[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vx[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vx[2,:]*a_c[2][:])\n\t\n\tf_vy = np.sum(invD[0,0,:]*cprime_vy[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vy[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vy[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vy[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vy[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vy[1][:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vy[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vy[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vy[2,:]*a_c[2][:])\n\n\tf_vz = 
np.sum(invD[0,0,:]*cprime_vz[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vz[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vz[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vz[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vz[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vz[1,:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vz[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vz[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vz[2,:]*a_c[2][:])\n\t\n\tf_sigmav = np.sum(-0.5*(dG_dsigmav[0,0,:]*a_c[0][:]*a_c[0][:] + dG_dsigmav[0,1,:]*a_c[1][:]*a_c[0][:]+ dG_dsigmav[0,2,:]*a_c[2][:]*a_c[0][:] + \\\n\t\t dG_dsigmav[1,0,i]*a_c[1][:]*a_c[0][:] + dG_dsigmav[1,1,:]*a_c[1][:]*a_c[1][:]+ dG_dsigmav[1,2,:]*a_c[1][:]*a_c[2][:] + \t\n\t\t dG_dsigmav[2,0,i]*a_c[2][:]*a_c[0][:] + dG_dsigmav[2,1,:]*a_c[2][:]*a_c[1][:]+ dG_dsigmav[2,2,:]*a_c[2][:]*a_c[2][:]))\n\t\n\n\tf_sigmav = f_sigmav - 0.5*np.sum(dlnd_dsigmav)\t\n\tf = np.concatenate((f_dpi, np.array([f_vx, f_vy, f_vz, f_sigmav]))) ### Grad L(theta), see Eq. 17\n\treturn -2.*f \t\t\t\t\t\t ### Grad U(theta), see Eq. 18", "def det(mtx):\n if not is_square(mtx):\n raise ValueError(\"Matrix should be square\")\n if len(mtx) == 2:\n return mtx[0][0] * mtx[1][1] - mtx[0][1] * mtx[1][0]\n\n result = 0\n sign = 1\n for inx in range(len(mtx)):\n next_mtx = get_minor_mtx(mtx, 0, inx)\n result += sign * (mtx[0][inx] * det(next_mtx))\n sign *= -1\n return result" ]
[ "0.6885692", "0.65035003", "0.62627643", "0.62573814", "0.6247173", "0.6219265", "0.6201201", "0.619879", "0.6149196", "0.6138502", "0.60837907", "0.6081997", "0.6081997", "0.6045166", "0.6043331", "0.6040035", "0.6020596", "0.6014159", "0.600143", "0.59920883", "0.59908885", "0.5982734", "0.5950538", "0.5949337", "0.59363395", "0.59175146", "0.590359", "0.59027284", "0.59012634", "0.58971363", "0.58907276", "0.5890504", "0.58891016", "0.58888257", "0.58624613", "0.58597463", "0.58593607", "0.5842004", "0.5841317", "0.58348954", "0.58224577", "0.5813928", "0.5812084", "0.5808016", "0.58067286", "0.57914793", "0.57820994", "0.5772348", "0.57705534", "0.5769604", "0.57687515", "0.57630205", "0.5755876", "0.57538915", "0.5753227", "0.57482386", "0.5747402", "0.5744218", "0.57406825", "0.57406825", "0.57388484", "0.5738823", "0.57382584", "0.57367396", "0.573557", "0.5735189", "0.57276696", "0.57258713", "0.5710849", "0.5707394", "0.57013994", "0.5695874", "0.5694693", "0.5693386", "0.5690821", "0.5687571", "0.5684301", "0.56834394", "0.56788", "0.5674708", "0.5670777", "0.5669225", "0.56602585", "0.56597877", "0.5655768", "0.5655377", "0.5653163", "0.56418157", "0.5641398", "0.5640695", "0.5634412", "0.56290203", "0.5626649", "0.56243306", "0.5622138", "0.56213", "0.5620395", "0.56189823", "0.56087345", "0.56065935", "0.56065106" ]
0.0
-1
Compute the derivatives of the collinear law (design matrix)
def __ComputeDesignMatrix_RzRyRz(self, groundPoints): # initialization for readability azimuth = self.exteriorOrientationParameters[3] phi = self.exteriorOrientationParameters[4] kappa = self.exteriorOrientationParameters[5] # Coordinates subtraction dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0] dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1] dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2] dXYZ = np.vstack([dX, dY, dZ]) rotationMatrixT = self.rotationMatrix_RzRyRz.T rotatedG = rotationMatrixT.dot(dXYZ) rT1g = rotatedG[0, :] rT2g = rotatedG[1, :] rT3g = rotatedG[2, :] focalBySqauredRT3g = self.camera.focalLength / rT3g ** 2 dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :] dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :] dgdX0 = np.array([-1, 0, 0], 'f') dgdY0 = np.array([0, -1, 0], 'f') dgdZ0 = np.array([0, 0, -1], 'f') # Derivatives with respect to X0 dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0) dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0) # Derivatives with respect to Y0 dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0) dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0) # Derivatives with respect to Z0 dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0) dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0) dRTdOmega = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'azimuth').T dRTdPhi = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'phi').T dRTdKappa = Compute3DRotationDerivativeMatrix_RzRyRz(azimuth, phi, kappa, 'kappa').T gRT3g = dXYZ * rT3g # Derivatives with respect to Omega dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) - rT1g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0] dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) - rT2g * (dRTdOmega[2, :][None, :].dot(dXYZ)))[0] # Derivatives with respect to Phi dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) - rT1g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0] dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) - rT2g * (dRTdPhi[2, :][None, :].dot(dXYZ)))[0] # Derivatives with respect to Kappa dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) - rT1g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0] dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) - rT2g * (dRTdKappa[2, :][None, :].dot(dXYZ)))[0] # all derivatives of x and y dd = np.array([np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T, np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T]) a = np.zeros((2 * dd[0].shape[0], 6)) a[0::2] = dd[0] a[1::2] = dd[1] return a
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def derivatives(x_p, y_p):\r\n # set up the matrix equation\r\n n = x_p.shape[0]\r\n M = np.zeros( [n,n] )\r\n d = np.zeros( [n,1] )\r\n \r\n # fill in the constants where they can be\r\n for i in np.arange(1,n-1 ): # for all but the first and last row\r\n M[i,i-1 ] = ( x_p[i] - x_p[i-1] ) / 6.\r\n M[i,i] = ( x_p[i+1] - x_p[i-1] ) / 3.\r\n M[i,i+1] = ( x_p[i+1] - x_p[i] ) /6.\r\n d[i,0 ] = ( y_p[i+1] - y_p[i] ) / ( x_p[i+1] - x_p[i] ) - ( y_p[i] - y_p[i-1] ) / ( x_p[i] - x_p[i-1] )\r\n \r\n M[0,0],M[-1,-1] = 1.,1. # compactly sets the BCs\r\n \r\n LU = lu.LU_decomp(M) # solves the matrix equations\r\n return lu.FB_sub(LU.Low, LU.Upp, d) # find and return 2nd derivatives\r", "def _core_calc_degrad(self,bd,Ld) :\n\t\tdegrad = np.dot(Ld,bd) # Do matrix multiplication \n\t\tdegrad = np.exp(degrad) # Exponentiate to convert log to real\n\t\treturn degrad", "def det(self):\n if self.x == 0 or self.y == 0:\n return None\n elif self.x == 1 or self.y == 1:\n return self.retrieve(0,0)\n else:\n out = 0.0\n for x in xrange(0, self.x):\n out += self.retrieve(0,x)*self.C(0,x)\n return out", "def det(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square\")\n\n if self.rows == 1:\n return self.row(1)[0]\n\n if self.rows == 2:\n return self.entry(1,1) * self.entry(2,2) - self.entry(1,2) * self.entry(2,1)\n\n det = 0\n row_to_expand = 1\n\n for i in range(1, self.columns + 1):\n det += self.entry(row_to_expand, i) * self._cofactor(row_to_expand, i)\n\n return det", "def compute_grad(beta, lambdat, X, y):\n return -2/len(y)*(np.maximum(0, 1-(\n (y[:, np.newaxis]*X).dot(beta)))).dot(\n y[:, np.newaxis]*X) + 2 * lambdat * beta", "def jacobian(self,x,p,fun):\n n = self.n\n y = fun(x,p)\n h = 1e-4\n nout = np.size(y)\n dfdx = np.zeros((nout,n))\n for j in range(n):\n dx1 = np.zeros(n)\n dx2 = np.zeros(n)\n dx1[j] = -h\n dx2[j] = h\n dfdx[:,j] = (fun(x+dx2,p)-fun(x+dx1,p))/(2*h)\n return dfdx", "def d_dx(self, points):\n dk_dx = np.zeros((points.shape[0] + 3, # i\n self.source.n_points, # k\n self.source.n_dims)) # l\n dk_dx[:-3, :] = self.kernel.d_dl(points)\n\n affine_derivative = np.array([[0, 0],\n [1, 0],\n [0, 1]])\n dk_dx[-3:, :] = affine_derivative[:, None]\n\n return np.einsum('ij, ikl -> klj', self.coefficients, dk_dx)", "def derivative_matrix(g):\n\n def _(g):\n B = g.B[0].grad\n N = g.N[0]\n P = g.dec.P(1)\n H = np.vstack(P(B(i)) for i in range(N)).T\n return H\n\n return _(g), _(g.dual)", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0", "def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n \n for nl in self.nlists: \n nl.separations()\n \n for force in self.forces:\n force.apply()\n\n # Controllers is the new implementation of forces\n for controller in self.controllers:\n controller.apply()", "def getDerivativeSkeletonEqs(self):\n xs = np.array(self.XYProjections)[:,0]\n L = xs[-1] - xs[0]\n \n # Derivation\n xi = sy.symbols('xi')\n self.dudx_xyPlane = sy.diff(self.u_xyPlane, xi) / L\n \n # Then calculate the derivation equation on x-z plane\n self.dudx_xzPlane = 
sy.diff(self.u_xzPlane, xi) / L", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx", "def determinant(self) -> float:\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Determinant must be for a square matrix; this one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n # Note: this one should be recursive....\n if num_R == 1:\n return self.mat[0][0]\n det =0\n for i in range(num_R):\n det += self.mat[0][i] * self.get_minor(0,i).determinant() * (-1)**i\n return det\n pass # remove this when you add your code.\n # -------------------------------------------------------", "def _compute_derivatives(image, mode=\"constant\", cval=0):\n\n derivatives = [\n ndi.sobel(image, axis=i, mode=mode, cval=cval)\n for i in range(image.ndim)\n ]\n\n return derivatives", "def det(A):\n # Section 1: Establish n parameter and copy A\n n = len(A)\n AM = A[:]\n\n # Section 2: Row manipulate A into an upper triangle matrix\n for fd in range(n): # fd stands for focus diagonal\n if AM[fd][fd] == 0:\n AM[fd][fd] = 1.0e-18 # Cheating by adding zero + ~zero\n for i in range(fd+1, n): # skip row with fd in it.\n crScaler = AM[i][fd] / AM[fd][fd] # cr stands for \"current row\".\n for j in range(n): # cr - crScaler * fdRow, one element at a time.\n AM[i][j] = AM[i][j] - crScaler * AM[fd][j]\n\n # Section 3: Once AM is in upper triangle form ...\n product = 1.0\n for i in range(n):\n product *= AM[i][i] # ... 
product of diagonals is determinant\n\n return product", "def fluid_deriv(self):\n deriv = np.zeros((self.fluid_constraints['num_eq'],\n 2 * self.num_i + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n for j in range(self.num_nw_fluids):\n deriv[i * self.num_nw_fluids + j, i, j + 3] = 1\n deriv[i * self.num_nw_fluids + j, self.num_i + i, j + 3] = -1\n return deriv", "def _derX(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c], y[c], z[c])\n return dfdx", "def det(a, b, c):\n d = (b[0]*c[1]-c[0]*b[1])+(c[0]*a[1]-a[0]*c[1])+(a[0]*b[1]-a[1]*b[0])\n return d", "def efSolver2(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n \"\"\"\n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n \"\"\"\n\n ##x-component#\n #if i==0: \n #x-component#\n \"\"\"\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n \"\"\"\n \n # forward\n self.ef[0,0:self.nj,0:self.nk,0] = -(-3*self.phi[0,0:self.nj,0:self.nk]+\\\n 4*self.phi[1,0:self.nj,0:self.nk]-\\\n self.phi[2,0:self.nj,0:self.nk])/(2*dx)\n \n #elif i==self.ni-1: \n \"\"\"\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n \"\"\" \n # backward\n self.ef[self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[self.ni-3,0:self.nj,0:self.nk]-\\\n 4*self.phi[self.ni-2,0:self.nj,0:self.nk]+\\\n 3*self.phi[self.ni-1,0:self.nj,0:self.nk])/(2*dx)\n \"\"\"\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n \"\"\" \n #central\n self.ef[1:self.ni-1,0:self.nj,0:self.nk,0] = -(self.phi[2:self.ni,0:self.nj,0:self.nk] - \\\n self.phi[0:self.ni-2,0:self.nj,0:self.nk])/(2*dx)\n\n\n #y-component\n #if j==0:\n \"\"\"\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,0,0:self.nk,1] = -(-3*self.phi[0:self.ni,0,0:self.nk] + \\\n 4*self.phi[0:self.ni,1,0:self.nk]-\\\n self.phi[0:self.ni,2,0:self.nk])/(2*dy)\n #elif j==self.nj-1:\n \"\"\"\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n \n \"\"\"\n self.ef[0:self.ni,self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,self.nj-3,0:self.nk] - \\\n 4*self.phi[0:self.ni,self.nj-2,0:self.nk] +\\\n 3*self.phi[0:self.ni,self.nj-1,0:self.nk])/(2*dy)\n #else:\n \"\"\"\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n \"\"\"\n self.ef[0:self.ni,1:self.nj-1,0:self.nk,1] = -(self.phi[0:self.ni,2:self.nj,0:self.nk] - \\\n self.phi[0:self.ni,0:self.nj-2,0:self.nk])/(2*dy)\n\n #z-component\n '''\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n \n '''\n #z-component\n #if k==0:\n self.ef[0:self.ni,0:self.nj,0,2] = -(-3*self.phi[0:self.ni,0:self.nj,0] + \\\n 4*self.phi[0:self.ni,0:self.nj,1]-\n self.phi[0:self.ni,0:self.nj,2])/(2*dz)\n\n \"\"\"\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n \"\"\"\n \n #elif 
k==self.nk-1:\n self.ef[0:self.ni,0:self.nj,self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,self.nk-3] - \\\n 4*self.phi[0:self.ni,0:self.nj,self.nk-2] + \\\n 3*self.phi[0:self.ni,0:self.nj,self.nk-1])/(2*dz) \n \"\"\"\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)\n \"\"\"\n #else:\n self.ef[0:self.ni,0:self.nj,1:self.nk-1,2] = -(self.phi[0:self.ni,0:self.nj,2:self.nk] - \\\n self.phi[0:self.ni,0:self.nj,0:self.nk-2])/(2*dz)", "def eval_Dxy(self):\n\n return self.Xf - self.Yf", "def _derX(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n dfdx = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdx[c] = self.functions[j].derivativeX(x[c], y[c])\n return dfdx", "def compute_derivs_matrices(vecs, adv_vecs, dt):\n return (adv_vecs - vecs)/(1.*dt)", "def _gradients(self, partial):\r\n dL_dfhat, I_KW_i = self._shared_gradients_components()\r\n dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = self.noise_model._laplace_gradients(self.f_hat, self.data, extra_data=self.extra_data)\r\n\r\n #len(dlik_dthetaL)\r\n num_params = len(self._get_param_names())\r\n # make space for one derivative for each likelihood parameter\r\n dL_dthetaL = np.zeros(num_params)\r\n for thetaL_i in range(num_params):\r\n #Explicit\r\n dL_dthetaL_exp = ( np.sum(dlik_dthetaL[:, thetaL_i])\r\n #- 0.5*np.trace(mdot(self.Ki_W_i, (self.K, np.diagflat(dlik_hess_dthetaL[thetaL_i]))))\r\n + np.dot(0.5*np.diag(self.Ki_W_i)[:,None].T, dlik_hess_dthetaL[:, thetaL_i])\r\n )\r\n\r\n #Implicit\r\n dfhat_dthetaL = mdot(I_KW_i, self.K, dlik_grad_dthetaL[:, thetaL_i])\r\n dL_dthetaL_imp = np.dot(dL_dfhat, dfhat_dthetaL)\r\n dL_dthetaL[thetaL_i] = dL_dthetaL_exp + dL_dthetaL_imp\r\n\r\n return dL_dthetaL", "def dgdy(self, X):\n \n return 3*X[1]**2", "def determinant(self):\n d1 = self._row_1[0] * (self._row_2[1] * self._row_3[2] - self._row_2[2] * self._row_3[1])\n d2 = self._row_1[1] * (self._row_2[0] * self._row_3[2] - self._row_2[2] * self._row_3[0])\n d3 = self._row_1[2] * (self._row_2[0] * self._row_3[1] - self._row_2[1] * self._row_3[0])\n return d1 - d2 + d3", "def efSolver(self):\n dx = self.dh[0] # dx\n dy = self.dh[1] # dy\n dz = self.dh[2] # dz\n \n for i in np.arange(0, self.ni):\n for j in np.arange(0, self.nj):\n for k in np.arange(0, self.nk):\n\n #x-component#\n if i==0: \n # forward\n self.ef[i][j][k][0] = -(-3*self.phi[i][j][k]+\\\n 4*self.phi[i+1][j][k]-\\\n self.phi[i+2][j][k])/(2*dx)\n elif i==self.ni-1: \n # backward\n self.ef[i][j][k][0] = -(self.phi[i-2][j][k]-\\\n 4*self.phi[i-1][j][k]+\\\n 3*self.phi[i][j][k])/(2*dx)\n else: \n #central\n self.ef[i][j][k][0] = -(self.phi[i+1][j][k] - \\\n self.phi[i-1][j][k])/(2*dx)\n\n #y-component\n if j==0:\n self.ef[i][j][k][1] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j+1][k]-\\\n self.phi[i][j+2][k])/(2*dy)\n elif j==self.nj-1:\n self.ef[i][j][k][1] = -(self.phi[i][j-2][k] - \\\n 4*self.phi[i][j-1][k] +\\\n 3*self.phi[i][j][k])/(2*dy)\n else:\n self.ef[i][j][k][1] = -(self.phi[i][j+1][k] - \\\n self.phi[i][j-1][k])/(2*dy)\n\n #z-component\n if k==0:\n self.ef[i][j][k][2] = -(-3*self.phi[i][j][k] + \\\n 4*self.phi[i][j][k+1]-\n self.phi[i][j][k+2])/(2*dz)\n elif k==self.nk-1:\n self.ef[i][j][k][2] = -(self.phi[i][j][k-2] - \\\n 4*self.phi[i][j][k-1] + \\\n 3*self.phi[i][j][k])/(2*dz)\n else:\n self.ef[i][j][k][2] = -(self.phi[i][j][k+1] - \\\n self.phi[i][j][k-1])/(2*dz)", "def 
det_matrix_2x2(m: list):\n return m[0][0]*m[1][1] - m[0][1]*m[1][0]", "def _forward_log_det_jacobian(self, x):\n d = self._compute_shared(x=x)\n relx = (x - d.x_k) / d.w_k\n relx = relx # tf.where(d.out_of_bounds, 0.5*tf.ones_like(x), relx)\n grad = (\n 2 * tf.math.log(d.s_k) +\n tf.math.log(d.d_kp1 * relx**2 + 2 * d.s_k * relx * (1 - relx) + # newln\n d.d_k * (1 - relx)**2) -\n 2 * tf.math.log((d.d_kp1 + d.d_k - 2 * d.s_k) * relx *\n (1 - relx) + d.s_k))\n return grad # tf.where(d.out_of_bounds, tf.zeros_like(grad), grad)", "def df2dx3_func(self,X):\n result = (\n self.rj*self.rm*self.k_spr*self.b_spr * (\n np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0]))\n * ((self.rm*X[2] - self.rj*X[0])>=0)\n ) / self.Ij\n )\n return(result)", "def jacobian(self, dt):\n return self._F_cache", "def mass_flow_deriv(self):\n deriv = np.zeros((2, 4 + self.num_vars, self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 0] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 0] = -1\n return deriv", "def pgradient(self):\n d = {}\n\n # Det coeff\n det_coeff_grad = (\n self._dets[0][0, :, self._det_map[0]]\n * self._dets[1][0, :, self._det_map[1]]\n * np.exp(\n self._dets[0][1, :, self._det_map[0]]\n + self._dets[1][1, :, self._det_map[1]]\n )\n )\n\n curr_val = self.value()\n d[\"det_coeff\"] = (\n det_coeff_grad.T / (curr_val[0] * np.exp(curr_val[1]))[:, np.newaxis]\n )\n\n # Mo_coeff, adapted from SlaterUHF\n for parm in [\"mo_coeff_alpha\", \"mo_coeff_beta\"]:\n s = 0\n if \"beta\" in parm:\n s = 1\n\n ao = self._aovals[\n :, s * self._nelec[0] : self._nelec[s] + s * self._nelec[0], :\n ]\n pgrad_shape = (ao.shape[0],) + self.parameters[parm].shape\n pgrad = np.zeros(pgrad_shape)\n\n largest_mo = np.max(np.ravel(self._det_occup[s]))\n for i in range(largest_mo + 1): # MO loop\n if i not in self.freeze_orb[s]:\n for det in range(self.parameters[\"det_coeff\"].shape[0]): # Det loop\n if (\n i in self._det_occup[s][self._det_map[s][det]]\n ): # Check if MO in det\n col = self._det_occup[s][self._det_map[s][det]].index(i)\n pgrad[:, :, i] += (\n self.parameters[\"det_coeff\"][det]\n * d[\"det_coeff\"][:, det, np.newaxis]\n * self._testcol(self._det_map[s][det], col, s, ao)\n )\n d[parm] = np.array(pgrad)\n return d", "def darcy_func(self):\n i, o = self.inl[0].to_flow(), self.outl[0].to_flow()\n\n if abs(i[0]) < 1e-4:\n return i[1] - o[1]\n\n visc_i = visc_mix_ph(i, T0=self.inl[0].T.val_SI)\n visc_o = visc_mix_ph(o, T0=self.outl[0].T.val_SI)\n v_i = v_mix_ph(i, T0=self.inl[0].T.val_SI)\n v_o = v_mix_ph(o, T0=self.outl[0].T.val_SI)\n\n re = 4 * abs(i[0]) / (np.pi * self.D.val * (visc_i + visc_o) / 2)\n\n return ((i[1] - o[1]) - 8 * abs(i[0]) * i[0] * (v_i + v_o) / 2 *\n self.L.val * lamb(re, self.ks.val, self.D.val) /\n (np.pi ** 2 * self.D.val ** 5))", "def joint_model_derivative_z(response, design_matrix, a_cols, h_cols, param_vector_a, param_vector_h, int_a, int_h, indicators, weights, z, prior_means, prior_vars, home_points=\" Home Points\", away_points=\" Away Points\", MAP=False):\n\n # Calculating necessary elements for gradient calculation\n # REQUIRES AWAY AND HOME COEFFICIENTS TO BE IN PARAM_VECTOR[0] AND PARAM_VECTOR[1] RESPECTIVELY\n # REQUIRES MODEL PRECISION IN PARAM_VECTOR[-1]\n # OTHER DIMS OF PARAMETER VECTOR SHOULD MATCH DESIGN MATRIX COLUMNS\n K_teams = int(len(z) / 2)\n gradient = np.zeros(2 * K_teams).reshape((-1,1))\n second_gradient = np.zeros(2 * K_teams).reshape((-1,1))\n\n # Away Score-based derivatives\n away_predictions = design_matrix.loc[:, 
a_cols].dot(param_vector_a[:-1]) + int_a\n away_difference = response[away_points].reshape(-1,1) - away_predictions\n away_offense_derivatives = (param_vector_a[0] / param_vector_a[-1]) * away_difference\n home_defense_derivatives = (param_vector_a[1] / param_vector_a[-1]) * away_difference\n\n # Home Score-based derivatives\n home_predictions = design_matrix.loc[:, h_cols].dot(param_vector_h[:-1]) + int_h\n home_difference = response[home_points].reshape(-1,1) - home_predictions\n home_offense_derivatives = (param_vector_h[0] / param_vector_h[-1]) * home_difference\n away_defense_derivatives = (param_vector_h[1] / param_vector_h[-1]) * home_difference\n\n team_updates = [[] for _ in range(2 * K_teams)]\n\n # Summing gradient into respective team latent variables\n for i in range(design_matrix.shape[0]):\n away_offense_indicator_vector = create_one_hot(indicators[i][0], 2 * K_teams)\n away_defense_indicator_vector = create_one_hot(indicators[i][0] + K_teams, 2 * K_teams)\n home_offense_indicator_vector = create_one_hot(indicators[i][1], 2 * K_teams)\n home_defense_indicator_vector = create_one_hot(indicators[i][1] + K_teams, 2 * K_teams)\n\n # These do not need to be separated, just easier to read this way\n # Away score gradient adding\n gradient += weights[i] * (away_offense_indicator_vector * away_offense_derivatives.loc[i, 0] + home_defense_indicator_vector * home_defense_derivatives.loc[i, 0])\n second_gradient += weights[i] * (away_offense_indicator_vector * -1 * param_vector_a[0] ** 2 / param_vector_a[-1] + home_defense_indicator_vector * -1 * param_vector_a[1]**2 / param_vector_a[-1])\n\n team_updates[indicators[i][0]].append(weights[i] * away_offense_derivatives.loc[i,0])\n team_updates[indicators[i][1] + K_teams].append(weights[i] * home_defense_derivatives.loc[i,0])\n\n # Home score gradient adding\n gradient += weights[i] * (away_defense_indicator_vector * away_defense_derivatives.loc[i, 0] + home_offense_indicator_vector * home_offense_derivatives.loc[i, 0])\n second_gradient += weights[i] * (away_defense_indicator_vector * -1 * param_vector_h[1] ** 2 / param_vector_h[-1] + home_offense_indicator_vector * -1 * param_vector_h[0] ** 2 / param_vector_h[-1])\n\n team_updates[indicators[i][0] + K_teams].append(weights[i] * away_defense_derivatives.loc[i, 0])\n team_updates[indicators[i][1]].append(weights[i] * home_offense_derivatives.loc[i, 0])\n\n # Adjusting gradient for MAP estimate (prior over latent variables)\n if MAP:\n MAP_gradient = -1 * (z - prior_means) / prior_vars\n gradient += MAP_gradient\n second_gradient += -1 / prior_vars\n\n return gradient, second_gradient, np.array([np.std(team_game_updates) for team_game_updates in team_updates])", "def gradient(self):\n gradients = [func.gradient for func in self.functionals]\n return DiagonalOperator(*gradients)", "def derivative(self, theta):\n diag_gamma = np.dot(theta.T, self.X.T)\n logistic_term = self.logistic_fn(diag_gamma)\n diag_gamma = logistic_term * (1.0 - logistic_term)\n gamma = np.diag(diag_gamma)\n\n # v computation\n diags_v = 1.0 - 2*self.logistic_fn(np.dot(theta.T, self.X.T))\n diags_v = diags_v.reshape((-1, 1))\n diags_v = diags_v*self.X\n assert diags_v.shape == self.X.shape #N*d shape\n\n XtGamma = np.dot(self.X.T, gamma) # d*N shape\n\n # TODO: Verifier car pas sur de mon coup ... 
et surtout plus long...\n # id = np.eye(self.n_examples).reshape((self.n_examples, self.n_examples, 1))\n # diags_v = diags_v.reshape((self.n_examples, 1, self.dim))\n # v = id*diags_v # n*n*d tensor\n # left = np.tensordot(XtGamma, v, axes=(1, 0)) # shape d*N*d\n # assert left.shape == (self.dim, self.n_examples, self.dim)\n # dg = np.tensordot(left, self.X, axes=(1, 0))\n # dg = np.swapaxes(dg, axis1=-2, axis2=-1)\n\n dg = np.zeros((self.dim, self.dim, self.dim))\n for idx, v_i_diag in enumerate(diags_v.T):\n v_i = np.diag(v_i_diag)\n dg_di = np.dot(np.dot(XtGamma, v_i), self.X)\n dg[:, :, idx] = dg_di\n return dg", "def df2dx5_func(self,X):\n result = (\n -self.rj*self.rm*self.k_spr*self.b_spr * (\n np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0]))\n * ((self.rm*X[4] + self.rj*X[0])>=0)\n ) / self.Ij\n )\n return(result)", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def _cost_function_derivative(self, y_pred, y, X, m):\n\n derivatives= np.zeros((X.shape[0],1))\n for j in range(X.shape[0]):\n auxsum = 0\n for i in range(m):\n auxsum+=(y_pred[0][i] -y[0][i])*X[j][i]\n derivatives[j][0] = self.theta[j][0] - self.alpha * 1/m * auxsum\n\n #empty_derivatives = np.zeros((X.shape[0],1))\n return derivatives", "def _derY(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c], y[c], z[c])\n return dfdy", "def det2(m):\n\t(a,b), (c,d) = m\n\treturn a*d - b*c", "def compute_derivative(self, r, dr):\n\n out = None\n\n if r.requires_grad == False:\n r.requires_grad = True\n\n with torch.enable_grad():\n\n for jast in self.jastrow_functions:\n\n kernel = jast(r)\n ker_grad = self._grads(kernel, r)\n ker_grad = ker_grad.unsqueeze(1) * dr\n ker_grad = ker_grad.unsqueeze(0).detach().clone()\n\n if out is None:\n out = ker_grad\n else:\n out = torch.cat((out, ker_grad), axis=0)\n\n return out", "def d(self, df):\n # Get variable names\n var = [key for key, _ in self.marginals.items()]\n df_u = self.sample2pr(df)[var]\n # Evaluate copula density\n l_copula = self.copula.d(df_u.values)\n # Evaluate marginal densities\n L_marginals = zeros((df.shape[0], len(var)))\n for i, v in enumerate(var):\n L_marginals[:, i] = self.marginals[v].d(df[v])\n l_marginals = prod(L_marginals, axis=1)\n\n return l_copula * l_marginals", "def determinant(self):\n if self.cols != self.rows:\n raise Exception ('Matrix is not square!')\n for i in range(self.rows):\n if self.values[i][i] == 0:\n raise Exception ('There is zero on the main diagonal')\n #TODO: Rearrange the lines, that the main diagonal don't have a zero values \n\n arr = self.values[:]\n for i in range(self.rows):\n for j in range(self.cols):\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n if i > j :\n arr2 = arr[i][j]/diag[j]\n arr1 = [round(x * arr2, 4) for x in arr[i-i+j]]\n arr[i] = map(lambda x,y: round(x - y, 4) , arr[i], arr1 )\n\n diag = [arr[l][p] for p in range(self.cols) for l in range(self.rows) if l == p ]\n det = 1\n for i in range(len(diag)):\n det *= diag[i]\n if det != 0 :\n return True\n else:\n return False", "def jacobian(self, points):\n # check if re-computation of dW/dx can be avoided\n if not np.array_equal(self._cached_points, points):\n # recompute dW/dx, i.e. 
the relative weight of each point wrt\n # the source landmarks\n self.dW_dX = self.transform.weight_points(points)\n # cache points\n self._cached_points = points\n\n model_jacobian = self.pdm.model.jacobian\n points = self.pdm.model.mean.points\n\n # compute dX/dp\n\n # dX/dq is the Jacobian of the global transform evaluated at the\n # mean of the model.\n dX_dq = self._global_transform_jacobian(points)\n # dX_dq: n_points x n_global_params x n_dims\n\n # by application of the chain rule dX_db is the Jacobian of the\n # model transformed by the linear component of the global transform\n dS_db = model_jacobian\n dX_dS = self.pdm.global_transform.jacobian_points(points)\n dX_db = np.einsum('ilj, idj -> idj', dX_dS, dS_db)\n # dS_db: n_points x n_weights x n_dims\n # dX_dS: n_points x n_dims x n_dims\n # dX_db: n_points x n_weights x n_dims\n\n # dX/dp is simply the concatenation of the previous two terms\n dX_dp = np.hstack((dX_dq, dX_db))\n\n # dW_dX: n_points x n_points x n_dims\n # dX_dp: n_points x n_params x n_dims\n dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)\n # dW_dp: n_points x n_params x n_dims\n\n return dW_dp", "def __call__ ( self , func , x , h , der = False ) :\n\n ## calculate differences \n imax = self.__order + 2 if der else self.__order + 1\n i = 0\n while i < imax : \n j = i + 1\n self.__df[i] = func ( x + j * h ) - func ( x - j * h )\n i += 1\n \n ## 1) calculate 1st derivative \n result = dot_fma ( self.__order + 1 , self.__df , self.__d1 ) / ( self.__sf1 * h ) \n if not der : return result \n \n ## 2) calculate Nth derivative \n dd = dot_fma ( self.__order + 2 , self.__df , self.__d2 ) / ( self.__sf2 * h**(self.__order*2+3) ) \n \n return result, dd", "def dfdx(x,t,dt):\n assert is1d(x)\n F = np.zeros((m,m))\n # X\n md = lambda i: np.mod(i,nX)\n for i in range(nX):\n # wrt. X\n F[i,i] = - dt + 1\n F[i,md(i-2)] = - dt * x[md(i-1)]\n F[i,md(i+1)] = + dt * x[md(i-1)]\n F[i,md(i-1)] = dt *(x[md(i+1)]-x[md(i-2)])\n # wrt. Y\n F[i,nX+iiY[i]] = dt * -h*c/b\n # Y\n md = lambda i: nX + np.mod(i-nX,nX*J)\n for i in range(nX,(J+1)*nX):\n # wrt. Y\n F[i,i] = -dt*c + 1\n F[i,md(i-1)] = +dt*c*b * x[md(i+1)]\n F[i,md(i+1)] = -dt*c*b * (x[md(i+2)]-x[md(i-1)])\n F[i,md(i+2)] = -dt*c*b * x[md(i+1)]\n # wrt. 
X\n F[i,iiX[i-nX]] = dt * h*c/b\n return F", "def dV(X):\n return -4 * a * np.power(X, 3) + 2 * b * X", "def log_abs_det_jacobian(self, z):\n pre_u = self.u_ + self.u\n pre_w = self.w_ + self.w\n a = F.softplus(self.a + self.inv)\n w = F.softmax(pre_w, dim=3)\n u = F.softmax(pre_u, dim=3)\n # Perform computation\n pre_sigm = torch.sum(u * a * z, 3) + self.b\n sigm = torch.sigmoid(pre_sigm)\n x_pre = torch.sum(w * sigm, dim=3)\n x_pre_clipped = x_pre * (1 - self.eps) + self.eps * 0.5\n logj = F.log_softmax(pre_w, dim=3) + logsigmoid(pre_sigm) + logsigmoid(-pre_sigm) + torch.log(a)\n # n, d, d2, dh\n logj = logj + F.log_softmax(pre_u, dim=3)\n # n, d, d2, dh, d1\n logj = torch.log(torch.sum(torch.exp(logj),3))\n # n, d, d2, d1\n logdet_ = logj + np.log(1 - self.eps) - (torch.log(x_pre_clipped) + torch.log(-x_pre_clipped + 1))\n return logdet_", "def determinant(self):\n if self.n_rows != self.n_cols:\n raise Exception('Matrix is not square')\n if self.n_rows == 2:\n return (self.data[0][0] * self.data[1][1]) - (self.data[1][0] * self.data[0][1])\n else:\n echelon, ops = reduce_to_echelon(self.data.copy(), True)\n swaps = sum([1 if row[0] == 'swap' else 0 for row in ops])\n return math.prod([echelon[i][i] for i in range(len(echelon))]) * (-1) ** swaps", "def det(self):\n\t\t\n\t\trows = self._rows\n\t\tsign = +1\n\t\tsumm = 0\n\n\t\tfor perm in permutations(range(rows), rows):\n\t\t\tmul = 1\n\t\t\tsign = SquareMatrix.__parity_of_permutation(perm)\n\n\t\t\tfor i in range(rows):\n\t\t\t\tmul *= self[i][perm[i]]\n\n\t\t\tsumm += sign * mul\n\t\treturn summ", "def compute_differential_operator(self):\n\n v_in, v_out, weights = self.get_edge_list()\n\n n = len(v_in)\n Dr = np.concatenate((np.arange(n), np.arange(n)))\n Dc = np.empty(2*n)\n Dc[:n] = v_in\n Dc[n:] = v_out\n Dv = np.empty(2*n)\n\n if self.lap_type == 'combinatorial':\n Dv[:n] = np.sqrt(weights)\n Dv[n:] = -Dv[:n]\n elif self.lap_type == 'normalized':\n Dv[:n] = np.sqrt(weights / self.dw[v_in])\n Dv[n:] = -np.sqrt(weights / self.dw[v_out])\n else:\n raise ValueError('Unknown lap_type {}'.format(self.lap_type))\n\n self._D = sparse.csc_matrix((Dv, (Dr, Dc)), shape=(n, self.N))", "def linear_backward_calculation(dZ, internal_params):\n\n A_prev, W, b = internal_params\n nb = A_prev.shape[1]\n\n ### START CODE HERE ### (≈ 3 lines of code)\n dW =np.multiply((np.dot(dZ, A_prev.T)),1/nb)\n db = np.multiply ((np.sum(dZ, axis=1, keepdims=True),1/nb))\n dA_prev = np.dot(W.T, dZ)\n # raise NotImplementedError\n return dA_prev,dW,db", "def d_dl(self, points):\n n_centres = self.n_points\n n_points = points.shape[0]\n\n # TPS kernel (nonlinear + affine)\n\n # for each input, evaluate the rbf\n # (n_points, n_centres)\n k_points = self.kernel.apply(points)\n\n # k_points with (1, x, y) appended to each point\n # (n_points, n_centres+3) - 3 is (1, x, y) for affine component\n k = np.hstack([k_points, np.ones([n_points, 1]), points])\n\n # (n_centres+3, n_centres+3)\n try:\n inv_L = np.linalg.inv(self.l)\n except np.linalg.LinAlgError:\n # If two points are coincident, or very close to being so, then the\n # matrix is rank deficient and thus not-invertible. 
Therefore,\n # only take the inverse on the full-rank set of indices.\n _u, _s, _v = np.linalg.svd(self.l)\n keep = _s.shape[0] - sum(_s < self.min_singular_val)\n inv_L = _u[:, :keep].dot(1.0 / _s[:keep, None] * _v[:keep, :])\n\n\n # Taking the derivative of L for changes in l must yield an x,y change\n # for each centre.\n # (n_centres+3, n_centres+3, n_centres, n_dims)\n dL_dl = np.zeros(self.l.shape + (n_centres, 2))\n\n # take the derivative of the kernel wrt centres at the centres\n # SHOULD be (n_centres, n_dims, n_centres, n_dims)\n # IS (n_centres, n_centres, n_dims\n dK_dl_at_tgt = self.kernel.d_dl(self.source.points)\n\n # we want to build a tensor where for each slice where\n # dK_dl[i, j, k, l] is the derivative wrt the l'th dimension of the\n # i'th centre for L[j, k] -> first axis is just looping over centres\n # and last looping over dims\n # (n_centres, n_centres, n_centres, n_dims)\n dK_dl = np.zeros((n_centres, ) + dK_dl_at_tgt.shape)\n\n # make a linear iterator over the centres\n iter = np.arange(n_centres)\n\n # efficiently build the repeated pattern for dK_dl\n # note that the repetition over centres happens over axis 0\n # and the dims axis is the last\n # so dK_dl[0, ..., 0] corresponds to dK/dx0 in Joan's paper\n # dK_dl[3, ..., 1] corresponds to dK_dy3 in Joan's paper\n dK_dl[iter, iter] = dK_dl_at_tgt[iter]\n dK_dl[iter, :, iter] = dK_dl_at_tgt[:, iter]\n\n # prepare memory for the answer\n # SHOULD be (n_points, n_dims, n_centres, n_dims)\n # IS (n_points, , n_centres, n_dims)\n dW_dl = np.zeros((n_points, n_centres, 2))\n\n # pretend the target is equal to the source\n # (n_dims, n_centres+3)\n pseudo_target = np.hstack([self.source.points.T, np.zeros([2, 3])])\n\n for i in np.arange(n_centres):\n # dP_dli (n_centres, n_points, n_dims, n_dims)\n dP_dli = np.zeros(self.p.shape + (2,))\n dP_dli[i, 1, 0] = -1\n dP_dli[i, 2, 1] = -1\n\n dL_dl[:n_centres, :n_centres, i] = dK_dl[i]\n dL_dl[:n_centres, n_centres:, i] = dP_dli\n dL_dl[n_centres:, :n_centres, i] = np.swapaxes(dP_dli, 0, 1)\n\n omega_x = -inv_L.dot(dL_dl[..., i, 0].dot(inv_L))\n omega_y = -inv_L.dot(dL_dl[..., i, 1].dot(inv_L))\n dW_dl[:, i, 0] = k.dot(omega_x).dot(pseudo_target[0])\n dW_dl[:, i, 1] = k.dot(omega_y).dot(pseudo_target[1])\n\n return dW_dl", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n else:\n # construct the matrix from a sequence of indices and values\n ij=[]\n values=[] # successive value for the same i.j will be summed\n \n b = np.zeros(Ncalc,np.float64)\n flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n \n for j in range(self.grid.Nedges()):\n e = self.grid.edges[j]\n ic1,ic2 = e['cells']\n \n if ic1<0 or ic2<0 or e['deleted']:\n continue # boundary edge, or deleted edge\n \n flux_per_gradient=flux_per_gradient_j[j]\n \n # this is the desired operation:\n # Cdiff[ic1] -= 
flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n # Where Cdiff is row, C is col\n\n if is_calc_c[ic1] and is_calc_c[ic2]:\n mic2 = c_map[ic2]\n mic1 = c_map[ic1]\n v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n \n if meth == 'dok':\n A[mic1,mic2] -= v1\n A[mic1,mic1] += v1\n A[mic2,mic2] += v2\n A[mic2,mic1] -= v2\n else:\n ij.append( (mic1,mic2) ) ; values.append(-v1)\n ij.append( (mic1,mic1) ) ; values.append(v1)\n ij.append( (mic2,mic2) ) ; values.append(v1)\n ij.append( (mic2,mic1) ) ; values.append(-v1)\n \n elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n # both are dirichlet, so nothing to do\n pass\n elif not is_calc_c[ic2]:\n mic1 = c_map[ic1]\n v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n if meth == 'dok':\n A[mic1,mic1] += v\n else:\n ij.append( (mic1,mic1) )\n values.append(v)\n\n # roughly\n # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n # but we already know x[2],\n # A[1,1]*x[1] + ... = b[1] - A[1,2]*x[2]\n # so flip the sign, multiply by known dirichlet value, and\n # add to the RHS\n b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n else: # not is_calc_c[c1]\n mic2 = c_map[ic2]\n # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n # ...\n # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n if meth == 'dok':\n A[mic2,mic2] += v\n else:\n ij.append( (mic2,mic2) )\n values.append(v)\n b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n # Used to test 'is not 0:' but modern python complains\n if isinstance(self.alpha,np.ndarray): \n for c in range(N):\n if self.is_calc_c[c]:\n mic=self.c_map[c]\n v=self.alpha[c]*self.dt\n if meth == 'dok':\n A[mic,mic] -= v\n else:\n ij.append( (mic,mic) )\n values.append(-v)\n\n # Flux boundary conditions:\n for ic,value,xy in self.neumann_bcs:\n mic=c_map[ic]\n # make mass/time into concentration/step\n # arrived at minus sign by trial and error.\n # 2023-08-04: there was a bug here that used ic2 instead of ic.\n b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n if meth == 'dok':\n self.A = sparse.coo_matrix(A)\n else:\n ijs=np.array(ij,dtype=np.int32)\n data=np.array(values,dtype=np.float64)\n A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n self.A=A\n \n # report scale to get a sense of whether dt is too large\n Ascale = A.diagonal().min()\n log.debug(\"Ascale is %s\"%Ascale)\n\n self.b = b", "def determinant(self):\n if not self.is_square():\n raise(ValueError, \"Cannot calculate determinant of non-square matrix.\")\n if self.h > 2:\n raise(NotImplementedError, \"Calculating determinant not implemented for matrices largerer than 2x2.\")\n\n # TODO - your code here\n if self.h == 1:\n return self.g[0][0] # a 1x1 matrix\n else:\n return ((self.g[0][0] * self.g[1][1]) - (self.g[0][1] * self.g[1][0])) # a 2x2 matrix\n # TODO - your code here", "def mass_flow_deriv(self):\n deriv = np.zeros((\n self.num_i,\n self.num_i + self.num_o + self.num_vars,\n self.num_nw_vars))\n for i in range(self.num_i):\n deriv[i, i, 0] = 1\n for j in range(self.num_o):\n deriv[j, j + i + 1, 0] = -1\n return deriv", "def jacobian_d(self, x, out=None, **kwargs):\n return 
self._base_nlp.jacobian_d(x, out=out, **kwargs)", "def jacobian_d(self, x, out=None, **kwargs):\n return self._base_nlp.jacobian_d(x, out=out, **kwargs)", "def dfdx(self, X):\n \n return 3*(X[0])**2", "def jacobian(self, dt):\n raise NotImplementedError", "def _derZ(self, x, y, z):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y, z)\n i = self.argcompare(temp, axis=1)\n y = temp[np.arange(m), i]\n dfdz = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdz[c] = self.functions[j].derivativeZ(x[c], y[c], z[c])\n return dfdz", "def Cholesky_Derivs(InvV,m_points):\n #get some of the dimensions from the inputs\n m = InvV.n\n d = InvV.d\n\n #initialise the required array\n dM_dL = np.zeros([m,d,d,d,d])\n \n #firstly loop through each map/component\n for j in range(m):\n #now want to loop over each of the Cholesky components\n for k in range(d):\n for l in range(d):\n #take the l-th column of L and put it in row k\n dM_dL[j,k,l,k,:] += InvV.L[j,:,l]\n #take the k-th row of L and put it in row l\n dM_dL[j,k,l,l,:] += InvV.L[j,k,:] \n return dM_dL", "def pderiv2D(field, xld, dim = 0):\n n_x, n_y = field.shape\n dfield = np.zeros_like(field)\n if (dim not in [0, 1]): \n raise ValueError(\"2-D function, enter dim = 0 (df/dx) or dim = 1 (df/dy)\")\n if (dim == 0):\n # check if len(x) equals M\n if len(xld) != n_x : \n raise ValueError(\"x-direction lengths do not match\")\n for j in range(n_y):\n dfield[:, j] = deriv(field[:,j], np.array(xld))\n if (dim == 1):\n if len(xld) != n_y:\n raise ValueError('y-direction lengths do not match')\n for i in range(n_x):\n dfield[i,:] = deriv(field[i,:], np.array(xld))\n return dfield", "def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None", "def compute_gradient(c, x, y):\n\n vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])\n rows, cols = c.shape\n\n result = np.empty_like(x)\n\n for i in nb.prange(rows):\n for j in nb.prange(cols):\n c_remainder = c[i, j] % 4\n gradient_co = vectors[c_remainder]\n result[i, j] = gradient_co[0] * x[i, j] + gradient_co[1] * y[i, j]\n\n return result", "def _derY(self, x, y):\n m = len(x)\n temp = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n temp[:, j] = self.functions[j](x, y)\n i = self.argcompare(temp, axis=1)\n y = 
temp[np.arange(m), i]\n dfdy = np.zeros_like(x)\n for j in range(self.funcCount):\n c = i == j\n dfdy[c] = self.functions[j].derivativeY(x[c], y[c])\n return dfdy", "def _det(mat):\n\n return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])\n + mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *\n mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -\n mat[1][1] * mat[2][0]))", "def LDL(A, d):\n n = shape(A)[0]\n L = array(eye(n))\n dg = zeros(n)\n dg[0] = A[0, 0]\n for k in range(1, n):\n m = reshape(array(A[:k, k].copy()), k)\n rforwardsolve(L[:k, :k], m, d)\n L[k, :k] = m/dg[:k]\n dg[k] = A[k, k] - dot(L[k, :k], m)\n return L, dg", "def beinflumat(x_axis, y_axis, e_eff):\n len_x = len(x_axis)\n len_y = len(y_axis)\n influence_matrix_complete = np.zeros((len_x, len_y, len_x, len_y))\n\n # generate coordinate grids\n a_factor = (x_axis[-1] - x_axis[0]) / (len_x - 1) / 2\n b_factor = (y_axis[-1] - y_axis[0]) / (len_y - 1) / 2\n x_grid = __beinflumatgrid(x_axis)\n y_grid = __beinflumatgrid(y_axis)\n\n # use numexpr to evaluate expressions\n xpa = ne.evaluate('x_grid + a_factor')\n xma = ne.evaluate('x_grid - a_factor')\n ypb = ne.evaluate('y_grid + b_factor')\n ymb = ne.evaluate('y_grid - b_factor')\n\n # calculate complete influence matrix\n for j in range(0, len_y):\n for j_prime in range(0, len_y):\n influence_matrix_complete[:, j, :, j_prime] = \\\n (np.multiply(xpa, np.log(\n np.divide(\n ((ypb[j, j_prime]) +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xpa, xpa))),\n ((ymb[j, j_prime]) +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xpa, xpa)))))) +\n (ypb[j, j_prime]) * np.log(\n np.divide(\n (xpa +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xpa, xpa))),\n (xma +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xma, xma))))) +\n np.multiply(xma, np.log(\n np.divide(\n ((ymb[j, j_prime]) +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xma, xma))),\n ((ypb[j, j_prime]) +\n np.sqrt(np.multiply((ypb[j, j_prime]),\n (ypb[j, j_prime])) +\n np.multiply(xma, xma)))))) +\n (ymb[j, j_prime]) * np.log(\n np.divide(\n (xma +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xma, xma))),\n (xpa +\n np.sqrt(np.multiply((ymb[j, j_prime]),\n (ymb[j, j_prime])) +\n np.multiply(xpa, xpa))))))\n\n return influence_matrix_complete * 1 / e_eff * 2 / pi", "def update(self, dLds, alpha, beta):\n T = len(self.x)\n self.nodes.reset_error()\n self.igate.reset_error()\n self.fgate.reset_error()\n self.ogate.reset_error()\n dLdx = np.zeros((T, self.input_size))\n dLdc = np.zeros(self.hidden_size)\n for t in xrange(T-1, -1, -1):\n dLdpo = dLds[t] * self.h[t] * self.gatefun.derivate(self.o[t])\n # parameters for output gate\n self.ogate.dLdu += np.outer(dLdpo, self.x[t])\n self.ogate.dLdw += np.outer(dLdpo, self.s[t-1])\n self.ogate.dLdv += np.outer(dLdpo, self.c[t-1])\n dLds[t-1] += np.dot(self.ogate.w.T, dLdpo)\n dLdx[t] += np.dot(self.ogate.u.T, dLdpo)\n dLdc += np.dot(self.ogate.v.T, dLdpo)\n\n dLdc += dLds[t] * self.o[t] * self.acfun.derivate(self.h[t])\n dLdpi = dLdc * self.g[t] * self.gatefun.derivate(self.i[t])\n dLdpf = dLdc * self.c[t-1] * self.gatefun.derivate(self.f[t])\n dLdpg = dLdc * self.i[t] * self.acfun.derivate(self.g[t])\n dLdc = dLdc * self.f[t]\n # parameters for nodes in hidden layer\n self.nodes.dLdu += np.outer(dLdpg, self.x[t])\n self.nodes.dLdw += np.outer(dLdpg, self.s[t-1])\n dLds[t-1] += 
np.dot(self.nodes.w.T, dLdpg)\n dLdx[t] += np.dot(self.nodes.u.T, dLdpg)\n # parameters for input gate\n self.igate.dLdu += np.outer(dLdpi, self.x[t])\n self.igate.dLdw += np.outer(dLdpi, self.s[t-1])\n self.igate.dLdv += np.outer(dLdpi, self.c[t-1])\n dLds[t-1] += np.dot(self.igate.w.T, dLdpi)\n dLdx[t] += np.dot(self.igate.u.T, dLdpi)\n dLdc += np.dot(self.igate.v.T, dLdpi)\n # parameters for forget gate\n self.fgate.dLdu += np.outer(dLdpf, self.x[t])\n self.fgate.dLdw += np.outer(dLdpf, self.s[t-1])\n self.fgate.dLdv += np.outer(dLdpf, self.c[t-1])\n dLds[t-1] += np.dot(self.fgate.w.T, dLdpf)\n dLdx[t] += np.dot(self.fgate.u.T, dLdpf)\n dLdc += np.dot(self.fgate.v.T, dLdpf)\n if self.en_bias:\n self.nodes.dLdb += dLdpg\n self.igate.dLdb += dLdpi\n self.fgate.dLdb += dLdpf\n self.ogate.dLdb += dLdpo\n # update weight matrix of current hidden node\n self.nodes.update(alpha, beta)\n self.igate.update(alpha, beta)\n self.fgate.update(alpha, beta)\n self.ogate.update(alpha, beta)\n return dLdx", "def ddx(field, method=None):\n if method == None or method == 'central':\n new_field = field-field\n\n # Apply central differencing in the 'core' region\n new_field[:,1:-1] = (field[:,2:]-field[:,:-2])/field.dL/2\n\n # Apply second order forward/backward differences at boundaries\n new_field[:,0] = (field[:,2] - 2*field[:,1] + field[:,0]) / \\\n field.dL**2\n new_field[:,-1] = (field[:,-3] - 2*field[:,-2] + field[:,-1]) / \\\n field.dL**2\n return new_field\n\n elif method == 'richardson':\n new_field = field[:,:-4,2:-2] - field[:,4:,2:-2] + \\\n 8*field[:,3:-1,2:-2] - 8*field[:,1:-3,2:-2]\n new_field = new_field/field.dL/12\n new_field.x = field.x[2:-2,2:-2]\n new_field.y = field.y[2:-2,2:-2]\n return new_field\n\n elif method == 'least_square':\n new_field = 2*field[:,4:,2:-2] - 2*field[:,:-4,2:-2] + \\\n field[:,3:-1,2:-2] - field[:,1:-3,2:-2]\n new_field = new_field/field.dL/10\n new_field.x = field.x[2:-2,2:-2]\n new_field.y = field.y[2:-2,2:-2]\n return new_field\n\n else:\n raise ValueError('method keyword argument was not valid.')", "def gradient(self):\n result = np.zeros(len(self.variables))\n result[self.bivariateGradInd] = (self.shape-1)/self.variable - self.rate\n return result", "def func_deriv(l, sign=1.0):\n\tx,y,z,xs,ys=l\n\n\tdf1dx = 0\n\tdf1dy = 0\n\tdf1dz = 0\n\tdf1dxs = 0\n\tdf1dys = 0\n\tfor i in range(n):\n\t \tdf1dx += (di+ci*ri[i])*(x-xi[i])*(1/ (((x-xi[i])**2+(y-yi[i])**2+(z-zi[i])**2 )**0.5) )\n\t \tdf1dy += (di+ci*ri[i])*(y-yi[i])*(1/ (((x-xi[i])**2+(y-yi[i])**2+(z-zi[i])**2 )**0.5) )\n\t \tdf1dz += (di+ci*ri[i])*(z-zi[i])*(1/ (((x-xi[i])**2+(y-yi[i])**2+(z-zi[i])**2 )**0.5) )\n\n\tdf2dx = 0\n\tdf2dy = 0\n\tdf2dz = 0\n\tdf2dxs = 0\n\tdf2dys = 0\n\tfor i in range(n): \n\t \tdf2dx += (d +cd*ri[i])*(x-xs)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\t \tdf2dy += (d +cd*ri[i])*(y-ys)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\t \tdf2dz += (d +cd*ri[i])*(z-zs)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\t\tdf2dxs += -(d +cd*ri[i])*(x-xs)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\t \tdf2dys += -(d +cd*ri[i])*(y-ys)*(1/ (((x-xs)**2+(y-ys)**2+(z-zs)**2 )**0.5) )\n\n\tdf3dx = 0\n\tdf3dy = 0\n\tdf3dz = 0\n\tdf3dxs = 0\n\tdf3dys = 0\n\tfor i in range(n):\n\t \tdf3dxs += (s +cs*ri[i])*(xm-xs)*(1/ (((xm-xs)**2+(ym-ys)**2+(zm-zs)**2 )**0.5) )\n\t\tdf3dys += (s +cs*ri[i])*(ym-ys)*(1/ (((xm-xs)**2+(ym-ys)**2+(zm-zs)**2 )**0.5) )\n\n\tdfdx = sign*(df1dx + df2dx + df3dx)\n\tdfdy = sign*(df1dy + df2dy + df3dy)\n\tdfdz = sign*(df1dz + df2dz + df3dz)\n\tdfdxs = sign*(df1dxs 
+ df2dxs + df3dxs)\n\tdfdys = sign*(df1dys + df2dys + df3dys)\n\n\treturn np.array([ dfdx, dfdy, dfdz, dfdxs, dfdys ])", "def deriv(self, model):\n k1, k2, k3 = self.coefficients\n r = self.relation(model)\n dc_dm1 = k1 * r\n dc_dm2 = k2 * r\n\n result = np.r_[dc_dm1, dc_dm2]\n\n return result", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state.item(0)\n pe = state.item(1)\n pd = state.item(2)\n u = state.item(3)\n v = state.item(4)\n w = state.item(5)\n e0 = state.item(6)\n e1 = state.item(7)\n e2 = state.item(8)\n e3 = state.item(9)\n p = state.item(10)\n q = state.item(11)\n r = state.item(12)\n # extract forces/moments\n fx = forces_moments.item(0)\n fy = forces_moments.item(1)\n fz = forces_moments.item(2)\n l = forces_moments.item(3)\n m = forces_moments.item(4)\n n = forces_moments.item(5)\n\n # position kinematics\n pn_dot =\n pe_dot =\n pd_dot =\n\n # position dynamics\n u_dot =\n v_dot =\n w_dot =\n\n # rotational kinematics\n e0_dot =\n e1_dot =\n e2_dot =\n e3_dot =\n\n # rotatonal dynamics\n p_dot =\n q_dot =\n r_dot = \n\n # collect the derivative of the states\n x_dot = np.array([[pn_dot, pe_dot, pd_dot, u_dot, v_dot, w_dot,\n e0_dot, e1_dot, e2_dot, e3_dot, p_dot, q_dot, r_dot]]).T\n return x_dot", "def _2ndderiv_xyz(self,x,y,z,i,j):\n return -np.pi*self._rhoc_M*self.a**3*self._b*self._c *\\\n _2ndDerivInt(x,y,z,self._a2,self._b2*self._a2,self._c2*self._a2,self.n,i,j)", "def det_2x2(matrix: FieldMatrix) -> FlowFieldVal:\n _validate_matrix_shape(matrix, (2, 2))\n\n det = lambda a, b, c, d: a * d - b * c\n\n a, b = matrix[0]\n c, d = matrix[1]\n\n return tf.nest.map_structure(det, a, b, c, d)", "def compute_dz(self):\n el_geom_w = self.compute_geom_weights()\n el_geom_grad = self.compute_geom_grads()\n\n # Sum of weights coeffs\n w_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(el_geom_w))\n\n # Sum of weighted df = (dfx, dfy)\n dfx_el_w = np.empty_like(el_geom_w)\n dfy_el_w = np.empty_like(el_geom_w)\n for iapex in range(3):\n dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]\n dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]\n dfx_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(dfx_el_w))\n dfy_node_sum = np.bincount(np.ravel(self._triangles),\n weights=np.ravel(dfy_el_w))\n\n # Estimation of df\n dfx_estim = dfx_node_sum/w_node_sum\n dfy_estim = dfy_node_sum/w_node_sum\n return np.vstack([dfx_estim, dfy_estim]).T", "def compute_gradient(self):\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def Derivative(self, *args):\n return _Adaptor3d.Adaptor3d_InterFunc_Derivative(self, *args)", "def c(\n dp: np.ndarray,\n ddp: np.ndarray,\n ) -> np.ndarray:\n\n return \\\n np.abs(ddp[0, :]*dp[1, :] - dp[0, :]*ddp[1, :]) / \\\n (dp[0, :]**2 + dp[1, :]**2)**1.5", "def test_vic_dcor_nonlinear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0],\n [0.2, 0.5, 0.0],\n [0.2, 0.5, 1.0],\n [0.4, 1.0, 0.0],\n [0.4, 1.0, 1.0],\n [0.6, 1.0, 0.0],\n [0.6, 1.0, 1.0],\n [0.8, 0.5, 0.0],\n [0.8, 0.5, 1.0],\n [1.0, 0.0, 0.0],\n [1.0, 0.0, 1.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"VIC\", \"dCor\")\n expected_w_vector = np.array(\n [0.22633480, 0.27052183, 0.50314336],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def calculateElementCoefficients(self):\n 
#\n #get u,grad(u), and grad(u)Xgrad(w) at the quadrature points\n #\n for cj in range(self.nc):\n self.u[cj].getValues(self.q[('v',cj)],\n self.q[('u',cj)])\n if self.q.has_key(('grad(u)',cj)):\n self.u[cj].getGradientValues(self.q[('grad(v)',cj)],\n self.q[('grad(u)',cj)])\n #\n #get functions of (t,x,u) at the quadrature points\n #\n self.coefficients.evaluate(self.timeIntegration.t,self.q)\n log(\"Coefficients on element\",level=10,data=self.q)\n #\n # time integration is handled directly in ELLAM weak approximation, don't have a hook for\n # doing that via a time integration object (could if it were a direct Lagrange Galerkin formulation I believe)\n # however, need to set time integration's m_tmp if use that anywhere\n #if self.timeTerm:\n # self.timeIntegration.calculateElementCoefficients(self.q)\n\n #todo eventually can add nonlinear potential here\n\n #cek and mwf need to go through this section to clean up, some of next two blocks could go to calcQuad\n #\n #todo need non-diagonal dependence?\n for ci in range(self.nc):\n cfemIntegrals.calculateCFLADR(self.elementEffectiveDiametersArray,\n self.q[('dm',ci,ci)],\n self.q[('df',ci,ci)],#could just be velocity\n self.q[('cfl',ci)])", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def jacobian(self, points):\n # check if re-computation of dW/dx can be avoided\n if not np.array_equal(self._cached_points, points):\n # recompute dW/dx, i.e. the relative weight of each point wrt\n # the source landmarks\n self.dW_dX = self.transform.weight_points(points)\n # cache points\n self._cached_points = points\n\n # dX/dp is simply the Jacobian of the model\n dX_dp = self.pdm.model.jacobian\n\n # dW_dX: n_points x n_points x n_dims\n # dX_dp: n_points x n_params x n_dims\n dW_dp = np.einsum('ild, lpd -> ipd', self.dW_dX, dX_dp)\n # dW_dp: n_points x n_params x n_dims\n\n return dW_dp", "def dgdx(self, X):\n \n return 2*(X[0]) - 2", "def jacobian(self, x):\n x_ = np.atleast_2d(x)\n if self.normalize:\n x_ = (x_ - self.sample_mean) / self.sample_std\n s_ = (self.samples - self.sample_mean) / self.sample_std\n else:\n s_ = self.samples\n\n fx, jf = self.reg_model(x_)\n rx, drdx = self.corr_model(x=x_, s=s_, params=self.corr_model_params, dx=True)\n y_grad = np.einsum('ikj,jm->ik', jf, self.beta) + np.einsum('ijk,jm->ki', drdx.T, self.gamma)\n if self.normalize:\n y_grad = y_grad * self.value_std / self.sample_std\n if x_.shape[1] == 1:\n y_grad = y_grad.flatten()\n return y_grad", "def test_vic_dcor_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"VIC\", \"dCor\")\n expected_w_vector = np.array(\n [0.33817571, 0.33091215, 0.33091215],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def compute_det(self, log_progress=False):\n if not self.is_square():\n raise Exception(u\"Not a square matrix\")\n\n mat = clone_matrix(self.coefficients)\n size = self.get_size()[0]\n\n for i in range(size - 1):\n for j in range(i + 1, size):\n for k in range(i + 1, size):\n mat[j][k] = (mat[j][k] * mat[i][i]) - (mat[j][i] * mat[i][k])\n if i > 0:\n mat[j][k] //= mat[i - 1][i - 1]\n if log_progress:\n print(i)\n if i > 0:\n for j in 
range(size):\n mat[j][i - 1] = 0\n mat[i - 1][j] = 0\n\n return mat[size - 1][size - 1]", "def determinant(self):\n if self.L is None or self.U is None:\n self.decomposeLU()\n\n retval = 1.0\n for i in range(self.rows):\n retval *= self.L[i, i] * self.U[i, i]\n return retval", "def jacobian_ur5(q, delta=0.0001):\n # Alocacion de memoria\n J = np.zeros((3,6))\n # Transformacion homogenea inicial (usando q)\n T = fkine_ur5(q)\n # Iteracion para la derivada de cada columna\n for i in xrange(6):\n # Copiar la configuracion articular inicial\n dq = copy(q);\n # Incrementar la articulacion i-esima usando un delta\n dq[i] = dq[i] + delta \n dT = fkine_ur5(dq)\n \n J[:,i] = (dT[0:3, 3] - T[0:3, 3])/delta\n\n return J", "def adjoint_derivative(self, dx=None, dy_u=None, dy_l=None,\n P_idx=None, A_idx=None, eps_iter_ref=1e-04):\n\n P, q = self._derivative_cache['P'], self._derivative_cache['q']\n A = self._derivative_cache['A']\n l, u = self._derivative_cache['l'], self._derivative_cache['u']\n\n try:\n results = self._derivative_cache['results']\n except KeyError:\n raise ValueError(\"Problem has not been solved. \"\n \"You cannot take derivatives. \"\n \"Please call the solve function.\")\n\n if results.info.status != \"solved\":\n raise ValueError(\"Problem has not been solved to optimality. \"\n \"You cannot take derivatives\")\n\n m, n = A.shape\n x = results.x\n y = results.y\n y_u = np.maximum(y, 0)\n y_l = -np.minimum(y, 0)\n\n if A_idx is None:\n A_idx = A.nonzero()\n\n if P_idx is None:\n P_idx = P.nonzero()\n\n if dy_u is None:\n dy_u = np.zeros(m)\n if dy_l is None:\n dy_l = np.zeros(m)\n\n # Make sure M matrix exists\n if 'M' not in self._derivative_cache:\n # Multiply second-third row by diag(y_u)^-1 and diag(y_l)^-1\n # to make the matrix symmetric\n inv_dia_y_u = spa.diags(np.reciprocal(y_u + 1e-20))\n inv_dia_y_l = spa.diags(np.reciprocal(y_l + 1e-20))\n M = spa.bmat([\n [P, A.T, -A.T],\n [A, spa.diags(A @ x - u) @ inv_dia_y_u, None],\n [-A, None, spa.diags(l - A @ x) @ inv_dia_y_l]\n ], format='csc')\n delta = spa.bmat([[eps_iter_ref * spa.eye(n), None],\n [None, -eps_iter_ref * spa.eye(2 * m)]],\n format='csc')\n self._derivative_cache['M'] = M\n self._derivative_cache['solver'] = qdldl.Solver(M + delta)\n\n rhs = - np.concatenate([dx, dy_u, dy_l])\n\n r_sol = self.derivative_iterative_refinement(rhs)\n\n r_x, r_yu, r_yl = np.split(r_sol, [n, n+m])\n\n # Extract derivatives for the constraints\n rows, cols = A_idx\n dA_vals = (y_u[rows] - y_l[rows]) * r_x[cols] + \\\n (r_yu[rows] - r_yl[rows]) * x[cols]\n dA = spa.csc_matrix((dA_vals, (rows, cols)), shape=A.shape)\n du = - r_yu\n dl = r_yl\n\n # Extract derivatives for the cost (P, q)\n rows, cols = P_idx\n dP_vals = .5 * (r_x[rows] * x[cols] + r_x[cols] * x[rows])\n dP = spa.csc_matrix((dP_vals, P_idx), shape=P.shape)\n dq = r_x\n\n return (dP, dq, dA, dl, du)", "def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = 
(e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot", "def jacobian(self, dt):\n if dt not in self._F_cache:\n d = self._dimension\n with torch.no_grad():\n F = eye_like(self.sa2, d)\n F[: d // 2, d // 2 :] = dt * eye_like(self.sa2, d // 2)\n self._F_cache[dt] = F\n\n return self._F_cache[dt]", "def gradient_cf(self, potential, get_energy=True):\n xn, xe, lpn, lpe, alpha, o1, o2 = self(None)\n fn_, fe_ = potential((xn, xe))\n fn_ = (fn_ + self.tw * lpn) * self.wn\n fe_ = (fe_ - lpe) * self.we\n fn = fn_ * alpha\n fe = fe_ * alpha\n dmu = tf.math.divide_no_nan(tf.reduce_sum(fn * self.xn, axis=-1, keepdims=True), self.sigma)\n dsg = tf.math.divide_no_nan(tf.reduce_sum(fn * self.x22, axis=-1, keepdims=True), self.sigma)\n dmu1 = tf.math.divide_no_nan(tf.reduce_sum(fe * self.xi, -1, keepdims=True), o1)\n dmu2 = tf.reduce_sum(fe * self.xj, -1, keepdims=True) / o2\n dsg1 = tf.math.divide_no_nan(tf.reduce_sum(fe * self.xi22, -1, keepdims=True), o1)\n dsg2 = tf.reduce_sum(fe * self.xj22, -1, keepdims=True) / o2\n\n dmu += (tf.concat([dmu1, self.top0], 2) + tf.concat(\n [self.btm0, tf.reshape(tf.reduce_sum(tf.reshape(dmu2[:, :, :729, ...], self.s1), [3, 5]), self.s3),\n tf.reshape(tf.reduce_sum(tf.reshape(dmu2[:, :, 729:810, ...], self.s2), [3, 5]), self.s3),\n tf.reduce_sum(dmu2[:, :, 810:, ...], 2, True)], 2))\n\n dsg += (tf.concat([dsg1, self.top0], 2) + tf.concat(\n [self.btm0, tf.reshape(tf.reduce_sum(tf.reshape(dsg2[:, :, :729, ...], self.s1), [3, 5]), self.s3),\n tf.reshape(tf.reduce_sum(tf.reshape(dsg2[:, :, 729:810, ...], self.s2), [3, 5]), self.s3),\n tf.reduce_sum(dsg2[:, :, 810:, ...], 2, True)], 2))\n\n dalpha = (tf.reduce_sum(fn_, [2, 4], keepdims=True) + tf.reduce_sum(fe_, [2, 4], keepdims=True))\n dw = alpha * (dalpha - tf.reduce_sum(dalpha * alpha, 3, keepdims=True))\n energy = tf.zeros(fn.shape[:2], tf.float64) if not get_energy else \\\n -(tf.reduce_sum(fn, [2, 3, 4]) + tf.reduce_sum(fe, [2, 3, 4]))\n return (-dmu * sqrt2, -dsg, -dw), energy", "def ddy(field, method=None):\n if method == None or method == 'central':\n new_field = field-field\n\n # Apply central differencing in the 'core' region\n new_field[:,:,1:-1] = (field[:,:,2:]-field[:,:,:-2])/field.dL/2\n\n # Apply second order forward/backward differences at boundaries\n new_field[:,:,0] = (field[:,:,2] - 2*field[:,:,1] + field[:,:,0]) / \\\n field.dL**2\n 
new_field[:,:,-1] = (field[:,:,-3] - 2*field[:,:,-2] + field[:,:,-1]) / \\\n field.dL**2\n return new_field\n\n elif method == 'richardson':\n new_field = field[:,2:-2,4:] - 8*field[:,2:-2,3:-1] + 8*field[:,2:-2,1:-3] - field[:,2:-2,:-4]\n new_field = new_field/field.dL/12\n new_field.x = field.x[2:-2,2:-2]\n new_field.y = field.y[2:-2,2:-2]\n return new_field\n\n elif method == 'least_square':\n new_field = 2*field[:,2:-2,:-4] + field[:,2:-2,1:-3] - field[:,2:-2,3:-1] - 2*field[:,2:-2,4:]\n new_field = new_field/field.dL/10\n new_field.x = field.x[2:-2,2:-2]\n new_field.y = field.y[2:-2,2:-2]\n return new_field\n\n else:\n raise ValueError('method keyword argument was not valid.')", "def local_det_chol(node):\r\n if node.op == det:\r\n x, = node.inputs\r\n for (cl, xpos) in x.clients:\r\n if isinstance(cl.op, Cholesky):\r\n L = cl.outputs[0]\r\n return [tensor.prod(extract_diag(L) ** 2)]", "def gradient(init_par, alpha, delta, obs, sigma_obs, ccoef, N):\n\n\n\t## Initial parameters\n\n\tparallax, v, sigma_v = init_par[:-4], init_par[-4:-1], init_par[-1] \n\tplx_obs, mualpha_obs, mudelta_obs = obs[:, 0], obs[:, 1], obs[:, 2]\n\n\t### Define normal triad and proper motions\n\tp, q, r = normalTriad(alpha, delta)\n\tmualpha_mod = np.dot(np.transpose(p),v)*parallax/_A\n\tmudelta_mod = np.dot(np.transpose(q),v)*parallax/_A\n\t\n\tplx_mod, mualpha_mod, mudelta_mod = parallax, mualpha_mod, mudelta_mod\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\n\ta,like, expo, detD = np.ones(N),np.ones(N),np.ones(N), np.ones(N) \n\n\t### Eq. 8 in Lindegren+2000 (Covariance Matrix)\n\tC = np.zeros((3,3,N),dtype=np.float64)\n\tC[0,0,:],C[1,1,:],C[2,2,:] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\n\tcorr_coefficient_plx_mualpha, corr_coefficient_plx_mudelta, corr_coefficient_mualpha_mudelta = np.zeros(N), np.zeros(N), np.zeros(N)\n\tcorr_coefficient_plx_mualpha[:], corr_coefficient_plx_mudelta[:], corr_coefficient_mualpha_mudelta[:] = ccoef[:, 0], ccoef[:, 1], ccoef[:, 2] \n\t\n\tC[0,1,:], C[0,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta\n\tC[1,0,:], C[1,2,:] = corr_coefficient_plx_mualpha*sigma_plx*sigma_mualpha, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\tC[2,0,:], C[2,1,:] = corr_coefficient_plx_mudelta*sigma_plx*sigma_mudelta, corr_coefficient_mualpha_mudelta*sigma_mualpha*sigma_mudelta\n\n\t### Eq. 16 in Lindegren+2000 (Definition of D matrix)\t\n\tE = np.zeros((3,3,N),dtype=np.float64)\n\tE[1,1,:],E[2,2,:] = (sigma_v*parallax[:]/_A)**2., (sigma_v*parallax[:]/_A)**2.\n\tD,invD = np.zeros((3,3,N),dtype=np.float64),np.zeros((3,3,N),dtype=np.float64)\n\tD = np.add(E,C)\n\tfor i in range(N):\n\t\tdetD[i] = matrix_det(D[:,:,i]) \n\t\tinvD[:,:,i] = matrix_inv(D[:,:,i])\n\t\t\n\t\n\ta_c = np.ones((3,N))\n\ta_c = [plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod]\n\t\n\t### First derivatives in Eq. A3 \n\tcprime_pi, cprime_vx, cprime_vy, cprime_vz, = np.ones((3,N)), np.ones((3,N)), \\\n\t\t\t\t\t\t\tnp.ones((3,N)), np.ones((3,N)), \n\tcprime_pi[0,:] = 1.\n\tcprime_pi[1,:] = np.dot(np.transpose(p),v)/_A\n\tcprime_pi[2,:] = np.dot(np.transpose(q),v)/_A\n\t\n\tcprime_vx[0,:] = 0.\n\tcprime_vx[1,:] = -np.sin(alpha)*plx_mod/_A \n\tcprime_vx[2,:] = -np.sin(delta)*np.cos(alpha)*plx_mod/_A\n\n\t\n\tcprime_vy[0,:] = 0.\n\tcprime_vy[1,:] = np.cos(alpha)*plx_mod/_A \n\tcprime_vy[2,:] = -np.sin(delta)*np.sin(alpha)*plx_mod/_A\n\n\tcprime_vz[0,:] = 0.\n\tcprime_vz[1,:] = 0. 
\n\tcprime_vz[2,:] = np.cos(delta)*plx_mod/_A\n\n\tdlnd_dpi, dlnd_dsigmav = np.zeros(N), np.zeros(N)\n\tde_dpi, de_dsigmav = np.zeros(N), np.zeros(N)\n\t\n\n\t### See Eq. A5 \n\tde_dpi[:] = ((sigma_v/_A)**2.)*2.*plx_mod[:]\n\tde_dsigmav[:] = ((plx_mod[:]/_A)**2.)*2.*sigma_v\n\t\n\tdlnd_dpi[:] = (invD[1,1,:] + invD[2,2,:])*de_dpi[:] \n\tdlnd_dsigmav[:] = (invD[1,1,:] + invD[2,2,:])*de_dsigmav[:]\n\t\n\t\n\t\n\t### See Eq. A6\n\tdG_dpi, dG_dsigmav = np.zeros((3,3,N)), np.zeros((3,3,N)) \n\t\n\tdG_dpi[0,0,:], dG_dpi[0,1,:], dG_dpi[0,2,:] = (-invD[0,1,:]*invD[1, 0, :] - invD[0, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[0,1,:]*invD[1, 1, :] - invD[0,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[0,1,:]*invD[1,2,:] - invD[0,2,:]*invD[2,2,:])*de_dpi[:]\n\tdG_dpi[1,0,:], dG_dpi[1,1,:], dG_dpi[1,2,:] = (-invD[1,1,:]*invD[1, 0, :] - invD[1, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[1,1,:]*invD[1, 1, :] - invD[1,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[1,1,:]*invD[1,2,:] - invD[1,2,:]*invD[2,2,:])*de_dpi[:]\n\tdG_dpi[2,0,:], dG_dpi[2,1,:], dG_dpi[2,2,:] = (-invD[2,1,:]*invD[1, 0, :] - invD[2, 2, :]*invD[2,0,:])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[2,1,:]*invD[1, 1, :] - invD[2,2,:]*invD[2, 1, :])*de_dpi[:], \\\n\t\t\t\t\t\t (-invD[2,1,:]*invD[1,2,:] - invD[2,2,:]*invD[2,2,:])*de_dpi[:]\n\t\n\n\tdG_dsigmav[0,0,:], dG_dsigmav[0,1,:], dG_dsigmav[0,2,:] = (-invD[0,1,:]*invD[1, 0, :] - invD[0, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[0,1,:]*invD[1, 1, :] - invD[0,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[0,1,:]*invD[1,2,:] - invD[0,2,:]*invD[2,2,:])*de_dsigmav[:]\n\tdG_dsigmav[1,0,:], dG_dsigmav[1,1,:], dG_dsigmav[1,2,:] = (-invD[1,1,:]*invD[1, 0, :] - invD[1, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[1,1,:]*invD[1, 1, :] - invD[1,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[1,1,:]*invD[1,2,:] - invD[1,2,:]*invD[2,2,:])*de_dsigmav[:]\n\tdG_dsigmav[2,0,:], dG_dsigmav[2,1,:], dG_dsigmav[2,2,:] = (-invD[2,1,:]*invD[1, 0, :] - invD[2, 2, :]*invD[2,0,:])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[2,1,:]*invD[1, 1, :] - invD[2,2,:]*invD[2, 1, :])*de_dsigmav[:], \\\n\t\t\t\t\t\t\t\t (-invD[2,1,:]*invD[1,2,:] - invD[2,2,:]*invD[2,2,:])*de_dsigmav[:]\n\n\tf_dpi = np.zeros((N), dtype=np.float64) \n\t\n\t\n\tfor i in range(N):\n\t\tf_dpi_1, f_dpi_3 = 0., 0.0 \n\t\tfor ia in range(3):\n\t\t\tfor ib in range(3):\n\t\t\t\tf_dpi_1 += invD[ia,ib,i]*cprime_pi[ia,i]*a_c[ib][i]\n\t\t\t\tf_dpi_3 += (-0.5)*(dG_dpi[ia,ib,i]*a_c[ia][i]*a_c[ib][i])\n\t\t\t\t\t\n\t\tf_dpi_2 = (-0.5)*dlnd_dpi[i]\n\t\tf_dpi[i] = f_dpi_1 + f_dpi_2 + f_dpi_3\n\t\t\n\n\tf_vx, f_vy, f_vz, f_sigmav = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) \n\n\tf_vx = np.sum(invD[0,0,:]*cprime_vx[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vx[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vx[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vx[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vx[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vx[1,:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vx[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vx[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vx[2,:]*a_c[2][:])\n\t\n\tf_vy = np.sum(invD[0,0,:]*cprime_vy[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vy[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vy[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vy[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vy[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vy[1][:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vy[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vy[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vy[2,:]*a_c[2][:])\n\n\tf_vz = 
np.sum(invD[0,0,:]*cprime_vz[0,:]*a_c[0][:] + invD[0,1,:]*cprime_vz[0,:]*a_c[1][:] + invD[0,2,:]*cprime_vz[0,:]*a_c[2][:] + \\\n\t\t invD[1,0,:]*cprime_vz[1,:]*a_c[0][:] + invD[1,1,:]*cprime_vz[1,:]*a_c[1][:] + invD[1,2,:]*cprime_vz[1,:]*a_c[2][:] + \\\n\t\t invD[2,0,:]*cprime_vz[2,:]*a_c[0][:] + invD[2,1,:]*cprime_vz[2,:]*a_c[1][:] + invD[2,2,:]*cprime_vz[2,:]*a_c[2][:])\n\t\n\tf_sigmav = np.sum(-0.5*(dG_dsigmav[0,0,:]*a_c[0][:]*a_c[0][:] + dG_dsigmav[0,1,:]*a_c[1][:]*a_c[0][:]+ dG_dsigmav[0,2,:]*a_c[2][:]*a_c[0][:] + \\\n\t\t dG_dsigmav[1,0,i]*a_c[1][:]*a_c[0][:] + dG_dsigmav[1,1,:]*a_c[1][:]*a_c[1][:]+ dG_dsigmav[1,2,:]*a_c[1][:]*a_c[2][:] + \t\n\t\t dG_dsigmav[2,0,i]*a_c[2][:]*a_c[0][:] + dG_dsigmav[2,1,:]*a_c[2][:]*a_c[1][:]+ dG_dsigmav[2,2,:]*a_c[2][:]*a_c[2][:]))\n\t\n\n\tf_sigmav = f_sigmav - 0.5*np.sum(dlnd_dsigmav)\t\n\tf = np.concatenate((f_dpi, np.array([f_vx, f_vy, f_vz, f_sigmav]))) ### Grad L(theta), see Eq. 17\n\treturn -2.*f \t\t\t\t\t\t ### Grad U(theta), see Eq. 18", "def det(mtx):\n if not is_square(mtx):\n raise ValueError(\"Matrix should be square\")\n if len(mtx) == 2:\n return mtx[0][0] * mtx[1][1] - mtx[0][1] * mtx[1][0]\n\n result = 0\n sign = 1\n for inx in range(len(mtx)):\n next_mtx = get_minor_mtx(mtx, 0, inx)\n result += sign * (mtx[0][inx] * det(next_mtx))\n sign *= -1\n return result" ]
[ "0.6885692", "0.65035003", "0.62627643", "0.62573814", "0.6247173", "0.6219265", "0.6201201", "0.619879", "0.6149196", "0.6138502", "0.60837907", "0.6081997", "0.6081997", "0.6045166", "0.6043331", "0.6040035", "0.6020596", "0.6014159", "0.600143", "0.59920883", "0.59908885", "0.5982734", "0.5950538", "0.5949337", "0.59363395", "0.59175146", "0.590359", "0.59027284", "0.59012634", "0.58971363", "0.58907276", "0.5890504", "0.58891016", "0.58888257", "0.58624613", "0.58597463", "0.58593607", "0.5842004", "0.5841317", "0.58348954", "0.58224577", "0.5813928", "0.5812084", "0.5808016", "0.58067286", "0.57914793", "0.57820994", "0.5772348", "0.57705534", "0.5769604", "0.57687515", "0.57630205", "0.5755876", "0.57538915", "0.5753227", "0.57482386", "0.5747402", "0.5744218", "0.57406825", "0.57406825", "0.57388484", "0.5738823", "0.57382584", "0.57367396", "0.573557", "0.5735189", "0.57276696", "0.57258713", "0.5710849", "0.5707394", "0.57013994", "0.5695874", "0.5694693", "0.5693386", "0.5690821", "0.5687571", "0.5684301", "0.56834394", "0.56788", "0.5674708", "0.5670777", "0.5669225", "0.56602585", "0.56597877", "0.5655768", "0.5655377", "0.5653163", "0.56418157", "0.5641398", "0.5640695", "0.5634412", "0.56290203", "0.5626649", "0.56243306", "0.5622138", "0.56213", "0.5620395", "0.56189823", "0.56087345", "0.56065935", "0.56065106" ]
0.0
-1
Map label to name.
def label_to_name(label): return "Tree"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_to_name(self, label):\n\t\t\treturn self.labels[label]", "def label_to_name(self, label):\n\t\treturn self.labels[label]", "def label_to_name(self, label):\n return self.labels[label]", "def label_name(self) -> str:\n return pulumi.get(self, \"label_name\")", "def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]", "def label_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label_name\")", "def name2label(name):\n if name.endswith('_id'):\n name = name[:-3]\n return ' '.join([s.capitalize() for s in\n re.findall(r'([A-Z][a-z0-9]+|[a-z0-9]+|[A-Z0-9]+)', name)])", "def get_label_name(label_id):\n if self._int_to_label == {}:\n print(\"ERROR\")\n print(\"Need to import data first\")\n else:\n label_name = self._int_to_label[label_id]\n\n return label_name", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name", "def fromLabel(name):\n return Data.labels.index(name)", "def get_label(self, index, key=\"Name\"):\n return eval(self.names[key][index])", "def _create_labels_and_mapping(self, labels, mapping):\n numbered_classes = list(enumerate(list(labels), start=0))\n if mapping:\n new_mapping = {number: str(mapping[label]) for number, label in numbered_classes}\n else:\n new_mapping = {number: str(label) for number, label in numbered_classes}\n new_labels = [new_mapping[numbered[0]] for numbered in numbered_classes]\n\n return new_labels, new_mapping", "def label(self):\r\n return self._name", "def name_to_label(self, name):\n\t\t\treturn self.classes[name]", "def label(self) -> str:\n return self[\"label\"]", "def name_to_label(self, name):\n\t\treturn self.classes[name]", "def create_label(self, org, name):\n pass", "def label(self):\n # type: () -> str\n labels = self.__class__.__labels__\n return force_str(labels.get(self.value, self.name))", "def jointNameToLabel(name):\n label = JOINT_LABELS.get(name)\n return label if label else JOINT_LABELS[None]", "def name_to_label(self, name):\n return self.classes[name]", "def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name", "def labelName(self):\n if self.isRequired:\n return '%s*' % self.name\n return self.name", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def get_labelname(self):\n return self.options['labelname']", "def get_name(self):\n return self._label", "def get_label(urs):\n return assign_term(urs)[1]", "def 
make_label(self, node):\n\t\tcurstring = str(node.__class__)[13:-2]\n\t\tif isinstance(node, ast.Name):\n\t\t\tcurstring = node.id\n\t\telif isinstance(node, ast.Num):\n\t\t\tcurstring = str(node.n)\n\t\telif isinstance(node, ast.Str):\n\t\t\tcurstring = node.s\n\n\t\tif isinstance(node, ast.Load) or isinstance(node, ast.Store) or \\\n\t\t\tisinstance(node, ast.Param) or isinstance(node, ast.Add) or \\\n\t\t\tisinstance(node, ast.Sub) or isinstance(node, ast.Mult):\n\t\t\treturn None\n\n\t\ttry:\n\t\t\tself.labels[str(node)] = curstring\n\t\t\treturn str(node)\n\t\texcept AttributeError:\n\t\t\treturn None", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return user_name_for( self.name )", "def coco_label_to_name(self, coco_label):\n\t\t\treturn self.label_to_name(self.coco_label_to_label(coco_label))", "def normalisesym(self, label):\n return label", "def _get_label(self):\n return self.label", "def __add_new_label(self, name, value):\n self.__labels_dict[name] = value", "def coco_label_to_name(self, coco_label):\n\t\treturn self.label_to_name(self.coco_label_to_label(coco_label))", "def get_labels(fasta_file):\n\t\tbase_name = basename(fasta_file)\n\t\tname = splitext(base_name)[0]\n\t\tlabel = name.split(\"_\")[-1]\n\t\tassert label == \"pos\" or label == \"hard\", \"AssertionError: label {} not found, possible labels pos, hard.\"\n\t\tif label == \"pos\":\n\t\t\treturn \"Toxin\"\n\t\telif label == \"hard\":\n\t\t\treturn \"No_toxin\"", "def convert_label(self, label, inverse=False):\n temp = label.copy()\n if inverse:\n for v, k in self.label_mapping.items():\n label[temp == k] = v\n else:\n for k, v in self.label_mapping.items():\n label[temp == k] = v\n return label", "def rename_label(self, *args):\n return _ida_hexrays.vdui_t_rename_label(self, *args)", "def LabelToId(label):\n new_name = ''.join(c if c.isalnum() else ' ' for c in label)\n return ''.join(name.capitalize() if name[0].islower() else name\n for name in new_name.split())", "def get_label(cls):\r\n return cls._type_name(cls.label)", "def encode_label(self, label: str) -> int:\n return self.class_map[label]", "def add_label_to_unique_species_labels(self, label: str) -> str:\n unique_label, i = label, 0\n while unique_label in self.unique_species_labels:\n unique_label = f'{label}_{i}'\n i += 1\n self.unique_species_labels.append(unique_label)\n return unique_label", "def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label", "def get_final_label(addr, context, move_id):\n #if addr == 0x6a7:\n # print(\"FFF\", hex(context), move_id)\n assert trace.cpu.trace_done\n assert memorymanager.is_valid_binary_addr(addr)\n assert memorymanager.is_valid_binary_addr(context)\n assert move_id is None or movemanager.is_valid_move_id(move_id)\n name, move_id = label_maker(addr, context, move_id)\n if is_simple_name(name):\n labelmanager.labels[addr].add_explicit_name(name, move_id)\n\n return name", "def label(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"label\"))\r\n return self._name", "def bb_labelname(hit):\n try:\n real_name = hit.group(1)\n L = Label.objects.get(name=real_name)\n T = loader.get_template('webview/t/label.html')\n C = Context({ 'L' : L })\n return T.render(C)\n except:\n # This will throw if the requested label is spelt incorrectly, or doesnt exist\n return '<img src=\"/static/transmit.png\" alt=\"Invalid Label\" border=\"0\" /> %s' % (real_name)", "def 
get_label(cls):\n return cls._type_name(cls.label)", "def post_label():\n label_id = dao.set_label(id=str(uuid.uuid4()),\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def get_label(cls) -> str:\n return cls._meta.label_lower.split('.')[-1]", "def getLabelInfo(self, label): # real signature unknown; restored from __doc__\n pass", "def toProperty(label): # noqa: N802\n label = re.sub(r\"[^\\w]\", \" \", label)\n label = re.sub(\"([a-z])([A-Z])\", \"\\\\1 \\\\2\", label)\n label = label.split(\" \")\n return \"\".join([label[0].lower()] + [x.capitalize() for x in label[1:]])", "def create_label(self, name: str):\n return create_label(self.api_key, name)", "def label(self, name: str) -> Optional[str]:\n _args = [\n Arg(\"name\", name),\n ]\n _ctx = self._select(\"label\", _args)\n return _ctx.execute_sync(Optional[str])", "def label(self):\r\n raise NotImplementedError", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def label(self) -> str:\r\n\r\n return self.__label", "def MakeI18nLabel(label):\n label = translation.Translated(label)\n\n html = []\n for locale in translation.LOCALES:\n translated_label = label[locale]\n html_class = 'goofy-label-' + locale\n html.append(u'<span class=\"%s\">%s</span>' % (html_class, translated_label))\n return ''.join(html)", "def label_to_class_name(label):\n try:\n genre_label = pd.read_csv(path.join(DATA_PATH, 'genre_labels.csv'))\n return genre_label[genre_label['label'] == int(label)]['genre'].values[\n 0]\n except IOError:\n return label", "def get_label(self):\n return self.label", "def get_label(self):\n return self.label", "def get_label(self):\n return self.label", "def get_label(self):\n return self.label", "def parse_label(label):\n res = {}\n clazz, instance_num, room_type, room_num, area_num = label.split(\"_\")\n res['instance_class'] = clazz\n res['instance_num'] = int(instance_num)\n res['room_type'] = room_type\n res['room_num'] = int(room_num)\n res['area_num'] = int(area_num)\n return res", "def to_label(self):\n return self.label", "def getLabel(*args):", "def getLabel(*args):", "def getLabel(*args):", "def set_label(self, value: str = \"nowhere\"):\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"label\"))\r\n self._name = value", "def _collected_label(collect, label):\n if not collect.__name__.startswith('<'):\n return label + ' ' + collect.__name__\n else:\n return label", "def label(self, value):\n\t\tself._label = value", "def rename(self, label_value: int, new_name: str) -> None:\n seginfo = self.infos[label_value]\n seginfo.name = new_name\n # propagate state changes\n self._update_state_from_infos()", "def get_label ( self ):\n return self.label", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label\")", "def label(self):\n return self.label_", "def get_label():\n inp = option_text('Input label name (leave blank for no label):')\n add_to_collected('label', inp)\n OPTIONS['label'] = inp\n return", "def fix_label_names():\n\n assert trace.cpu.trace_done\n binary_addr = 
memorymanager.BinaryAddr(0)\n while binary_addr < len(classifications):\n c = classifications[binary_addr]\n if c is not None:\n dummy = [str(x) for x in c.as_string_list(binary_addr, None)]\n binary_addr += c.length()\n else:\n binary_addr += 1", "def label(self):\n return self.__label", "def label(self):\n return self.__label", "def __str__(self):\n return str(self.label)", "def _create_label(self, label: str, ent_id: Union[str, None]) -> str:\n if isinstance(ent_id, str):\n label = \"{}{}{}\".format(label, self.ent_id_sep, ent_id)\n return label", "def value_for(cls, name: str) -> t.Any:\n for key, value in list(cls.__labels__.items()):\n if isinstance(value, NameTitle) and value.name == name:\n return key\n return None", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def label(self) -> str:\n return self._label", "def __str__(self):\n return self.label", "def __str__(self):\n return self.label", "def _set_label(self, input_label):\n self.label = input_label\n return self.label", "def reset_name_labels(infr):\n infr.print('reset_name_labels', 1)\n orig_names = infr.get_node_attrs('orig_name_label')\n infr.set_node_attrs('name_label', orig_names)" ]
[ "0.8264281", "0.82229835", "0.8194375", "0.71729356", "0.70556575", "0.6895756", "0.6878515", "0.6878004", "0.68344384", "0.682798", "0.6825429", "0.6815113", "0.68029517", "0.6751829", "0.67418474", "0.6715094", "0.6705865", "0.6690326", "0.666572", "0.66346323", "0.65997976", "0.6597727", "0.65612996", "0.65612996", "0.65612996", "0.65612996", "0.65612996", "0.65612996", "0.65612996", "0.65612996", "0.65522796", "0.65504885", "0.6513819", "0.6497796", "0.64776504", "0.6470965", "0.64703226", "0.645827", "0.6455205", "0.64523596", "0.64510953", "0.64507824", "0.64169455", "0.6404684", "0.63977146", "0.6391015", "0.63783425", "0.63605475", "0.6359359", "0.6359245", "0.6357012", "0.63556385", "0.634819", "0.63465494", "0.6339717", "0.63385457", "0.63251156", "0.6285196", "0.6280733", "0.6257397", "0.6236135", "0.62353224", "0.6234291", "0.6229208", "0.6229208", "0.6229208", "0.6229208", "0.6223638", "0.6219711", "0.620912", "0.620912", "0.620912", "0.6206265", "0.6195918", "0.61912465", "0.61878127", "0.6186814", "0.61827976", "0.61827976", "0.61827976", "0.61827976", "0.6170576", "0.6166662", "0.61622113", "0.6150328", "0.6150328", "0.61480165", "0.61466885", "0.6145965", "0.61398286", "0.61398286", "0.61398286", "0.61398286", "0.61398286", "0.61398286", "0.61398286", "0.6137979", "0.6137979", "0.6128831", "0.6127514" ]
0.7184514
3
Predict individual tree crown bounding boxes for a single image
def predict_image(model, image_path, score_threshold = 0.1, max_detections= 200, return_plot=True): #predict raw_image = cv2.imread(image_path) image = preprocess(raw_image) image, scale = keras_retinanet_image.resize_image(image) if keras.backend.image_data_format() == 'channels_first': image = image.transpose((2, 0, 1)) # run network boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3] # correct boxes for image scale boxes /= scale # select indices which have a score above the threshold indices = np.where(scores[0, :] > score_threshold)[0] # select those scores scores = scores[0][indices] # find the order with which to sort the scores scores_sort = np.argsort(-scores)[:max_detections] # select detections image_boxes = boxes[0, indices[scores_sort], :] image_scores = scores[scores_sort] image_labels = labels[0, indices[scores_sort]] image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1) if return_plot: draw_detections(raw_image, image_boxes, image_scores, image_labels, label_to_name=label_to_name, score_threshold=score_threshold) return raw_image else: return image_boxes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict_trees(deepforest_model, rgb_path, bounds, expand=10):\n #DeepForest is trained on 400m crops, easiest to mantain this approximate size centered on points\n left, bottom, right, top = bounds\n expand_width = (40 - (right - left))/2\n left = left - expand_width\n right = right + expand_width\n \n expand_height = (40 - (top - bottom))/2 \n bottom = bottom - expand_height\n top = top + expand_height \n \n src = rasterio.open(rgb_path)\n pixelSizeX, pixelSizeY = src.res \n img = src.read(window=rasterio.windows.from_bounds(left, bottom, right, top, transform=src.transform))\n src.close()\n \n #roll to bgr channel order, bgr\n img = np.rollaxis(img, 0,3)\n img = img[:,:,::-1]\n \n #reshape to 400x400m\n print(\"Original shape is {}\".format(img.shape))\n resized = resize(img, 400, 400)\n boxes = deepforest_model.predict_image(numpy_image = resized, return_plot=False)\n \n if boxes.empty:\n return boxes\n \n #tranform boxes to original size\n x_scale = 400/img.shape[0]\n y_scale = 400/img.shape[1]\n \n boxes[\"xmin\"] = boxes[\"xmin\"]/x_scale \n boxes[\"xmax\"] = boxes[\"xmax\"]/x_scale \n boxes[\"ymin\"] = boxes[\"ymin\"]/y_scale \n boxes[\"ymax\"] = boxes[\"ymax\"]/y_scale \n\n #subtract origin. Recall that numpy origin is top left! Not bottom left.\n boxes[\"xmin\"] = (boxes[\"xmin\"] *pixelSizeX) + left\n boxes[\"xmax\"] = (boxes[\"xmax\"] * pixelSizeX) + left\n boxes[\"ymin\"] = top - (boxes[\"ymin\"] * pixelSizeY) \n boxes[\"ymax\"] = top - (boxes[\"ymax\"] * pixelSizeY)\n\n # combine column to a shapely Box() object, save shapefile\n boxes['geometry'] = boxes.apply(lambda x: shapely.geometry.box(x.xmin,x.ymin,x.xmax,x.ymax), axis=1)\n boxes = gpd.GeoDataFrame(boxes, geometry='geometry') \n \n #Give an id field\n boxes[\"box_id\"] = np.arange(boxes.shape[0])\n \n return boxes", "def predict_trees(deepforest_model, rgb_path, bounds, expand=10):\n #DeepForest is trained on 400m crops, easiest to mantain this approximate size centered on points\n left, bottom, right, top = bounds\n expand_width = (40 - (right - left))/2\n left = left - expand_width\n right = right + expand_width\n \n expand_height = (40 - (top - bottom))/2 \n bottom = bottom - expand_height\n top = top + expand_height \n \n src = rasterio.open(rgb_path)\n pixelSizeX, pixelSizeY = src.res \n img = src.read(window=rasterio.windows.from_bounds(left, bottom, right, top, transform=src.transform))\n src.close()\n \n #roll to bgr channel order, bgr\n img = np.rollaxis(img, 0,3)\n img = img[:,:,::-1]\n \n #reshape to 400x400m\n print(\"Original shape is {}\".format(img.shape))\n resized = resize(img, 400, 400)\n boxes = deepforest_model.predict_image(numpy_image = resized, return_plot=False)\n \n if boxes.empty:\n return boxes\n \n #tranform boxes to original size\n x_scale = 400/img.shape[0]\n y_scale = 400/img.shape[1]\n \n boxes[\"xmin\"] = boxes[\"xmin\"]/x_scale \n boxes[\"xmax\"] = boxes[\"xmax\"]/x_scale \n boxes[\"ymin\"] = boxes[\"ymin\"]/y_scale \n boxes[\"ymax\"] = boxes[\"ymax\"]/y_scale \n\n #subtract origin. Recall that numpy origin is top left! 
Not bottom left.\n boxes[\"xmin\"] = (boxes[\"xmin\"] *pixelSizeX) + left\n boxes[\"xmax\"] = (boxes[\"xmax\"] * pixelSizeX) + left\n boxes[\"ymin\"] = top - (boxes[\"ymin\"] * pixelSizeY) \n boxes[\"ymax\"] = top - (boxes[\"ymax\"] * pixelSizeY)\n\n # combine column to a shapely Box() object, save shapefile\n boxes['geometry'] = boxes.apply(lambda x: shapely.geometry.box(x.xmin,x.ymin,x.xmax,x.ymax), axis=1)\n boxes = gpd.GeoDataFrame(boxes, geometry='geometry') \n \n #Give an id field\n boxes[\"box_id\"] = np.arange(boxes.shape[0])\n \n return boxes", "def predict_image(self, image):\n image = preprocess_image(image, self.image_h, self.image_w)\n boxes = super().predict_image(image)\n return boxes", "def get_bounding_boxes(frame):\n\n blob = cv2.dnn.blobFromImage(frame,1/255,(320,320),(0,0,0),1,crop=False)\n net.setInput(blob)\n\n output_layer_names = net.getUnconnectedOutLayersNames()\n layer_outputs = net.forward(output_layer_names)\n\n all_boxes, confidences = get_all_boxes(layer_outputs)\n\n indexes=cv2.dnn.NMSBoxes(all_boxes,confidences,0.5,0.3)\n\n return indexes, confidences, all_boxes", "def im_detect_bbox(model, im, target_scale, target_max_size, boxes=None):\n\n inputs, im_scale = _get_blobs(im, boxes, target_scale, target_max_size)\n\n \n\n if cfg.DEDUP_BOXES > 0:\n v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)\n \n _, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)\n\n inputs['rois'] = inputs['rois'][index, :]\n boxes = boxes[index, :]\n\n data = torch.from_numpy(inputs['data']).cuda()\n rois = torch.from_numpy(inputs['rois']).cuda()\n labels = torch.from_numpy(inputs['labels']).cuda()\n\n return_dict = model(data,rois,labels)\n\n\n if 'final_scores' in return_dict:\n # print(\"pickle rick\")\n \n\n scores = return_dict['final_scores'].cpu().numpy().squeeze()\n scores = scores.reshape([-1, scores.shape[-1]])\n pred_boxes = boxes\n\n if cfg.DEDUP_BOXES > 0:\n # Map scores and predictions back to the original set of boxes\n scores = scores[inv_index, :]\n pred_boxes = pred_boxes[inv_index, :]\n\n else:\n scores = None\n pred_boxes = None\n im_scale = None\n\n return scores, pred_boxes, im_scale", "def predict_trees(deepforest_model, rgb_path, bounds, expand=10):\n #DeepForest is trained on 400m crops, easiest to mantain this approximate size centered on points\n left, bottom, right, top = bounds\n expand_width = (40 - (right - left))/2\n left = left - expand_width\n right = right + expand_width\n \n expand_height = (40 - (top - bottom))/2 \n bottom = bottom - expand_height\n top = top + expand_height \n \n src = rasterio.open(rgb_path)\n pixelSizeX, pixelSizeY = src.res \n img = src.read(window=rasterio.windows.from_bounds(left, bottom, right, top, transform=src.transform))\n \n #roll to bgr channel order, bgr\n img = np.rollaxis(img, 0,3)\n img = img[:,:,::-1]\n \n boxes = deepforest_model.predict_image(numpy_image = img, return_plot=False)\n\n #subtract origin. Recall that numpy origin is top left! 
Not bottom left.\n boxes[\"xmin\"] = (boxes[\"xmin\"] *pixelSizeX) + left\n boxes[\"xmax\"] = (boxes[\"xmax\"] * pixelSizeX) + left\n boxes[\"ymin\"] = top - (boxes[\"ymin\"] * pixelSizeY) \n boxes[\"ymax\"] = top - (boxes[\"ymax\"] * pixelSizeY)\n\n # combine column to a shapely Box() object, save shapefile\n boxes['geometry'] = boxes.apply(lambda x: shapely.geometry.box(x.xmin,x.ymin,x.xmax,x.ymax), axis=1)\n boxes = gpd.GeoDataFrame(boxes, geometry='geometry') \n \n #Buffer slightly \n boxes.geometry = boxes.geometry.buffer(1)\n return boxes", "def _get_bounding_boxes(self, imgs, summed_viz, threshold_value=.7):\n self.viz = summed_viz # for debug\n viz = summed_viz\n n_batchs = viz.shape[ 0]\n n_classes = viz.shape[-1]\n \n # viz.shape (100,14,14,20) => (14,14,100,20)\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Normalize <viz>, image per image (to be in range [-1,1])\n viz = viz / np.max(np.abs(viz), axis=(0,1))\n viz = (viz+1)/2 # range[0,1]\n \n # Resize each summed_viz to its original size (size of input image)\n if viz.shape[:2] != imgs.shape[1:3]:\n viz = np.array(\n [ skimage.transform.resize(viz[:,:,idx], imgs[idx].shape[:2])\n for idx in range(len(imgs))\n if viz.shape[0] != imgs.shape[1]\n ] )\n viz = viz.swapaxes(0,2); viz = viz.swapaxes(0,1)\n \n # Threshold <viz>s to keep values over 70% of its max values\n m_max = threshold_value * viz.max(axis=(0,1))\n viz = viz * (m_max < viz)\n \n # We want a 2d boundind box, so project threshold in xs and ys\n xxs = viz.sum(axis=0)\n yys = viz.sum(axis=1)\n \n # Get some non-thresholded values (left, top... of bounding boxes)\n get_lefts = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][ 0]\n get_tops = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][-1]\n get_rights = lambda b_id, c_idx: xxs[:,b_id,c_idx].nonzero()[0][-1]\n get_bottoms = lambda b_id, c_idx: yys[:,b_id,c_idx].nonzero()[0][ 0]\n\n # Debug\n # def get_lefts (b_id, c_idx): \n # print xxs[:,b_id,c_idx].nonzero()\n # xxs[:,b_id,c_idx].nonzero()[0][ 0]\n \n # Build the 2d array with first or lasts positions of zeros\n # INNER FUNCTION\n def _get_border_array(f_border=get_lefts):\n return np.array(\n [ map(f_border, [b_idx]*n_classes, range(n_classes))\n for b_idx in range(n_batchs) ]\n )\n \n lefts = _get_border_array(get_lefts)\n tops = _get_border_array(get_tops)\n rights = _get_border_array(get_rights)\n bottoms = _get_border_array(get_bottoms)\n \n return lefts, tops, rights, bottoms", "def postprocess_boxes(pred_bbox, original_image, train_input_size, score_threshold):\n \n # valid scle for box\n valid_scale=[0, np.inf]\n \n # turn bbox to array\n pred_bbox = np.array(pred_bbox)\n \n # obtain predicted x, y, w, h, objectiveness score, class probabilities\n pred_xywh = pred_bbox[:, 0:4]\n pred_objectiveness = pred_bbox[:, 4]\n pred_prob = pred_bbox[:, 5:]\n \n # 1. (x, y, w, h) --> (x_org, y_org, w_org, h_org)\n # obtain original image width and height\n org_h, org_w = original_image.shape[:2]\n \n # obtain resize ratio for height and width \n resize_ratio_h = train_input_size / org_h\n resize_ratio_w = train_input_size / org_w\n \n # scale x, y, w, h to original x, y, w, h\n pred_coor = np.concatenate([np.expand_dims(pred_xywh[:, 0] / resize_ratio_w, axis = -1), \n np.expand_dims(pred_xywh[:, 1] / resize_ratio_h, axis = -1),\n np.expand_dims(pred_xywh[:, 2] / resize_ratio_w, axis = -1),\n np.expand_dims(pred_xywh[:, 3] / resize_ratio_h, axis = -1),], axis = -1)\n \n # 2. 
(x_org, y_org, w_org, h_org) --> (xmin_org, ymin_org, xmax_org, ymax_org)\n # obtain diagonal image coordinates\n pred_coor = np.concatenate([pred_coor[:, :2] - pred_coor[:, 2:] * 0.5,\n pred_coor[:, :2] + pred_coor[:, 2:] * 0.5], axis = -1)\n\n # 3. clip some boxes those are out of range\n # clip bboxes where xmin_org, ymin_org < 0 and xmax_org, ymax_org out of bounds\n pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),\n np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis = -1)\n \n # mask that ensure that if xmin < xmax, ymin /> ymax and vice versa\n invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))\n pred_coor[invalid_mask] = 0\n\n # 4. discard some invalid boxes\n bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis = -1))\n scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))\n\n # 5. discard boxes with low scores\n # obtain index of class with max prob for each bbox\n classes = np.argmax(pred_prob, axis = -1)\n \n # multiply max prob with objectivness score for each bbox\n scores = pred_objectiveness * pred_prob[np.arange(len(pred_coor)), classes]\n \n # obtain score mask based on score threshold\n score_mask = scores > score_threshold\n \n # obtain combined mask\n mask = np.logical_and(scale_mask, score_mask)\n \n # obtain coordinates, scores and classes after mask\n coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]\n \n # return concatenated results \n return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis = -1)", "def _recover_boundingboxes(features):\n ymin = features['image/object/bbox/ymin'].values\n xmin = features['image/object/bbox/xmin'].values\n ymax = features['image/object/bbox/ymax'].values\n xmax = features['image/object/bbox/xmax'].values\n return tf.transpose([ymin, xmin, ymax, xmax])", "def im_detect_bbox(model, im, boxes=None):\n inputs, im_scales = _get_blobs(im, boxes)\n\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:\n v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)\n _, index, inv_index = np.unique(\n hashes, return_index=True, return_inverse=True\n )\n inputs['rois'] = inputs['rois'][index, :]\n boxes = boxes[index, :]\n\n # Add multi-level rois for FPN\n if cfg.FPN.MULTILEVEL_ROIS and not cfg.MODEL.FASTER_RCNN:\n _add_multilevel_rois_for_test(inputs, 'rois')\n\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v)\n workspace.RunNet(model.net.Proto().name)\n\n # Read out blobs\n if cfg.MODEL.FASTER_RCNN:\n assert len(im_scales) == 1, \\\n 'Only single-image / single-scale batch implemented'\n rois = workspace.FetchBlob(core.ScopedName('rois'))\n # unscale back to raw image space\n boxes = rois[:, 1:5] / im_scales[0]\n\n # Softmax class probabilities\n scores = workspace.FetchBlob(core.ScopedName('cls_prob')).squeeze()\n # In case there is 1 proposal\n scores = scores.reshape([-1, scores.shape[-1]])\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = workspace.FetchBlob(core.ScopedName('bbox_pred')).squeeze()\n # In case there is 1 proposal\n box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]])\n if 
cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:\n # Remove predictions for bg class (compat with MSRA code)\n box_deltas = box_deltas[:, -4:]\n pred_boxes = box_utils.bbox_transform(\n boxes, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS\n )\n pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape)\n if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:\n pred_boxes = np.tile(pred_boxes, (1, scores.shape[1]))\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:\n # Map scores and predictions back to the original set of boxes\n scores = scores[inv_index, :]\n pred_boxes = pred_boxes[inv_index, :]\n\n return scores, pred_boxes, im_scales", "def classify_image(image, model, image_box=None):\n images_list = []\n image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)\n # box argument clips image to (x1, y1, x2, y2)\n image = np.array(image)\n images_list.append(image)\n \n return [np.argmax(model.predict(np.array(images_list)))]", "def im_detections(model, im, anchors):\n k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL\n A = cfg.RETINANET.SCALES_PER_OCTAVE * len(cfg.RETINANET.ASPECT_RATIOS)\n inputs = {}\n inputs['data'], inputs['im_info'] = _get_image_blob(im)\n cls_probs, box_preds = [], []\n for lvl in range(k_min, k_max + 1):\n suffix = 'fpn{}'.format(lvl)\n cls_probs.append(core.ScopedName('retnet_cls_prob_{}'.format(suffix)))\n box_preds.append(core.ScopedName('retnet_bbox_pred_{}'.format(suffix)))\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v.astype(np.float32, copy=False))\n\n workspace.RunNet(model.net.Proto().name)\n scale = inputs['im_info'][0, 2]\n cls_probs = workspace.FetchBlobs(cls_probs)\n box_preds = workspace.FetchBlobs(box_preds)\n\n # here the boxes_all are [x0, y0, x1, y1, score]\n boxes_all = defaultdict(list)\n\n cnt = 0\n for lvl in range(k_min, k_max + 1):\n # create cell anchors array\n stride = 2. ** lvl\n cell_anchors = anchors[lvl]\n\n # fetch per level probability\n cls_prob = cls_probs[cnt]\n box_pred = box_preds[cnt]\n cls_prob = cls_prob.reshape((\n cls_prob.shape[0], A, int(cls_prob.shape[1] / A),\n cls_prob.shape[2], cls_prob.shape[3]))\n box_pred = box_pred.reshape((\n box_pred.shape[0], A, 4, box_pred.shape[2], box_pred.shape[3]))\n cnt += 1\n\n if cfg.RETINANET.SOFTMAX:\n cls_prob = cls_prob[:, :, 1::, :, :]\n\n cls_prob_ravel = cls_prob.ravel()\n # In some cases [especially for very small img sizes], it's possible that\n # candidate_ind is empty if we impose threshold 0.05 at all levels. This\n # will lead to errors since no detections are found for this image. 
Hence,\n # for lvl 7 which has small spatial resolution, we take the threshold 0.0\n th = cfg.RETINANET.INFERENCE_TH if lvl < k_max else 0.0\n candidate_inds = np.where(cls_prob_ravel > th)[0]\n if (len(candidate_inds) == 0):\n continue\n\n pre_nms_topn = min(cfg.RETINANET.PRE_NMS_TOP_N, len(candidate_inds))\n inds = np.argpartition(\n cls_prob_ravel[candidate_inds], -pre_nms_topn)[-pre_nms_topn:]\n inds = candidate_inds[inds]\n\n inds_5d = np.array(np.unravel_index(inds, cls_prob.shape)).transpose()\n classes = inds_5d[:, 2]\n anchor_ids, y, x = inds_5d[:, 1], inds_5d[:, 3], inds_5d[:, 4]\n scores = cls_prob[:, anchor_ids, classes, y, x]\n\n boxes = np.column_stack((x, y, x, y)).astype(dtype=np.float32)\n boxes *= stride\n boxes += cell_anchors[anchor_ids, :]\n\n if not cfg.RETINANET.CLASS_SPECIFIC_BBOX:\n box_deltas = box_pred[0, anchor_ids, :, y, x]\n else:\n box_cls_inds = classes * 4\n box_deltas = np.vstack(\n [box_pred[0, ind:ind + 4, yi, xi]\n for ind, yi, xi in zip(box_cls_inds, y, x)]\n )\n pred_boxes = (\n box_utils.bbox_transform(boxes, box_deltas)\n if cfg.TEST.BBOX_REG else boxes)\n pred_boxes /= scale\n pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape)\n box_scores = np.zeros((pred_boxes.shape[0], 5))\n box_scores[:, 0:4] = pred_boxes\n box_scores[:, 4] = scores\n\n for cls in range(1, cfg.MODEL.NUM_CLASSES):\n inds = np.where(classes == cls - 1)[0]\n if len(inds) > 0:\n boxes_all[cls].extend(box_scores[inds, :])\n\n # Combine predictions across all levels and retain the top scoring by class\n detections = []\n for cls, boxes in boxes_all.items():\n cls_dets = np.vstack(boxes).astype(dtype=np.float32)\n # do class specific nms here\n keep = box_utils.nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep, :]\n out = np.zeros((len(keep), 6))\n out[:, 0:5] = cls_dets\n out[:, 5].fill(cls)\n detections.append(out)\n\n detections = np.vstack(detections)\n # sort all again\n inds = np.argsort(-detections[:, 4])\n detections = detections[inds[0:cfg.TEST.DETECTIONS_PER_IM], :]\n boxes = detections[:, 0:4]\n scores = detections[:, 4]\n classes = detections[:, 5]\n return boxes, scores, classes", "def findCluster(path, bound_img_path):\r\n image = cv2.imread(path, cv2.IMREAD_COLOR)\r\n img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n blur = cv2.medianBlur(img, 3)\r\n thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\r\n\r\n # Creating a structuring element to perform morphological operations\r\n k_size = (3, 3)\r\n kernelMorph = cv2.getStructuringElement(cv2.MORPH_RECT, k_size)\r\n\r\n # Performing opening on the image\r\n morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernelMorph, iterations=1)\r\n\r\n # Performing dilation on the image\r\n ksizeKernelDilate = (50, 1)\r\n dilated = cv2.dilate(morph, ksizeKernelDilate, iterations= 5)\r\n\r\n # Identifying contours in the image\r\n cnts = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\r\n clr1, clr2, clr3 = 225, 255, 0,\r\n b = 10\r\n\r\n bounding_box_created = False\r\n if len(cnts) != 0:\r\n contour_rect_box = []\r\n conimage = []\r\n for c in cnts:\r\n # Identifying bounding box dimensions for each contour\r\n x, y, w, h = cv2.boundingRect(c)\r\n # Enclosing the contours in a rectangle\r\n if (w > 10 and h > 10 and w < img.shape[0] and h < img.shape[1]):\r\n conimage = cv2.rectangle(image, (x - b, y - b), (x - b + w + 2 * b, y - b + h + 2 * b),\r\n (clr1, clr2, clr3), -1)\r\n contour_rect_box.append((x, y, x + w, y + 
h, w, h))\r\n\r\n # Check whether the bounding box is created or not\r\n # If bounding box is created, then update the image with the identified bounding box,\r\n # Else print no clusters have been identified\r\n if len(conimage) > 0:\r\n cv2.imwrite(bound_img_path, conimage)\r\n bounding_box_created = True\r\n else:\r\n print(\"No clusters identified\")\r\n return cnts, bounding_box_created", "def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image", "def draw_boundingbox(image, infer_output, image_width, image_height, conf_thresh):\n\n out_image = image.copy()\n logger.debug(' - input image: [width] %d, [height] %d' % (image.shape[1], image.shape[0]))\n\n def check_valid_range(val, max_val):\n \"\"\" check the coordinate of bbox is inside of an image\"\"\"\n if val < 0:\n val = 0\n elif val > max_val:\n val = max_val\n else:\n pass\n return val\n\n valid_obj_num = 0\n valid_obj_bbox = []\n\n for obj_info in infer_output:\n conf = obj_info['conf']\n # filter by the confidence\n if conf >= conf_thresh:\n # calculate bbox coordinate\n xmin = int(obj_info['x_min'] * image_width)\n ymin = int(obj_info['y_min'] * image_height)\n xmax = int(obj_info['x_max'] * image_width)\n ymax = int(obj_info['y_max'] * image_height)\n\n # round up into valid range\n xmin = check_valid_range(xmin, image_width)\n ymin = check_valid_range(ymin, image_height)\n xmax = check_valid_range(xmax, image_width)\n ymax = check_valid_range(ymax, image_height)\n\n # draw bbox\n cv2.rectangle(out_image, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)\n\n valid_obj_num += 1\n valid_obj_bbox.append((xmin, ymin, xmax, ymax))\n logger.debug(' - draw bbox [%d, %d, %d, %d] confidence: %f' % (xmin,ymin,xmax,ymax,conf))\n\n return out_image, valid_obj_num", "def im_detect(net, im, boxes):\n blobs, unused_im_scale_factors = _get_blobs(im, boxes)\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n for i in range(len(blobs['data'])):\n if cfg.DEDUP_BOXES > 0:\n v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n hashes = np.round(blobs['rois'][i] * cfg.DEDUP_BOXES).dot(v)\n _, index, inv_index = np.unique(hashes, return_index=True,\n return_inverse=True)\n blobs['rois'][i] = blobs['rois'][i][index, :]\n boxes_tmp = 
boxes[index, :].copy()\n else:\n boxes_tmp = boxes.copy()\n t_data = blobs['data'][i].astype(np.float32, copy=False)\n #t_data = t_data.reshape((1, t_data.shape[0], t_data.shape[1], t_data.shape[2], t_data.shape[3]))\n data_height, data_width = t_data.shape[1], t_data.shape[2]\n im_data = torch.FloatTensor(t_data).cuda()\n im_data = im_data.permute(0, 3, 1, 2).contiguous() #.view(3, data_height, data_width)\n LIM = 2000 # split ROIs due to memory issue\n if cfg.TEST.USE_FLIPPED :\n blobs['data'][i] = blobs['data'][i][:, :, ::-1, :]\n width = blobs['data'][i].shape[2]\n t_data = blobs['data'][i].astype(np.float32, copy=False)\n data_height, data_width = t_data.shape[1], t_data.shape[2]\n #im_data = torch.FloatTensor(t_data).cuda()\n im_data_flip = torch.from_numpy(t_data.copy()).cuda()\n im_data_flip = im_data_flip.permute(0, 3, 1, 2).contiguous()#.view(3, data_height, data_width)\n #im_data = im_data[...,::-1]\n for j in range (int(np.ceil(blobs['rois'][i].shape[0] / LIM))) :\n t_rois = blobs['rois'][i][j*LIM:(j+1)*LIM].astype(np.float32, copy=False)\n im_rois = torch.FloatTensor(t_rois).cuda()\n ic_prob, ic_prob1, ic_prob2 = net(im_data, im_rois)\n scores_tmp = ic_prob + ic_prob1 + ic_prob2\n pred_boxes_small = np.tile(boxes_tmp[j*LIM : (j+1)*LIM], (1, scores_tmp.shape[2]))\n\n if cfg.TEST.USE_FLIPPED:\n #pdb.set_trace()\n oldx1 = blobs['rois'][i][j*LIM:(j+1)*LIM, 1].copy()\n oldx2 = blobs['rois'][i][j*LIM:(j+1)*LIM, 3].copy()\n blobs['rois'][i][j*LIM:(j+1)*LIM, 1] = width - oldx2 - 1\n blobs['rois'][i][j*LIM:(j+1)*LIM, 3] = width - oldx1 - 1\n assert (blobs['rois'][i][j*LIM:(j+1)*LIM, 3] >= blobs['rois'][i][j*LIM:(j+1)*LIM, 1]).all()\n t_rois = blobs['rois'][i][j*LIM:(j+1)*LIM].astype(np.float32, copy=False)\n im_rois = torch.FloatTensor(t_rois).cuda()\n ic_prob, ic_prob1, ic_prob2 = net(im_data_flip, im_rois)\n scores_tmp += ic_prob + ic_prob1 + ic_prob2\n del im_rois\n\n if j is 0 :\n scores_tmp_real = scores_tmp\n pred_boxes = pred_boxes_small\n else :\n scores_tmp_real = torch.cat((scores_tmp_real, scores_tmp), dim=1)\n pred_boxes = np.vstack((pred_boxes, pred_boxes_small))\n\n\n if cfg.DEDUP_BOXES > 0:\n # Map scores and predictions back to the original set of boxes\n scores_tmp = scores_tmp_real[:,inv_index, :]\n pred_boxes = pred_boxes[inv_index, :]\n \n if i == 0: \n scores = np.copy(scores_tmp.data).squeeze()\n if len(scores.shape) == 1 :\n scores = scores[np.newaxis, :]\n else:\n scores += scores_tmp[0].data\n\n scores /= len(blobs['data']) * (1. + cfg.TEST.USE_FLIPPED)\n return scores[:,1:], pred_boxes[:, 4:]", "def classify_image_probavec(image, model, image_box=None):\n images_list = []\n image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)\n # box argument clips image to (x1, y1, x2, y2)\n image = np.array(image)\n images_list.append(image)\n \n return model.predict(np.array(images_list))", "def __init_rect_list(self, ind, min_prob = 0.5):\n #bbox_label_pred = self.net.tops['bbox_label'].data[ind]\n #binary_pred = self.net.tops['binary_label'].data[ind]\n bottom_height = self.image_height\n bottom_width = self.image_width\n bbox_label_pred = self.net.tops['bbox_pred'].data[ind]\n binary_pred = self.net.tops['binary_softmax'].data[ind]\n label_pred = self.net.tops['label_softmax'].data[ind]\n \n (_, top_height, top_width) = bbox_label_pred.shape\n y_mul = bottom_height * 1. / top_height\n x_mul = bottom_width * 1. 
/ top_width\n rect_list = []\n for y in xrange(top_height):\n for x in xrange(top_width):\n # corresponds to indices in original image\n cx_orig = x_mul * (x + 0.5)\n cy_orig = y_mul * (y + 0.5)\n\n # we predict a symbol here if p(no label) < x\n if binary_pred[0, y, x] < 0.5:\n k = np.argmax(label_pred[:, y, x]) \n #if label_pred[k, y, x] < 0.2: continue\n\n # apply offsets to get positions in original image\n cx = cx_orig + bbox_label_pred[0, y, x]\n cy = cy_orig + bbox_label_pred[1, y, x]\n w = bbox_label_pred[2, y, x]\n h = bbox_label_pred[3, y, x]\n xmin = cx - w / 2.0\n ymin = cy - h / 2.0\n rect = Rect(xmin, ymin, xmin + w, ymin + h, label=k, prob=label_pred[k, y, x])\n rect_list.append(rect)\n\n return rect_list", "def classify_image_proba(image, model, image_box=None):\n images_list = []\n image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)\n # box argument clips image to (x1, y1, x2, y2)\n image = np.array(image)\n images_list.append(image)\n \n return [np.amax(model.predict(np.array(images_list)))]", "def preprocessing(image_data, max_height, max_width):\n img = image_data[\"image\"]\n img = resize_image(img, max_height, max_width)\n gt_boxes = image_data[\"objects\"][\"bbox\"]\n gt_labels = image_data[\"objects\"][\"label\"]\n return img, gt_boxes, gt_labels", "def get_path_bounding_box(self, image) -> BoundingBox:\n return NNManager.get_yolo_model(\"path\").predict(image)", "def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.5, top_k=-1):\n boxes = boxes[0]\n confidences = confidences[0]\n picked_box_probs = []\n picked_labels = []\n for class_index in range(1, confidences.shape[1]):\n probs = confidences[:, class_index]\n mask = probs > prob_threshold\n probs = probs[mask]\n if probs.shape[0] == 0:\n continue\n subset_boxes = boxes[mask, :]\n box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)\n box_probs = hard_nms(box_probs,\n iou_threshold=iou_threshold,\n top_k=top_k,\n )\n picked_box_probs.append(box_probs)\n picked_labels.extend([class_index] * box_probs.shape[0])\n if not picked_box_probs:\n return np.array([]), np.array([]), np.array([])\n picked_box_probs = np.concatenate(picked_box_probs)\n picked_box_probs[:, 0] *= width\n picked_box_probs[:, 1] *= height\n picked_box_probs[:, 2] *= width\n picked_box_probs[:, 3] *= height\n if picked_box_probs[0][4] > 0.9:\n return picked_box_probs[:, :4].astype(np.int32)\n else:\n return [[]]", "def post_process(probability,threshold,min_size):\n rects = []\n mask = cv2.threshold(probability,threshold,1,cv2.THRESH_BINARY)[1]\n num_component,component = cv2.connectedComponents(mask.astype(np.uint8))\n predictions = np.zeros((350,525),np.float32)\n num = 0\n for c in range(1,num_component):\n p = (component == c)\n print(\"p.sum(): {}\".format(p.sum()))\n if p.sum() > min_size:\n predictions[p] = 1\n num += 1\n if num > 0:\n mask_p = predictions.copy()\n contours,hierarchy = cv2.findContours(mask_p.astype(np.uint8),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(contours,key=cv2.contourArea,reverse=True)[:num]\n for c in cnts:\n x,y,w,h = cv2.boundingRect(c)\n rects.append((x,y,w,h))\n print('rect {}'.format((x,y,w,h)))\n return predictions,num,rects", "def test_nominal_case(self):\n\n image_filename, boxes = list(annotation.read(self.filename))\n self.assertEqual(image_filename, 'image.jpg')\n self.assertEqual(len(boxes), 2)\n width = 400\n height = 300\n b = boxes[0]\n self.assertEqual(b.xmin, 10 / width)\n self.assertEqual(b.ymin, 20 / height)\n 
self.assertEqual(b.xmax, 30 / width)\n self.assertEqual(b.ymax, 40 / height)", "def classifier():\n\tprint(\"Classifying\")\n\t#initialize important variables\n\tminConfidence = 0.5\n\tthresholdValue = 0.3\n\t\n\t\"\"\"\n\tfile = request.files#['image']\n\tfile.save(\"./classifier_image.jpg\")\n\tframe = cv2.imread(\"./classifier_image.jpg\")\n\t\"\"\"\n\tfile = request.json\n\tframe = np.array(file[\"Frame\"], dtype = \"uint8\") \n\n\t#file = request.files['image']\n\t#file.save(\"./classifier_image.jpg\")\n\t#frame = cv2.imread(\"./classifier_image.jpg\")\n\t#file = request.json\n\t#frame = np.array(file[\"contour\"], dtype=\"uint8\")\n\t\n\t#Get Image dimensions\n\timage = cv2.copyMakeBorder(frame, 30, 30, 30, 30, cv2.BORDER_CONSTANT, value=255)\n\t(H, W) = image.shape[:2]\n\t\n\t#Get the output layers parameters\n\tln = net.getLayerNames()\n\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\t\n\t#Create a blob to do a forward pass\n\tblob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\tnet.setInput(blob)\n\t#print(H, W)\n\tlayerOutputs = net.forward(ln)\n\tprint(type(net))\n\tboxes = []\n\tconfidences = []\n\tclassIDs = []\n\tfor output in layerOutputs:\n\t\tprint(\"detecting\")\n\t\t#loop over each detection\n\t\tfor detection in output:\n\t\t\t# extract the class ID and confidence (i.e., probability) of\n\t\t\t# the current object detection\n\t\t\tscores = detection[5:]\n\t\t\tclassID = np.argmax(scores)\n\t\t\tconfidence = scores[classID]\n\n\t\t\t# filter out weak predictions by ensuring the detected\n\t\t\t# probability is greater than the minimum probability\n\t\t\tif confidence > minConfidence:\n\t\t\t\t# scale the bounding box coordinates back relative to the\n\t\t\t\t# size of the image, keeping in mind that YOLO actually\n\t\t\t\t# returns the center (x, y)-coordinates of the bounding\n\t\t\t\t# box followed by the boxes' width and height\n\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t# use the center (x, y)-coordinates to derive the top and\n\t\t\t\t# and left corner of the bounding box\n\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t# update our list of bounding box coordinates, confidences,\n\t\t\t\t# and class IDs\n\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\tclassIDs.append(classID)\n\n\t# apply non-maxima suppression to suppress weak, overlapping bounding\n\t# boxes\n\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, minConfidence, thresholdValue)\n\n\t# ensure at least one detection exists\n\tif len(idxs) > 0:\n\t\toutput = json.load(open(outputFile))\n\t\t# loop over the indexes we are keeping\n\t\tfor i in idxs.flatten():\n\t\t\t# extract the bounding box coordinates\n\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\tprint(LABELS[classIDs[i]], output[LABELS[classIDs[i]]]+1, confidences[i])\n\t\t\toutput[LABELS[classIDs[i]]]+=1\n\t\t\n\t\tjson.dump(output, open(outputFile, \"w\"))\n\t\treturn LABELS[classIDs[i]]\n\telse:\n\t\treturn Response(status=200)", "def predict(self, img_path):\n\n img = cv2.imread(img_path)\n img0 = img.copy()\n \n #This happens inside datasets\n # Convert\n img = letterbox(img, new_shape=self.img_size)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n \n #this happens on detect\n img = 
torch.from_numpy(img).to(self.device)\n img = img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = self.model(img)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n \n # Process detections\n for i, det in enumerate(pred): # detections per image\n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()\n\n pred = [d.cpu().detach().numpy() for d in pred if d is not None]\n pred = pred[0] if len(pred) else pred\n \n pred = [[[x1, y1, x2, y2],conf] for x1, y1, x2, y2, conf, clss in pred]\n\n return pred", "def classify(self):\n infer = self.model.signatures['serving_default']\n for i, original_image in enumerate(self.images):\n image = original_image.copy()\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n image = cv.resize(image, (self.image_size, self.image_size))\n image = image / 255.\n\n image = [image]\n image = np.asarray(image).astype(np.float32)\n batch_data = tf.constant(image)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=10,\n max_total_size=10,\n iou_threshold=FLAGS.iou,\n score_threshold=FLAGS.score\n )\n\n height, width, _ = original_image.shape\n\n print(scores)\n classes = classes[0]\n print(classes)\n\n bbox = boxes[0][0].numpy()\n bbox[0] = int(bbox[0] * height)\n bbox[2] = int(bbox[2] * height)\n bbox[1] = int(bbox[1] * width)\n bbox[3] = int(bbox[3] * width)\n\n if BIRD_CLASS in classes:\n idx = np.where(classes == BIRD_CLASS)\n bbox = bbox.astype(np.int)\n x = int((bbox[1] + bbox[3]) / 2)\n y = int((bbox[0] + bbox[2]) / 2)\n self.thumbnail_center.append((x, y))\n cropped_img = original_image[bbox[0]:bbox[2], bbox[1]: bbox[3]]\n self.bird_images.append(cropped_img)\n self.confidence_arr.append(scores[idx[0][0]][0])\n\n self.generate_thumbnail(size=150)", "def postprocessing(bbox, image_path, side=416, threshold=0.3): # pylint: disable=R0914\n\n bounds = bbox[:, 0: 4]\n confidence = bbox[:, 4]\n probability = bbox[:, 5:]\n\n image = Image.open(image_path)\n width, height = image.size\n scale = side / max([width, height])\n width_scaled = int(width * scale)\n height_scaled = int(height * scale)\n width_offset = (side - width_scaled) // 2\n height_offset = (side - height_scaled) // 2\n bounds[:, (0, 2)] = (bounds[:, (0, 2)] - width_offset) / scale\n bounds[:, [1, 3]] = (bounds[:, [1, 3]] - height_offset) / scale\n bounds = bounds.astype(np.int32)\n\n bounds[np.where(bounds < 0)] = 0\n bounds[np.where(bounds[:, 2] > width), 2] = width - 1\n bounds[np.where(bounds[:, 3] > height), 3] = height - 1\n mask = np.ones(bounds.shape, dtype=bool)\n mask[:, 2] = (bounds[:, 2] - bounds[:, 0]) > 0\n mask[:, 3] = (bounds[:, 3] - bounds[:, 1]) > 0\n mask = np.logical_and.reduce(mask, axis=1)\n classes = np.argmax(probability, axis=1)\n scores = confidence * probability[np.arange(classes.size), classes]\n mask = mask & (scores > threshold)\n bounds = bounds[mask]\n classes = classes[mask]\n scores = scores[mask]\n return nms(bounds, classes, scores)", "def im_detect(sess, net, im, 
boxes=None):\n\n blobs, im_scales = _get_blobs(im, boxes)\n\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n\n if cfg.TEST.HAS_RPN:\n im_blob = blobs['data']\n blobs['im_info'] = np.array(\n [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],\n dtype=np.float32)\n # forward pass\n if cfg.TEST.HAS_RPN:\n feed_dict={net.data: blobs['data'], net.im_info: blobs['im_info'], net.keep_prob: 1.0}\n else:\n feed_dict={net.data: blobs['data'], net.rois: blobs['rois'], net.keep_prob: 1.0}\n\n run_options = None\n run_metadata = None\n if cfg.TEST.DEBUG_TIMELINE:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n\n cls_score, cls_prob, bbox_pred, rois = sess.run([net.get_output('cls_score'), net.get_output('cls_prob'), net.get_output('bbox_pred'),net.get_output('rois')],\n feed_dict=feed_dict,\n options=run_options,\n run_metadata=run_metadata)\n\n if cfg.TEST.HAS_RPN:\n assert len(im_scales) == 1, \"Only single-image batch implemented\"\n boxes = rois[:, 1:5] / im_scales[0]\n\n\n if cfg.TEST.SVM:\n # use the raw scores before softmax under the assumption they\n # were trained as linear SVMs\n scores = cls_score\n else:\n # use softmax estimated probabilities\n scores = cls_prob\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred\n pred_boxes = bbox_transform_inv(boxes, box_deltas)\n pred_boxes = _clip_boxes(pred_boxes, im.shape)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n if cfg.TEST.DEBUG_TIMELINE:\n trace = timeline.Timeline(step_stats=run_metadata.step_stats)\n trace_file = open(str(long(time.time() * 1000)) + '-test-timeline.ctf.json', 'w')\n trace_file.write(trace.generate_chrome_trace_format(show_memory=False))\n trace_file.close()\n\n return scores, pred_boxes", "def get_mask_bbox_and_score(yolact_net: Yolact, img, threshold=0.0, max_predictions=1):\n with torch.no_grad():\n frame = torch.from_numpy(img).cuda().float()\n batch = FastBaseTransform()(frame.unsqueeze(0))\n preds = yolact_net(batch)\n\n h, w, _ = img.shape\n\n save = cfg.rescore_bbox\n cfg.rescore_bbox = True\n t = postprocess(preds, w, h, visualize_lincomb=False, crop_masks=True)\n cfg.rescore_bbox = save\n\n idx = t[1].argsort(0, descending=True)[:max_predictions]\n classes, scores, boxes, masks = [x[idx].cpu().numpy() for x in t[:]]\n\n num_dets_to_consider = min(max_predictions, classes.shape[0])\n # Remove detections below the threshold\n for j in range(num_dets_to_consider):\n if scores[j] < threshold:\n num_dets_to_consider = j\n break\n masks_to_return = boxes_to_return = scores_to_return = None\n if num_dets_to_consider > 0:\n masks = masks[:num_dets_to_consider, :, :, None]\n masks_to_return = []\n boxes_to_return = []\n scores_to_return = []\n for m, b, s in zip(masks, boxes, scores):\n masks_to_return.append(m)\n boxes_to_return.append(b)\n scores_to_return.append(s)\n if len(masks_to_return) == 1:\n masks_to_return = masks_to_return[0]\n if len(boxes_to_return) == 1:\n boxes_to_return = boxes_to_return[0]\n if len(scores_to_return) == 1:\n scores_to_return = scores_to_return[0]\n return masks_to_return, boxes_to_return, scores_to_return", "def _check_load_bbox(self, coco, entry):\n ann_ids = coco.getAnnIds(imgIds=entry['id'], iscrowd=None)\n objs = 
coco.loadAnns(ann_ids)\n # check valid bboxes\n valid_objs = []\n valid_segs = []\n width = entry['width']\n height = entry['height']\n for obj in objs:\n if obj.get('ignore', 0) == 1:\n continue\n # crowd objs cannot be used for segmentation\n if obj.get('iscrowd', 0) == 1:\n continue\n # need accurate floating point box representation\n x1, y1, w, h = obj['bbox']\n x2, y2 = x1 + np.maximum(0, w), y1 + np.maximum(0, h)\n # clip to image boundary\n x1 = np.minimum(width, np.maximum(0, x1))\n y1 = np.minimum(height, np.maximum(0, y1))\n x2 = np.minimum(width, np.maximum(0, x2))\n y2 = np.minimum(height, np.maximum(0, y2))\n # require non-zero seg area and more than 1x1 box size\n if obj['area'] > self._min_object_area and x2 > x1 and y2 > y1 \\\n and (x2 - x1) * (y2 - y1) >= 4:\n contiguous_cid = self.json_id_to_contiguous[obj['category_id']]\n valid_objs.append([x1, y1, x2, y2, contiguous_cid])\n\n segs = obj['segmentation'] # polygon or RLE\n assert isinstance(segs, list) or isinstance(segs, dict), '{}'.format(obj.get('iscrowd', 0))\n if isinstance(segs, list):\n valid_segs.append([np.asarray(p).reshape(-1, 2).astype('float32')\n for p in segs if len(p) >= 6])\n else:\n valid_segs.append(segs)\n # there is no easy way to return a polygon placeholder: None is returned\n # in validation, None cannot be used for batchify -> drop label in transform\n # in training: empty images should be be skipped\n if not valid_objs:\n valid_objs = None\n valid_segs = None\n else:\n valid_objs = np.asarray(valid_objs).astype('float32')\n return valid_objs, valid_segs", "def get_bounding_boxes(\n image: np.ndarray, conf_threshold: float = 0.5, scale_size: Tuple[int, int] = (-1, -1)\n) -> List[Tuple[int, ...]]:\n # https://learnopencv.com/face-detection-opencv-dlib-and-deep-learning-c-python/\n net = opencv_dnn_detector()\n\n face_locations: List[Tuple[int, ...]] = []\n\n blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), [104, 117, 123], False, False)\n net.setInput(blob)\n detections = net.forward()\n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > conf_threshold:\n x1 = detections[0, 0, i, 3]\n y1 = detections[0, 0, i, 4]\n x2 = detections[0, 0, i, 5]\n y2 = detections[0, 0, i, 6]\n if scale_size == (-1, -1):\n x1 = int(x1 * image.shape[1])\n y1 = int(y1 * image.shape[0])\n x2 = int(x2 * image.shape[1])\n y2 = int(y2 * image.shape[0])\n else:\n x1 = int(x1 * scale_size[1])\n y1 = int(y1 * scale_size[0])\n x2 = int(x2 * scale_size[1])\n y2 = int(y2 * scale_size[0])\n face_locations.append((y1, x2, y2, x1))\n return face_locations", "def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]", "def getMostContour(img,svm,knn,filterArr,digits,wThresh,hThresh):\r\n # append the filter to filter array, this approach is used in case of \r\n # multiple filter methods would be used.\r\n counts = []\r\n # iterare through every filter\r\n for flt in filterArr:\r\n # copy the image so we don't draw on same image\r\n flt_img = img.copy()\r\n last_img = img.copy()\r\n flt_contour,cntfound_fltr = drawcntMap(img.copy(),flt,wThresh,hThresh) \r\n if not digits:\r\n flt_contour,cntfound_fltr = drawcntMap(img.copy(),flt,wThresh,hThresh)\r\n flt_contour_map = []\r\n labels = []\r\n for crop,(x,y,w,h),contour in 
cropNwriteBBs(img,cntfound_fltr):\r\n #crop = np.array(crop,dtype='float32')\r\n crop = cv2.cvtColor(crop,cv2.COLOR_BGR2GRAY)\r\n crop = cv2.resize(crop,(25,25))\r\n # winSize is the size of the image cropped to an multiple of the cell size\r\n hog_fts = hog.compute(crop)\\\r\n .reshape(n_cells[1] - block_size[1] + 1,\r\n n_cells[0] - block_size[0] + 1,\r\n block_size[0], block_size[1], nbins) \\\r\n .transpose((1, 0, 2, 3, 4))\r\n hog_fts = np.resize(hog_fts.flatten(),(1,576))\r\n # make the resulted crop same type with the trained values\r\n hog_fts.dtype = 'float32'\r\n # get predicted labels\r\n label_svm=svm.predict(hog_fts)[1]\r\n label_knn = knn.findNearest(hog_fts,k=5)[1]\r\n # label 10 is considered as 'not digit' or 'thrash'\r\n # so if predicted label is not 10, draw the bounding box\r\n if digits:\r\n if(label_svm!=10 and label_knn != 10 and label_svm!=11 and label_knn != 11):\r\n flt_contour_map.append(contour)\r\n labels.append(str(label_knn[0])[1])\r\n else:\r\n if(label_svm!=2 and label_knn != 2):\r\n flt_contour_map.append(contour)\r\n labels.append(str(label_knn[0])[1])\r\n #cv2.putText(flt_img,str(label_knn[0])[1],(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=0.8,color=(0,0,255))\r\n #cv2.putText(flt_img,str(label_knn[0])[1],(x,y),fontFace=cv2.FONT_HERSHEY_COMPLEX,fontScale=0.8,color=(0,0,255))\r\n last_cnt,last_labels = secondElimination(flt_contour_map,labels)\r\n for cnt in last_cnt:\r\n x,y,w,h = cv2.boundingRect(cnt)\r\n cv2.rectangle(flt_img,(x,y),(x+w,y+h),[0,255,0],2)\r\n #showWait(flt_img,'fltres')\r\n _,xx,res_boxes,_,_ = mergeBoundingBoxes(flt_img,last_cnt,last_labels)\r\n cnt = len(res_boxes)\r\n counts.append([cnt,flt_img,last_cnt,last_labels])\r\n # append resulted image and contours to an array\r\n counts = np.asarray(counts)\r\n # get the resulted image which contain more digits (bounding boxes)\r\n tmp = counts[:,0]\r\n resulted_img = counts[np.argmax(tmp),1]\r\n result_labels = counts[np.argmax(tmp),3]\r\n resulted_contour = counts[np.argmax(tmp),2]\r\n return resulted_contour,result_labels,resulted_img", "def draw_boxes(image, gt_boxes_norm, pre_boxes_norm):\n # Load Image\n image = (image * 255.0).astype(np.uint8)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n #image = cv2.add(image,image)\n #image = cv2.bitwise_not(image)\n # Draw prediction boxes\n for pre_box_points in pre_boxes_norm:\n image_shape = np.flip(image.shape[0:2], axis=0)\n\n for pre_box_point_idx in range(len(pre_box_points)):\n\n pre_start_point = pre_box_points[pre_box_point_idx] * image_shape\n pre_end_point = pre_box_points[(pre_box_point_idx + 1) % 4] * image_shape\n\n pre_start_point = pre_start_point.astype(np.int32)\n pre_end_point = pre_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(pre_start_point),\n tuple(pre_end_point),\n (107,222,35), thickness=1)\n\n # Draw boxes if they exist\n if gt_boxes_norm is not None:\n for gt_box_points in gt_boxes_norm:\n for gt_box_point_idx in range(len(gt_box_points)):\n\n gt_start_point = gt_box_points[gt_box_point_idx] * image_shape\n gt_end_point = gt_box_points[(gt_box_point_idx + 1) % 4] * image_shape\n\n gt_start_point = gt_start_point.astype(np.int32)\n gt_end_point = gt_end_point.astype(np.int32)\n\n cv2.line(\n image, tuple(gt_start_point),\n tuple(gt_end_point),\n (0,0,205), thickness=1)\n\n return image", "def get_bounding_boxes(outputs, width: int, height: int):\n\n # detected bounding boxes, obtained confidences and class's number\n boxes = []\n scores = []\n classes = []\n\n # this is our threshold for keeping 
the bounding box\n probability_minimum = 0.5\n\n # iterating through all three outputs\n for result in outputs:\n # going through all bounding boxes from current output layer\n for detection in result:\n # getting class for current object\n scores_current = detection[5:]\n class_current = np.argmax(scores_current)\n\n # getting probability for current object\n probability_current = scores_current[class_current]\n\n # getting object confidence for current object\n object_confidence = detection[4]\n\n # eliminating weak predictions by minimum probability\n if probability_current > probability_minimum:\n # if probability_current*object_confidence > probability_minimum: # this is an alternative way\n\n # Scaling bounding box coordinates to the initial image size\n # by element-wise multiplying them with the width and height of the image\n box_current = np.array(detection[0:4]) * np.array([width, height, width, height])\n\n # YOLO data format keeps center of detected box and its width and height\n # here we reconstruct the top left and bottom right corner\n x_center, y_center, box_width, box_height = box_current.astype('int')\n x_min = int(x_center - (box_width / 2))\n y_min = int(y_center - (box_height / 2))\n x_max = int(x_center + (box_width / 2))\n y_max = int(y_center + (box_height / 2))\n\n # adding results into prepared lists\n boxes.append([x_min, y_min, x_max, y_max])\n scores.append(float(probability_current))\n classes.append(class_current)\n\n boxes = np.array(boxes)\n scores = np.array(scores)\n classes = np.array(classes)\n return boxes, scores, classes", "def bbox(img):\n a = np.where(img != 0)\n bbox = np.min(a[0]), np.max(a[0]), np.min(a[1]), np.max(a[1])\n return bbox", "def predict(model, image, score_thresh, screen_mode, fill):\n\n global COLOR_DICT, prev_bboxes, prev_classes\n\n # Run the prediction\n scores, boxes, classes = model.predict(image)\n \n # Prepare the images for augmentation\n if screen_mode:\n new_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n else:\n new_image = np.zeros((image.shape[0], image.shape[1], 3), dtype=np.uint8)\n cv2.rectangle(new_image, (0, 0), (image.shape[1], image.shape[0]), (255, 0, 0), 5)\n\n # Go through each bounding box and only draw and save the ones above the score threshold\n detected = []\n for i in range(len(scores)):\n if scores[i] > score_thresh:\n detected.append([i, classes[i] + 1])\n detected = bbox_sort(detected) \n \n text_list = [] \n bboxes = []\n classes = []\n for i in range(len(detected)):\n box = boxes[detected[i][0]] * np.array([image.shape[0], image.shape[1], image.shape[0], image.shape[1]])\n bboxes.append(box)\n classes.append(detected[i][0])\n \n matched_indices = matchBBoxes(bboxes, prev_bboxes, 100)\n \n for i in range(len(detected)):\n color = COLOR_DICT[detected[i][1]]\n \n x0 = bboxes[i][1] - 20\n y0 = bboxes[i][0] - (1080 - bboxes[i][0]) * 50 / 1080\n x1 = bboxes[i][3] + 20\n y1 = bboxes[i][2]\n \n num_pairs = 0\n \n for index_pair in matched_indices:\n if index_pair[0] == i and detected[i][0] == prev_classes[index_pair[1]]:\n num_pairs += 1\n x0 = ((x0 * num_pairs) + prev_bboxes[index_pair[1]][1] - 20) / (num_pairs + 1.0)\n y0 = ((y0 * num_pairs) + prev_bboxes[index_pair[1]][0] - (1080 - prev_bboxes[index_pair[1]][1]) * 50 / 1080) / (num_pairs + 1.0)\n x1 = ((x1 * num_pairs) + prev_bboxes[index_pair[1]][3] + 20) / (num_pairs + 1.0)\n y1 = ((y1 * num_pairs) + prev_bboxes[index_pair[1]][2]) / (num_pairs + 1.0)\n \n line_type = 3\n if fill and not screen_mode:\n line_type = cv2.FILLED\n \n 
cv2.rectangle(new_image, (int(x0), int(y0)), (int(x1), int(y1)), color, line_type)\n\n name = CLASS_DICT[detected[i][1]]\n \n prev_bboxes = bboxes\n prev_classes = classes\n dy = 50 # Change in y position for each item\n for text in text_list:\n color = COLOR_DICT[text[2]]\n cv2.putText(new_image, str(text[1]) + \"x \" + text[0], (1500, y), cv2.FONT_HERSHEY_DUPLEX, 0.5, color, lineType=cv2.LINE_AA)\n y += dy\n\n return new_image", "def get_human_box_detection(bbox):\n array_boxes = [] # Create an empty list\n for i, bbox in enumerate(bbox):\n # If the class of the detected object is 1 and the confidence of the prediction is > 0.6\n if bbox[5] == 0:\n box = np.array(bbox[:4], dtype=np.int32)\n array_boxes.append[box[0],box[1]]\n return array_boxes", "def overlay_boxes(image, predictions):\n labels = predictions.get_field(\"labels\")\n boxes = predictions.bbox\n\n for c_label in DRAW_ORDER:\n for box, label in zip(boxes, labels):\n if label == c_label:\n color = CATEGORIES_COLOR[label] # [int(color[0]), int(color[1]), int(color[2])]\n box = box.to(torch.int64)\n top_left, bottom_right = box[:2].tolist(), box[2:].tolist()\n image = cv2.rectangle(\n image, tuple(top_left), tuple(bottom_right), tuple(color), 3\n )\n\n return image", "def get_classification(self, image):\n #TODO implement light color prediction\n max_idx = 4\n with self.detection_graph.as_default():\n with tf.Session(graph=self.detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')\n \n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')\n \n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n boxes = np.squeeze(boxes)\n scores = np.squeeze(scores)\n classes = np.squeeze(classes).astype(np.int32)\n min_score_thresh = .50\n # find majority light state\n counter = [0, 0, 0, 0, 0]\n for i in range(boxes.shape[0]):\n if scores is None or scores[i] > min_score_thresh:\n counter[classes[i]] += 1\n for i in range(1, 5):\n if counter[i] > counter[max_idx]:\n max_idx = i\n return self.classmap[max_idx]", "def PostProcessing(image, resultList, threshold=0.6):\n\tnum_detections = resultList[0][0].astype(np.int)\n\tscores = resultList[2]\n\tboxes = resultList[3]\n\tbbox_num = 0\n\t\n\t# loop through all the detections and get the confidence and bbox coordinates\n\tfor i in range(num_detections):\n\t\tdet_conf = scores[0, i]\n\t\tdet_ymin = boxes[0, i, 0]\n\t\tdet_xmin = boxes[0, i, 1]\n\t\tdet_ymax = boxes[0, i, 2]\n\t\tdet_xmax = boxes[0, i, 3]\n\n\t\tbbox_width = det_xmax - det_xmin\n\t\tbbox_height = det_ymax - det_ymin\n\t\t# the detection confidence and bbox dimensions must be greater than a minimum value to be a valid detection\n\t\tif threshold <= det_conf and 1 >= 
det_conf and bbox_width > 0 and bbox_height > 0:\n\t\t\tbbox_num += 1\n\t\t\txmin = int(round(det_xmin * image.shape[1]))\n\t\t\tymin = int(round(det_ymin * image.shape[0]))\n\t\t\txmax = int(round(det_xmax * image.shape[1]))\n\t\t\tymax = int(round(det_ymax * image.shape[0]))\n\t\t\t\n\t\t\tcv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)\n\t\telse:\n\t\t\tcontinue\n\n\tprint(\"detected bbox num:\", bbox_num)\n\tSRC_PATH = os.path.realpath(__file__).rsplit(\"/\", 1)[0]\n\tOutput_PATH = os.path.join(SRC_PATH, \"../output/output.jpg\")\n\ttry:\n\t\tos.mkdir(os.path.join(SRC_PATH, \"../output/\"))\n\texcept Exception as e:\n\t\tprint(\"Output Path already exists\")\n\tcv2.imwrite(Output_PATH, image)", "def get_classification(self, image):\n if self.correct_gamma:\n if self.gamma == 1.0:\n self.gamma = 0.6\n elif self.gamma == 0.6:\n self.gamma = 1.0\n image = self.adjust_gamma(image, self.gamma)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_np = np.asarray(image, dtype=\"uint8\")\n image_np_expanded = np.expand_dims(image_np, axis=0)\n\n detected = False\n\n with self.detection_graph.as_default():\n (boxes, scores, classes, num) = self.sess.run(\n [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],\n feed_dict={self.image_tensor: image_np_expanded})\n boxes = np.squeeze(boxes)\n classes = np.squeeze(classes).astype(np.int32)\n scores = np.squeeze(scores)\n best_scores = []\n\n for idx, classID in enumerate(classes):\n if self.MODEL_NAME == 'ssdlite_mobilenet_v2_coco_2018_05_09':\n if classID == 10: # 10 is traffic light\n if scores[idx] > 0.10: #confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n else: # we tuned the model to classify only traffic lights\n if scores[idx] > 0.10: # confidence level\n best_scores.append([scores[idx], idx, classID])\n detected = True\n\n tl_index = TrafficLight.UNKNOWN\n if detected:\n best_scores.sort(key=lambda tup: tup[0], reverse=True)\n\n best_score = best_scores[0]\n rospy.logdebug(\"number of TL found %d, best score: %f, color: %f\", len(best_scores), best_score[0], best_score[2])\n nbox = boxes[best_score[1]]\n\n height = image.shape[0]\n width = image.shape[1]\n\n box = np.array([nbox[0]*height, nbox[1]*width, nbox[2]*height, nbox[3]*width]).astype(int)\n box_height = box[2] - box[0]\n box_width = box[3] - box[1]\n ratio = float(box_height)/float(box_width)\n rospy.logdebug(\"ratio: %f\", ratio)\n if ratio >= 2.0 and ratio < 3.0: #started from 2.4\n tl_cropped = image[box[0]:box[2], box[1]:box[3]]\n tl_color, tl_index = self.get_color(tl_cropped)\n #color = ['RED', 'YELLOW', 'GREEN', 'UNKNOWN']\n #tl_index = best_score[2]\n #tl_color = color[tl_index]\n #augment image with detected TLs\n cv2.rectangle(image, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_color = (255, 255, 255)\n cv2.putText(image, tl_color, (box[1], box[0]), font, 2.0, font_color, lineType=cv2.LINE_AA)\n return image, tl_index", "def convert_cellboxes(predictions, S=7, B=1, C=20):\n\n predictions = predictions.to(\"cpu\")\n batch_size = predictions.shape[0]\n predictions = predictions.reshape(batch_size, 7, 7, C + 5 * B) \n bounding_box = predictions[..., 21:25] # x, y , width, height # N x 7 x 7 x 4\n \n confidence = predictions[..., 20].unsqueeze(-1) # N x 7 x 7 \n\n pred_class = predictions[..., 0:20].argmax(-1).unsqueeze(-1) # N x 7 x 7\n\n # N x 49 x 6, return [pred_class, confidence, x, y, width, height]\n # 
print(torch.flatten(pred_class, 1, 2).shape)\n # print(torch.flatten(confidence, 1, 2).shape)\n # print(torch.flatten(bounding_box, 1, 2).shape)\n predict_boxes = torch.cat((torch.flatten(pred_class, 1, 2), torch.flatten(confidence, 1, 2), torch.flatten(bounding_box, 1, 2)), dim = -1)\n return predict_boxes", "def fit_image(im):\n \n # menpo stores images CHW instead of HWC that TensorFlow uses.\n pixels = im.pixels.transpose(1, 2, 0)\n\n # the model expects rgb images.\n if im.n_channels == 1:\n pixels = np.dstack([pixels]*3)\n\n bounding_box = im.landmarks['bounding_box'].lms\n\n prediction, = sess.run(pred, feed_dict={\n image: pixels,\n # grab the upper-left and lower-down points of the bounding box.\n initial_bb: bounding_box.points[[0, 2]].ravel()}\n )\n \n return menpo.shape.PointCloud(prediction)", "def detect(self, mask):\n # 1) Return Non zero indices\n det_idx = np.where(mask > 0.0)\n idx_x, idx_y = det_idx[0], det_idx[1]\n # 2) Create 1x1 box for each pixel detected.\n detections = []\n for i in range(0, len(idx_x)):\n x, y = idx_x[i], idx_y[i]\n detections.append((x, y, x+1, y+1, 1)) # x1, y1, x2, y2, area\n # 3) merge boxes\n bounding_boxes = self.bounding_boxes(detections)\n return bounding_boxes", "def gain_box_score(im, preds):\n if len(preds[0]) == 0:\n cv2.imshow(\"Video detection\", im)\n else:\n for pred in preds:\n for i, box_label in enumerate(zip( pred[\"boxes\"], pred[\"labels\"] )):\n box, label = box_label\n xmin, ymin, xmax, ymax = box\n#-------------------- Create a Rectangle patch ----------------------- \n if label==1:\n class_name='with_mask'\n color = (0, 255, 0)\n elif label==2:\n class_name='without_mask'\n color = (0, 0, 255)\n elif label==3:\n class_name='mask_worn_improperly'\n color = (255, 255 ,0)\n score = pred['scores'][i]\n#--------------------- Bounding Box painting -------------------------- \n if score > 0.65:\n cv2.rectangle(im, (xmin, ymin), (xmax, ymax), color, 1) \n cv2.putText(im, str(class_name)+str(round(score.item(),2)), (xmin,int(ymax-ymax/20)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1) #print class name\n cv2.imshow(\"Video detection\",im)\n print('*****', 'Bbox:', i , '*****' )\n print('Class: ', str(class_name))\n print('Scores: ', str(round(score.item(),2)))\n print('boxes: ',f'{int(xmin)}, {int(ymin)}, {int(xmax)}, {int(ymax)}')\n print('image shape: ', im.shape) \n else:\n cv2.imshow(\"Video detection\", im)\n print('********************','\\n')", "def obtain_training_set_shape(para, alg):\n \n \n # Preliminaries\n z = os.listdir('Images/shapeset') # image directory\n box_how = [] # the ratio of box's height over its width\n omega = np.load('omega' + alg + '.npy') # load parameters\n \n # Establish a typical bounding box shape\n for i in range(len(z)):\n tu = img.imread('Images/shapeset/' + z[i])\n tu_b = obtain_testing_y(tu, omega, alg)\n tu_b = tu_b.astype(np.uint8) # convert binary image to a format that @findContours can process\n \n # find contours of objects with wanted color\n contours, hierachy = cv2.findContours(tu_b, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # the binary image will be replaced by this binary contour image\n cv2.drawContours(tu_b, contours, -1, (255, 0, 0), 3) # -1 = draw all contours, (color), thickness of contour lines\n \n # get contours edges, namely bounding box\n tu_b = label(tu_b) # label connected regions of an integer array, so that unconnected contours will considered as separate regions\n region = regionprops(tu_b) # identify regions of the labeled image\n rc = [] # region's 
centroids\n\n # get rid of tiny regions\n for prop in region.copy():\n if prop.bbox_area < para.bbox_area:\n region.remove(prop)\n else:\n rc.append(prop.centroid)\n \n # get rid of repeated regions\n ind = sorted(range(len(rc)), key = rc.__getitem__) # store element indices of local_centroid tuples before sorting\n rs = sorted(rc) # sorted region\n\n rdel = [] # repeated regions to be deleted\n for i in range(0, len(rs) - 1):\n if abs(rs[i+1][0] - rs[i][0]) < para.cent_dif and abs(rs[i+1][1] - rs[i+1][1]) < para.cent_dif:\n rdel.append(region.copy().pop(ind[i+1]))\n \n for i in range(len(rdel)):\n region.remove(rdel[i])\n \n # since only 1 object, only 1 region should be identified\n if len(region) > 1:\n for i in range(len(region)):\n print(region[i].centroid, region[i].bbox_area)\n plt.imshow(tu_b, cmap = 'gray')\n fig = plt.get_current_fig_manager()\n fig.window.setGeometry(400, 100, 3000, 2000)\n plt.title('You found more than 1 contour on this image!!!', fontsize = 66)\n else:\n minr, minc, maxr, maxc = region[0].bbox # max/min row/column coordinates\n box_how.append((maxr-minr)/(maxc-minc))\n \n # Store extreme values\n max_ratio = max(box_how)\n min_ratio = min(box_how) \n \n return max_ratio, min_ratio", "def draw_pred(image: np.ndarray, class_name: str, box_dimensions: list, distance: float) -> None:\n\n # Params describes the amount of scaling in each of the rgb channels to produce a different color for each class\n # The main object classes have different colours to make them easier to distinguish\n params = {\"car\": (1, 0.85, 0.678), \"person\": (0, 0, 1), \"truck\": (0.33, 1, 0.5), \"bus\": (0.506, 0.149, 0.965)}\n if class_name in params:\n colour_scale = params[class_name] # get the colour values from the colour dictionary\n else:\n colour_scale = (0.90, 0.85, 0.678) # standard colour for less frequently occurring objects\n\n # Choose the brightness of the colour depending on distance away\n # Objects that are further away have a darker shade of their class colour\n # This gets brighter as the object gets closer - enables us to better visualise the depth predictions\n tone = 255 - (min(distance, 50)/50)*255\n colour = (tone * colour_scale[0], tone * colour_scale[1], tone * colour_scale[2])\n\n # Draw a bounding box.\n left, top, box_width, height = box_dimensions # unpack the box tuple\n cv2.rectangle(image, (left, top), (left + box_width, top + height), colour, 2)\n object_label = ' %s : %.2fm' % (class_name, distance) # construct label\n\n # Display the label at the top of the bounding box\n labelSize, baseLine = cv2.getTextSize(object_label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n top = max(top, labelSize[1])\n cv2.rectangle(image, (left, top - round(1.5*labelSize[1])),\n (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv2.FILLED)\n cv2.rectangle(image, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine),\n (0, 0, 0))\n cv2.putText(image, object_label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 1, cv2.LINE_AA)", "def _train_aug(self, results):\n img = results['img']\n h, w, c = img.shape\n boxes = results['gt_bboxes']\n while True:\n scale = random.choice(self.ratios)\n new_h = int(self.crop_size[0] * scale)\n new_w = int(self.crop_size[1] * scale)\n h_border = self._get_border(self.border, h)\n w_border = self._get_border(self.border, w)\n\n for i in range(50):\n center_x = random.randint(low=w_border, high=w - w_border)\n center_y = random.randint(low=h_border, high=h - h_border)\n\n cropped_img, border, 
patch = self._crop_image_and_paste(\n img, [center_y, center_x], [new_h, new_w])\n\n mask = self._filter_boxes(patch, boxes)\n # if image do not have valid bbox, any crop patch is valid.\n if not mask.any() and len(boxes) > 0:\n continue\n\n results['img'] = cropped_img\n results['img_shape'] = cropped_img.shape\n results['pad_shape'] = cropped_img.shape\n\n x0, y0, x1, y1 = patch\n\n left_w, top_h = center_x - x0, center_y - y0\n cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n # crop bboxes accordingly and clip to the image boundary\n for key in results.get('bbox_fields', []):\n mask = self._filter_boxes(patch, results[key])\n bboxes = results[key][mask]\n bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n if self.bbox_clip_border:\n bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n bboxes[:, 3] > bboxes[:, 1])\n bboxes = bboxes[keep]\n results[key] = bboxes\n if key in ['gt_bboxes']:\n if 'gt_labels' in results:\n labels = results['gt_labels'][mask]\n labels = labels[keep]\n results['gt_labels'] = labels\n if 'gt_masks' in results:\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n\n # crop semantic seg\n for key in results.get('seg_fields', []):\n raise NotImplementedError(\n 'RandomCenterCropPad only supports bbox.')\n return results", "def getRectangularKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_RECT, size)", "def _add_roidb_from_annotations(self, entry):\n ann_ids = self._COCO.getAnnIds(imgIds=entry['id'], iscrowd=None)\n objs = self._COCO.loadAnns(ann_ids)\n width = entry['width']\n height = entry['height']\n # valid objs\n # change the annotation boxes from 'xywh' to 'xyxy'\n valid_objs = []\n for obj in objs:\n x1 = np.max((0, obj['bbox'][0]))\n y1 = np.max((0, obj['bbox'][1]))\n x2 = np.min((width, x1 + np.max((0, obj['bbox'][2]))))\n y2 = np.min((height, y1 + np.max((0, obj['bbox'][3]))))\n if obj['area'] > 0 and x2 >= x1 and y2 >= y1:\n obj['clean_box'] = [x1, y1, x2, y2]\n valid_objs.append(obj)\n objs = valid_objs\n num_objs = len(objs)\n\n bboxes = np.zeros((num_objs, 4), dtype=entry['bboxes'].dtype)\n gt_classes = np.zeros((num_objs), dtype=entry['gt_classes'].dtype)\n\n coco_cat_id_to_class_ind = dict(\n [(self._class_to_coco_cat_id[cls], self._class_to_ind[cls]) for cls in self._classes[1:]])\n for ix, obj in enumerate(objs):\n bboxes[ix, :] = obj['clean_box']\n gt_classes[ix] = coco_cat_id_to_class_ind[obj['category_id']]\n entry['bboxes'] = np.append(entry['bboxes'], bboxes, axis=0)\n entry['gt_classes'] = np.append(entry['gt_classes'], gt_classes)", "def simple_test_bboxes(self,\n x,\n img_meta,\n proposals,\n rcnn_test_cfg,\n rescale=False):\n rois = bbox2roi(proposals)\n roi_feats = self.bbox_roi_extractor(\n x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\n cls_score, bbox_pred = self.bbox_head(roi_feats)\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n is_first = img_meta[0]['is_first']\n det_bboxes, det_labels = self.bbox_head.get_det_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n if det_bboxes.nelement()==0:\n det_obj_ids=np.array([], dtype=np.int64)\n if is_first:\n self.prev_bboxes = None\n self.prev_roi_feats = None\n self.prev_det_labels = None\n return det_bboxes, det_labels, det_obj_ids\n\n res_det_bboxes = 
det_bboxes.clone()\n if rescale:\n res_det_bboxes[:, :4] *= scale_factor\n\n det_rois = bbox2roi([res_det_bboxes])\n det_roi_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], det_rois)\n # recompute bbox match feature\n \n if is_first or (not is_first and self.prev_bboxes is None):\n det_obj_ids = np.arange(det_bboxes.size(0))\n # save bbox and features for later matching\n self.prev_bboxes = det_bboxes\n self.prev_roi_feats = det_roi_feats\n self.prev_det_labels = det_labels\n else:\n \n assert self.prev_roi_feats is not None\n # only support one image at a time\n bbox_img_n = [det_bboxes.size(0)]\n prev_bbox_img_n = [self.prev_roi_feats.size(0)]\n match_score = self.track_head(det_roi_feats, self.prev_roi_feats,\n bbox_img_n, prev_bbox_img_n)[0]\n match_logprob = torch.nn.functional.log_softmax(match_score, dim=1)\n label_delta = (self.prev_det_labels == det_labels.view(-1,1)).float()\n bbox_ious = bbox_overlaps(det_bboxes[:,:4], self.prev_bboxes[:,:4])\n # compute comprehensive score \n comp_scores = self.track_head.compute_comp_scores(match_logprob, \n det_bboxes[:,4].view(-1, 1),\n bbox_ious,\n label_delta,\n add_bbox_dummy=True)\n match_likelihood, match_ids = torch.max(comp_scores, dim =1)\n # translate match_ids to det_obj_ids, assign new id to new objects\n # update tracking features/bboxes of exisiting object, \n # add tracking features/bboxes of new object\n match_ids = match_ids.cpu().numpy().astype(np.int32)\n det_obj_ids = np.ones((match_ids.shape[0]), dtype=np.int32) * (-1)\n best_match_scores = np.ones((self.prev_bboxes.size(0))) * (-100)\n for idx, match_id in enumerate(match_ids):\n if match_id == 0:\n # add new object\n det_obj_ids[idx] = self.prev_roi_feats.size(0)\n self.prev_roi_feats = torch.cat((self.prev_roi_feats, det_roi_feats[idx][None]), dim=0)\n self.prev_bboxes = torch.cat((self.prev_bboxes, det_bboxes[idx][None]), dim=0)\n self.prev_det_labels = torch.cat((self.prev_det_labels, det_labels[idx][None]), dim=0)\n else:\n # multiple candidate might match with previous object, here we choose the one with\n # largest comprehensive score \n obj_id = match_id - 1\n match_score = comp_scores[idx, match_id]\n if match_score > best_match_scores[obj_id]:\n det_obj_ids[idx] = obj_id\n best_match_scores[obj_id] = match_score\n # udpate feature\n self.prev_roi_feats[obj_id] = det_roi_feats[idx]\n self.prev_bboxes[obj_id] = det_bboxes[idx]\n \n\n return det_bboxes, det_labels, det_obj_ids", "def detect(image, resize=rsz_default, kernel=kernel_size):\n\timage_prep = preprocess(image, resize=resize, kernel=kernel)\n\tcontour = get_contour(image_prep)\n\tM = get_image_moments(contour=contour)\n\t\n\tcentroid, bounding_box = None, None\n\tif check_for_beam(M):\n\t\tcentroid = [pos//resize for pos in get_centroid(M)]\n\t\tbounding_box = [val//resize for val in get_bounding_box(\n\t\t\timage_prep, contour)]\n\treturn centroid, bounding_box", "def inference_detector(model,img:str):\n cfg = model.cfg\n device = next(model.parameters()).device # model device\n # build the data pipeline\n test_pipeline = cfg.test_pipeline\n test_pipeline = Compose(test_pipeline)\n\n if isinstance(img,str):\n img = cv2.imread(img)\n elif isinstance(img,np.ndarray):\n img = img\n elif isinstance(img,Image):\n #TODO:将PIL改为CV2\n pass\n else:\n raise TypeError('img must be a PIL.Image or str or np.ndarray, '\n 'but got {}'.format(type(img)))\n\n ori_h,ori_w,ori_c = img.shape\n\n # prepare data\n data = dict(img=img)\n data = test_pipeline(data)\n img_tensor = 
data['img'].unsqueeze(0).to(device)\n _,_,new_h,new_w = img_tensor.shape\n data_dict = dict(img=img_tensor)\n # forward the model\n with torch.no_grad():\n preds = model(data_dict,return_loss=False)\n pred_bbox_list,score_bbox_list = model.postprocess(preds)\n\n #pred_bbox_list(b,n,4,2) [(x1,y1),(x2,y2),(x3,y3),(x4,y4)] for bbox model\n batch_pred_bbox = pred_bbox_list[0]\n score_bbox_list = score_bbox_list[0]\n w_scale = float(ori_w) / new_w\n h_scale = float(ori_h) / new_h\n\n if type(batch_pred_bbox)==np.ndarray:\n if len(batch_pred_bbox)!=0:\n ##bbox 情况,其4个点个数稳定\n batch_pred_bbox[:,:,0] *=w_scale\n batch_pred_bbox[:, :, 1] *= h_scale\n else:\n #polygon\n for polygon_array in batch_pred_bbox:\n polygon_array[:, 0] = np.clip(\n np.round(polygon_array[:, 0] / new_w * ori_w), 0, ori_w)\n polygon_array[:, 1] = np.clip(\n np.round(polygon_array[:, 1] / new_h * ori_h), 0, ori_h)\n\n return batch_pred_bbox,score_bbox_list", "def postprocess(image: np.ndarray, results_list: list, threshold_confidence: float, threshold_nms: float) -> list:\n frameHeight = image.shape[0]\n frameWidth = image.shape[1]\n\n # Scan through all the bounding boxes output from the network and..\n # 1. keep only the ones with high confidence scores.\n # 2. assign the box class label as the class with the highest score.\n # 3. construct a list of bounding boxes, class labels and confidence scores\n\n classIds = []\n confidences = []\n boxes = []\n for result in results_list:\n for detection in result:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > threshold_confidence:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = max(0, int(center_x - width / 2))\n top = max(0, int(center_y - height / 2))\n classIds.append(classId)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n\n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences\n list_of_tuples = []\n\n indices = cv2.dnn.NMSBoxes(boxes, confidences, threshold_confidence, threshold_nms)\n for i in indices:\n i = i[0]\n list_of_tuples.append((classIds[i], confidences[i], boxes[i]))\n # return post processed lists of classIds, confidences and bounding boxes\n return list_of_tuples", "def draw_coco_bbox(img,labels):\n \n # w,h,_=img.shape\n for bbox in labels:\n\n x1 = int(bbox['points']['x1'])\n y1 = int(bbox['points']['y1'])\n x2 = int(bbox['points']['x2'])\n y2 = int(bbox['points']['y2'])\n c = str(bbox['probability'])\n # font\n font = cv2.FONT_HERSHEY_SIMPLEX\n \n # org\n org = (int(x1), int(y1))\n \n # fontScale\n fontScale = 1\n LABELS = ['ConcreteCrack','Spalling','Efflorescene','Exposure']\n \n # Blue color in BGR\n # color_efflorescene = \n\n color = (56,255,225)\n\n \n # Line thickness of 2 px\n thickness = 2\n \n # Using cv2.putText() method\n img = cv2.putText(img, str(c), org, font, \n fontScale, color, thickness, cv2.LINE_AA)\n\n\n img = cv2.rectangle(img,(int(x1),int(y1)),(int(x2),int(y2)),color,2)\n return img", "def postprocess(self, frame, outs):\n frameHeight = frame.shape[0]\n frameWidth = frame.shape[1]\n classIds = []\n confidences = []\n boxes = []\n # Scan through all the bounding boxes output from the network and keep only the\n # ones with high confidence scores. 
Assign the box's class label as the class with the highest score.\n # your code here\n # loop over each of the layer output (I guess the outs is the number of anchor boxes)\n for output in outs:\n # loop over each of the detection\n for detection in output:\n # extract the class ID and confidence of the current object detection\n # the detection is an array of [bx, by, bw, bh, Pc, c1, c2, ..., c80]\n # Pc is the probability that there is an object\n scores = detection[5:]\n classID = np.argmax(scores)\n confidence = scores[classID]\n \n if confidence > self.confThreshold:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = int(center_x - width / 2)\n top = int(center_y - height / 2)\n \n classIds.append(classID)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n \n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences.\n # your code here\n idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)\n \n # get the bounding bxoes after performing non maximum suppression\n # your code here\n output_boxes = []\n if len(idxs) > 0:\n for i in idxs.flatten(): # idxs = [[1],[2],[5],...], idxs.flatten() = [1,2,5,...]\n output_boxes.append(boxes[i])\n left = boxes[i][0]\n top = boxes[i][1]\n width = boxes[i][2]\n height = boxes[i][3]\n right = left + width\n bottom = top + height\n frame = self.drawPred(frame, classIds[i], confidences[i], left, top, right, bottom)\n \n output_image = frame\n return output_image, output_boxes", "def bbox_from_circle(img, circles):\n seg_imgs = []\n bboxes = []\n aux = img.copy()\n for i,el in enumerate(circles):\n bbox = circle_2_bbox(el['coord'])\n bbox = fix_bbox(bbox,aux.shape)\n cv.rectangle(aux,bbox[0],bbox[1],(0,255,0))\n bboxes.append(bbox)\n return bboxes", "def __call__(self, results):\n img = results['img']\n polys = results[self.instance_key]\n x_min, y_min, x_max, y_max = self._random_crop(img, polys)\n kept_idx = []\n for idx, poly in enumerate(polys):\n if np.all((poly[0::2] >= x_min) & (poly[1::2] >= y_min) & \\\n (poly[0::2] <= x_max) & (poly[1::2] <= y_max)):\n kept_idx.append(idx)\n kept_idx = np.array(kept_idx)\n # crop img\n results['img'] = img[y_min : y_max, x_min : x_max, :]\n results['img_shape'] = results['img'].shape\n # crop mask\n for key in results.get('mask_fields', []):\n results[key] = results[key].crop(np.array([x_min, y_min, x_max, y_max]))\n # crop box\n for key in results.get('bbox_fields', []):\n bboxes = []\n for box in results[key]:\n box = np.array(box)\n if np.all((np.min(box[0::2]) >= x_min) & (np.min(box[1::2]) >= y_min) & \\\n (np.max(box[0::2]) <= x_max) & (np.max(box[1::2]) <= y_max)):\n box[0::2] = (box[0::2] - x_min)\n box[1::2] = (box[1::2] - y_min)\n bboxes.append(box)\n # no valid box in img\n if len(bboxes) == 0:\n if key == 'gt_bboxes':\n bboxes = np.zeros((0, 4), dtype=np.float32)\n else:\n bboxes = np.zeros((0, 8), dtype=np.float32)\n results[key] = bboxes\n # calculate the kept text and label\n for key in ['gt_labels', 'gt_texts']:\n if key in results:\n results[key] = [results[key][idx] for idx in kept_idx]\n # calculate the kept mask\n for key in ['gt_masks']:\n if key in results:\n ori_mask = results[key].masks\n kept_mask = [ori_mask[idx] for idx in kept_idx]\n if len(kept_mask) > 0:\n kept_mask = np.stack(kept_mask)\n else:\n kept_mask = np.empty((0, results[key].height, 
results[key].width), dtype=np.float32)\n results[key] = BitmapMasks(kept_mask, results[key].height, results[key].width)\n return results", "def generate_tree(image):\n\tsize = cvGetSize(image)\n\tfor level in range(1,255):\n\t\t# TODO\n\t\tpass", "def post_process_for_bbox(bbox_pred):\n anchors = torch.FloatTensor(\n [(1.3221, 1.73145),\n (3.19275, 4.00944),\n (5.05587, 8.09892),\n (9.47112, 4.84053),\n (11.2364, 10.0071)]\n )\n\n outsize = (13, 13)\n width, height = outsize\n \n # restore cell pos to x, y\n for w in range(width):\n for h in range(height):\n bbox_pred[:, height*h + w, :, 0] += w\n bbox_pred[:, height*h + w, :, 1] += h\n bbox_pred[:, :, :, :2] /= 13\n \n # apply anchors to w, h\n anchor_w = anchors[:, 0].contiguous().view(-1, 1)\n anchor_h = anchors[:, 1].contiguous().view(-1, 1)\n bbox_pred[:, :, :, 2:3] *= anchor_w\n bbox_pred[:, :, :, 3:4] *= anchor_h\n\n return bbox_pred", "def predict(self, width, height, confidences, boxes, prob_threshold, iou_threshold=0.5, top_k=-1):\n boxes = boxes[0]\n confidences = confidences[0]\n picked_box_probs = []\n picked_labels = []\n\n for class_index in range(1, confidences.shape[1]):\n\n probs = confidences[:, class_index]\n mask = probs > prob_threshold\n probs = probs[mask]\n\n if probs.shape[0] == 0:\n continue\n\n subset_boxes = boxes[mask, :]\n box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)\n box_probs = self.hard_nms(box_probs,\n iou_threshold=iou_threshold,\n top_k=top_k,\n )\n\n picked_box_probs.append(box_probs)\n picked_labels.extend([class_index] * box_probs.shape[0])\n\n if not picked_box_probs:\n return np.array([]), np.array([]), np.array([])\n\n picked_box_probs = np.concatenate(picked_box_probs)\n picked_box_probs[:, 0] *= width\n picked_box_probs[:, 1] *= height\n picked_box_probs[:, 2] *= width\n picked_box_probs[:, 3] *= height\n\n return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]", "def predict(self, image: np.ndarray) -> List[Object2D]:\n predictions: List[Tuple[int, int, int, int]] = self.__cascade.detectMultiScale(\n cv2.cvtColor(image, cv2.COLOR_RGB2GRAY), self.settings.scale_factor, self.settings.min_neighbours)\n return [Object2D(Bounds2D(*prediction), self.settings.class_index) for prediction in predictions]", "def im_detect(net, im, boxes=None):\n blobs, im_scales = _get_blobs(im, boxes)\n\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:\n v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)\n _, index, inv_index = np.unique(hashes, return_index=True,\n return_inverse=True)\n blobs['rois'] = blobs['rois'][index, :]\n boxes = boxes[index, :]\n\n if cfg.TEST.HAS_RPN:\n im_blob = blobs['data']\n blobs['im_info'] = np.array(\n [[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],\n dtype=np.float32)\n\n # reshape network inputs\n net.blobs['data'].reshape(*(blobs['data'].shape))\n if cfg.TEST.HAS_RPN:\n net.blobs['im_info'].reshape(*(blobs['im_info'].shape))\n else:\n net.blobs['rois'].reshape(*(blobs['rois'].shape))\n\n # do forward\n forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}\n if cfg.TEST.HAS_RPN:\n forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)\n else:\n forward_kwargs['rois'] = 
blobs['rois'].astype(np.float32, copy=False)\n \n blobs_out = net.forward(**forward_kwargs)\n \n if cfg.TEST.HAS_RPN:\n assert len(im_scales) == 1, \"Only single-image batch implemented\"\n rois = net.blobs['rois'].data.copy()\n # unscale back to raw image space\n boxes = rois[:, 1:5] / im_scales[0]\n\n if cfg.TEST.SVM:\n # use the raw scores before softmax under the assumption they\n # were trained as linear SVMs\n scores = net.blobs['cls_score'].data\n else:\n # use softmax estimated probabilities\n scores = blobs_out['cls_prob']\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = blobs_out['bbox_pred']\n pred_boxes = bbox_transform_inv(boxes, box_deltas)\n pred_boxes = clip_boxes(pred_boxes, im.shape)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:\n # Map scores and predictions back to the original set of boxes\n scores = scores[inv_index, :]\n pred_boxes = pred_boxes[inv_index, :]\n\n return scores, pred_boxes", "def _decode_bbox(self, normalized_bbox):\n #apply the inverse of transformation\n y1,x1,y2,x2 = preprocess.apply_transformation(normalized_bbox,\n np.linalg.inv(self.transformation))\n\n w,h = self.image_size\n y1,x1,y2,x2 = y1*h,x1*w,y2*h,x2*w\n return vot.Rectangle(x1,y1,x2-x1,y2-y1)", "def draw_bounding_boxes(self, image_path):\n img = cv.imread(image_path, cv.IMREAD_ANYDEPTH)\n bboxes = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n unique, counts = np.unique(img, return_counts=True)\n for uni in unique:\n if uni == 0:\n continue\n self.get_instance_bounding_box(img, bboxes, uni)\n\n cv.namedWindow('building bounding boxes', cv.WINDOW_NORMAL)\n cv.imshow('building bounding boxes', bboxes)\n cv.waitKey(0)\n cv.destroyAllWindows()", "def infer(self, img, threshold=0.2, keep_size=False, custom_nms: NMSCustom=None,\r\n nms_thresh=0.45, nms_topk=400, post_nms=100):\r\n\r\n assert self._model is not None, \"Model has not been loaded, call load(path) first\"\r\n\r\n if custom_nms:\r\n self._model.set_nms(nms_thresh=0.85, nms_topk=5000, post_nms=1000)\r\n else:\r\n self._model.set_nms(nms_thresh=nms_thresh, nms_topk=nms_topk, post_nms=post_nms)\r\n if not isinstance(img, Image):\r\n img = Image(img)\r\n _img = img.convert(\"channels_last\", \"rgb\")\r\n\r\n height, width, _ = _img.shape\r\n img_mx = mx.image.image.nd.from_numpy(np.float32(_img))\r\n\r\n if keep_size:\r\n x, img_mx = transform_test(img_mx)\r\n else:\r\n x, img_mx = presets.ssd.transform_test(img_mx, short=self.img_size)\r\n h_mx, w_mx, _ = img_mx.shape\r\n x = pad_test(x, min_size=self.img_size)\r\n x = x.as_in_context(self.ctx)\r\n class_IDs, scores, boxes = self._model(x)\r\n\r\n class_IDs = class_IDs[0, :, 0].asnumpy()\r\n scores = scores[0, :, 0].asnumpy()\r\n mask = np.where(class_IDs >= 0)[0]\r\n if custom_nms is None:\r\n mask = np.intersect1d(mask, np.where(scores > threshold)[0])\r\n if mask.size == 0:\r\n return BoundingBoxList([])\r\n\r\n scores = scores[mask, np.newaxis]\r\n class_IDs = class_IDs[mask, np.newaxis]\r\n boxes = boxes[0, mask, :].asnumpy()\r\n if x.shape[2] > h_mx:\r\n boxes[:, [1, 3]] -= (x.shape[2] - h_mx)\r\n elif x.shape[3] > w_mx:\r\n boxes[:, [0, 2]] -= (x.shape[3] - w_mx)\r\n boxes[:, [0, 2]] /= w_mx\r\n boxes[:, [1, 3]] /= h_mx\r\n boxes[:, [0, 2]] *= width\r\n boxes[:, [1, 3]] *= height\r\n\r\n if custom_nms is not None:\r\n bounding_boxes, _ = custom_nms.run_nms(boxes=boxes, scores=scores, threshold=threshold, img=_img)\r\n 
else:\r\n bounding_boxes = BoundingBoxList([])\r\n for idx, box in enumerate(boxes):\r\n bbox = BoundingBox(left=box[0], top=box[1],\r\n width=box[2] - box[0],\r\n height=box[3] - box[1],\r\n name=class_IDs[idx, :],\r\n score=scores[idx, :])\r\n bounding_boxes.data.append(bbox)\r\n\r\n return bounding_boxes", "def handle_origin_image(image, gt_box):\n x = image.width\n y = image.height\n im_max = max(x, y)\n im_min = min(x, y)\n scale = cfg.TRAIN.MIN_SIZE / im_min\n if scale * im_max > cfg.TRAIN.MAX_SIZE:\n scale = cfg.TRAIN.MAX_SIZE / im_max\n width = round(round(x * scale) / 32) * 32\n height = round(round(y * scale) / 32) * 32\n im = image.resize((width, height))\n box = [round(gt_box[0] * width / x), round(gt_box[1] * height / y), round(gt_box[2] * width / x),\n round(gt_box[3] * height / y)]\n # make sure there really tiny flaw still have box to predict\n if (box[3] - box[1]) * (box[2] - box[0]) < 100:\n box = [box[0] - 3, box[1] - 3, box[2] + 3, box[3] + 3]\n return np.array(im), box", "def demo(net, image_name, classes):\n\n # Load pre-computed Selected Search object proposals\n # box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',image_name + '_boxes.mat')\n test_mats_path = '/home/tanshen/fast-rcnn/data/kaggle/test_bbox'\n box_file = os.path.join(test_mats_path ,image_name + '_boxes.mat')\n obj_proposals = sio.loadmat(box_file)['boxes']\n\n # Load the demo image\n test_images_path = '/home/tanshen/fast-rcnn/data/kaggle/ImagesTest'\n # im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')\n im_file = os.path.join(test_images_path, image_name + '.jpg')\n im = cv2.imread(im_file)\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(net, im, obj_proposals)\n timer.toc()\n # print ('Detection took {:.3f}s for '\n # '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0\n NMS_THRESH = 0.3\n max_inds = 0\n max_score = 0.0\n for cls in classes:\n cls_ind = CLASSES.index(cls)\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n keep = np.where(cls_scores >= CONF_THRESH)[0]\n cls_boxes = cls_boxes[keep, :]\n cls_scores = cls_scores[keep]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n # print 'All {} detections with p({} | box) >= {:.1f} in {}'.format(cls, cls,\n # CONF_THRESH, image_name)\n #if get_max!=[]: \n\n [ind,tmp]=get_max(im, cls, dets, thresh=CONF_THRESH)\n #print image_name,cls,tmp\n\n #vis_detections(im, cls, dets, image_name, thresh=CONF_THRESH)\n #print dets[:,-1]\n #print image_name,max_score\n file.writelines([image_name,'\\t',cls,'\\t',str(tmp),'\\n'])\n if(max_score<tmp):\n max_score=tmp\n cls_max=cls\n print image_name,cls_max,max_score", "def get_coco_gt(self, coco, img_id, height, width, img_name):\n annIds = coco.getAnnIds(imgIds=[img_id], iscrowd=None)\n # assert annIds is not None and annIds > 0, 'No annotation for %s' % str(img_id)\n anns = coco.loadAnns(annIds)\n # assert len(anns) > 0, 'No annotation for %s' % str(img_id)\n masks = []\n classes = []\n bboxes = []\n\n for ann in anns:\n id = cat_id_to_real_id(ann['category_id'])\n # id = coco_cat_id_to_voc_id(ann['category_id'])\n if id != 0:\n classes.append(id)\n\n m = coco.annToMask(ann) # {0, 1} mask\n assert m.shape[0] == height and m.shape[1] == width, \\\n 'image %s and ann %s don''t match' % (img_id, ann)\n masks.append(m)\n\n 
bboxes.append(ann['bbox'])\n\n masks = np.asarray(masks)\n classes = np.asarray(classes)\n bboxes = np.asarray(bboxes)\n\n # to x1, y1, x2, y2\n num_classes = bboxes.shape[0]\n if num_classes <= 0:\n bboxes = np.zeros([0, 4], dtype=np.float32)\n classes = np.zeros([0], dtype=np.float32)\n num_classes = 0\n print('None Annotations %s' % img_name)\n bboxes[:, 2] = bboxes[:, 0] + bboxes[:, 2]\n bboxes[:, 3] = bboxes[:, 0] + bboxes[:, 3]\n\n bboxes = bboxes.astype(np.float32)\n classes = classes.astype(np.float32)\n masks = masks.astype(np.uint8)\n assert masks.shape[0] == bboxes.shape[0], 'Shape Error'\n\n return num_classes, masks, bboxes, classes", "def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img", "def Predict_Image_Contours(img, mask_full, feature_dict, filename = None, path = ''):\n for ii in range(len(feature_dict)):\n Type = feature_dict[str(ii)] ## So first key is 1\n if Type=='modern_build':\n color_rgb = (255,0,0)\n elif Type=='trad_build':\n color_rgb = (0,0,255)\n mask = mask_full[:,:,ii]\n mask = 255*mask.round().astype('uint8')\n mask = np.stack((mask,mask, mask),-1)\n mask = cv2.cvtColor(mask, cv2.COLOR_RGB2GRAY);\n ret, thresh = cv2.threshold(mask, 127.5, 255, cv2.THRESH_BINARY)\n\n contours,hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n ##print('here')\n area_thresh =30 ## Depends on what the minimal building size desired is\n for cnt in contours:\n ## Contours, flag of whether curve is closed or not\n epsilon = 0.025*cv2.arcLength(cnt,True)\n ## Contours, epsilon for wiggliness, closed shape or not\n approx = cv2.approxPolyDP(cnt,epsilon,True)\n ## Extract Area Dest image, contours, contour index\n area = cv2.contourArea(approx)\n ## centroid computed from moments\n M = cv2.moments(cnt) \n if area > area_thresh:\n if Type=='modern_build':\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n img = cv2.drawContours(image = img, contours = [box], \n contourIdx = 0, color = color_rgb, \n thickness = 2)\n elif Type=='trad_build':\n (x,y),radius = cv2.minEnclosingCircle(cnt)\n center = (int(x),int(y))\n radius = int(radius)\n img = cv2.circle(img,center,radius,color_rgb,2)\n elif Type=='Forest':\n img = cv2.drawContours(image = img, contours = [cnt], \n contourIdx = 0, color = color_rgb, \n thickness = 2)\n elif Type=='Bare':\n img = cv2.drawContours(image = img, contours = [cnt], \n contourIdx = 0, color = color_rgb, \n thickness = 2)\n if filename is not None:\n try: \n if path == '':\n path = 'Predictions'\n os.makedirs(path)\n except OSError as error: \n print('') \n fig, ax = plt.subplots(figsize=(18, 20))\n ax.imshow(img[:,:,0:3])\n plt.tight_layout()\n plt.savefig(path + '/' + filename, bbox_inches='tight') \n plt.close(fig)\n \n return img", "def crop_bbox(img_sitk, label_sitk):\n\n # Setting Bounding Box\n F_statistics = sitk.LabelShapeStatisticsImageFilter()\n F_statistics.Execute(label_sitk)\n bbox_dims = F_statistics.GetBoundingBox(1) # only one label per image\n # print(bbox_dims)\n\n # Applying the bounding box to the image with spacing equal to spc\n spc = 0\n org = bbox_dims[0:3] - [spc]*3\n sz = bbox_dims[3:6] + [spc]*3\n training_patch = img_sitk[org[0]-spc:org[0]+sz[0]+spc,\n org[1]-spc:org[1]+sz[1]+spc,\n org[2]-spc:org[2]+sz[2]+spc]\n\n return training_patch", "def 
im_detect_bbox_aug(model, im, box_proposals=None):\n assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, 'Size dependent scaling not implemented'\n assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', \\\n 'Coord heuristic must be union whenever score heuristic is union'\n assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', 'Score heuristic must be union whenever coord heuristic is union'\n\n # Collect detections computed under different transformations\n scores_ts = []\n boxes_ts = []\n\n def add_preds_t(scores_t, boxes_t):\n scores_ts.append(scores_t)\n boxes_ts.append(boxes_t)\n\n # Perform detection on the horizontally flipped image\n if cfg.TEST.BBOX_AUG.H_FLIP:\n \n scores_hf, boxes_hf, _ = im_detect_bbox_hflip(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, box_proposals=box_proposals)\n \n add_preds_t(scores_hf, boxes_hf)\n\n # Compute detections at different scales\n for scale in cfg.TEST.BBOX_AUG.SCALES:\n max_size = cfg.TEST.BBOX_AUG.MAX_SIZE\n scores_scl, boxes_scl = im_detect_bbox_scale(model, im, scale, max_size, box_proposals)\n add_preds_t(scores_scl, boxes_scl)\n\n if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:\n scores_scl_hf, boxes_scl_hf = im_detect_bbox_scale(model, im, scale, max_size, box_proposals, hflip=True)\n add_preds_t(scores_scl_hf, boxes_scl_hf)\n\n # Perform detection at different aspect ratios\n for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS:\n scores_ar, boxes_ar = im_detect_bbox_aspect_ratio(model, im, aspect_ratio, box_proposals)\n add_preds_t(scores_ar, boxes_ar)\n\n if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP:\n scores_ar_hf, boxes_ar_hf = im_detect_bbox_aspect_ratio(model, im, aspect_ratio, box_proposals, hflip=True)\n add_preds_t(scores_ar_hf, boxes_ar_hf)\n\n # Compute detections for the original image (identity transform) last to\n # ensure that the Caffe2 workspace is populated with blobs corresponding\n # to the original image on return (postcondition of im_detect_bbox)\n scores_i, boxes_i, im_scale_i = im_detect_bbox(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals)\n\n add_preds_t(scores_i, boxes_i)\n\n\n # Combine the predicted scores\n if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID':\n scores_c = scores_i\n elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG':\n if scores_ts[0] is not None:\n scores_c = np.mean(scores_ts, axis=0)\n else:\n scores_c = None\n\n elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION':\n scores_c = np.vstack(scores_ts)\n else:\n raise NotImplementedError(\n 'Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)\n )\n\n # Combine the predicted boxes\n if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID':\n boxes_c = boxes_i\n elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG':\n boxes_c = np.mean(boxes_ts, axis=0)\n elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION':\n boxes_c = np.vstack(boxes_ts)\n else:\n raise NotImplementedError(\n 'Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)\n )\n\n return scores_c, boxes_c, im_scale_i", "def classify(img, c_model):\n #global class_graph\n\n #img = load_img(im_path,target_size=(input_height, input_width))\n #img = img_to_array(img)\n im_size = 128\n # resize \n\n img = cv2.resize(img, (im_size,im_size))\n\n img = img.astype(\"float\") / 255.0\n img = np.expand_dims(img, axis=0)\n with class_graph.as_default():\n predictions = c_model.predict(img)[0]\n\n return predictions", "def crop_inference_bbox(image, boxes, file_name=\"cropped_inference_result\"):\n # create output folder if not present\n 
create_dir(\"output/\")\n # crop detections\n if len(boxes) > 0:\n for ind in range(len(boxes)):\n cropped_img = image[\n int(boxes[ind][0][1]) : int(boxes[ind][1][1]),\n int(boxes[ind][0][0]) : int(boxes[ind][1][0]),\n :,\n ]\n save_path = os.path.join(\"output/\", file_name + \"_\" + str(ind) + \".png\")\n cv2.imwrite(save_path, cv2.cvtColor(cropped_img, cv2.COLOR_RGB2BGR))", "def detectFaceAndClassify(faceNet, faceMaskClassifier, testImagePath, threshold):\n # load the input test image from disk\n image = cv2.imread(testImagePath)\n # making a copy of image and finding the image spatial dimensions\n orig = image.copy()\n (h, w) = image.shape[:2]\n\n # construct a blob from the image to pass to the network\n # using standard weights for the face detection model for image preprocessing\n blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n # obtain the face detections by passing the blob through the network\n print(\"computing face detections...\")\n faceNet.setInput(blob)\n faceDetections = faceNet.forward()\n\n # loop over the detections to classify them and form bounding boxes and labels\n for i in range(0, faceDetections.shape[2]):\n # extract only confident detections using the confidence/probability\n # associated with the detection\n confidence = faceDetections[0, 0, i, 2]\n\n # filter out weak detections by ensuring the confidence is\n # greater than the minimum confidence 0.5 or input variable\n if confidence > threshold:\n # extract bounding box dimensions and face Region of intrest for classification\n faceROI, startX, startY, endX, endY = extractBoxAndFaceROI(image, faceDetections, itemNum=i,\n height=h, width=w)\n\n faceROI = np.expand_dims(faceROI, axis=0)\n\n # Passing the pre-processed image with classification model to check if there is a mask or not\n (mask, withoutMask) = faceMaskClassifier.predict(faceROI)[0]\n # (mask, withoutMask) = faceMaskClassifier.predict(faceROI)\n\n # find the class and associated colour to use for the bounding box and text\n label = \"Mask\" if mask > withoutMask else \"No Mask\"\n color = (0, 255, 0) if label == \"Mask\" else (0, 0, 255)\n\n # include the probability of prediction in the label of the bounding box\n label = \"{}: {:.2f}%\".format(label, max(mask, withoutMask) * 100)\n\n # forming bounding box rectangle and display the label the output image frame\n cv2.putText(image, label, (startX, startY - 10),\n cv2.FONT_HERSHEY_COMPLEX, 0.45, color, 2)\n cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)\n\n # show the output image\n cv2.imshow(\"Output\", image)\n # display the image still a key is pressed, when key is pressed program is terminated\n cv2.waitKey(0)", "def nextBatch(self, TRAIN=True, d=False):\n while True:\n if TRAIN==True:\n idx=np.random.randint(self.split*self.total)\n else:\n idx=np.random.randint(self.split*self.total,high=self.total)\n \n if len(self.roidb[idx])!=0:\n break\n \n data=self.imdb[idx][np.newaxis,:]\n gt_boxes=np.array(self.roidb[idx])\n \n maskdb=self.maskdb[idx]\n mask_max_x=0\n mask_max_y=0\n for ins in maskdb:\n if ins.shape[0]>mask_max_y:\n mask_max_y=ins.shape[0]\n if ins.shape[1]>mask_max_x:\n mask_max_x=ins.shape[1]\n\n gt_masks=np.zeros((len(maskdb),mask_max_y,mask_max_x))\n mask_info=np.zeros((len(maskdb),2))\n for j in range(len(maskdb)):\n mask=maskdb[j]\n mask_x=mask.shape[1]\n mask_y=mask.shape[0]\n gt_masks[j,0:mask_y,0:mask_x]=mask\n mask_info[j,0]=mask_y\n mask_info[j,1]=mask_x\n\n blobs={\n 'data': data,\n 'gt_boxes': gt_boxes,\n 'im_info': 
np.array([[data.shape[2],data.shape[3],1]], dtype=np.float32),\n 'gt_masks':gt_masks,\n 'mask_info':mask_info\n }\n if d: \n # i is always 1, in ultrasound case\n for i in range(blobs['data'].shape[0]):\n print blobs['im_info']\n print blobs['mask_info']\n print blobs['gt_boxes']\n img=blobs['data'][0,0]\n print img.shape\n fig=plt.figure()\n ax=fig.add_subplot(111)\n plt.imshow(img)\n for j,bbox in enumerate(gt_boxes):\n blank=np.zeros_like(img)\n print blank.shape,maskdb[j].shape,bbox\n blank[bbox[1]:maskdb[j].shape[0]+bbox[1],bbox[0]:maskdb[j].shape[1]+bbox[0]]=maskdb[j]\n blank[blank>0]=1\n plt.imshow(blank,alpha=.9)\n ax.add_patch(patches.Rectangle((bbox[0],bbox[1]),bbox[2]-bbox[0],bbox[3]-bbox[1],fill=False))\n plt.text(bbox[0],bbox[1],bbox[-1],bbox=dict(facecolor='blue',alpha=0.5),fontsize=14, color='white')\n plt.show()\n for i in blobs:\n print i,blobs[i].shape\n print ''\n return blobs", "def identify_blocks(images):\n locations = {1: Point(), 2: Point(), 3: Point()}\n blocks = {\"left\": 0, \"middle\": 0, \"right\": 0}\n pipeline = keras_ocr.pipeline.Pipeline()\n cv2_images = []\n\n for image in images:\n bridge = cv_bridge.CvBridge()\n cv2_images.append(bridge.imgmsg_to_cv2(image, \"bgr8\"))\n\n predictions = pipeline.recognize(cv2_images)\n rospy.loginfo(predictions)\n\n blocks[\"left\"] = int(predictions[0][0][0])\n blocks[\"middle\"] = int(predictions[1][0][0])\n blocks[\"right\"] = int(predictions[2][0][0])\n\n for position, block in blocks.items():\n locations[block] = block_locations[position]\n \n return locations", "def detect(self, image):\n\n\t\t(rect, weights) = self.hog.detectMultiScale(image, winStride=(4, 4), padding=(16, 16), scale=1.09)\n\n\t\t# non-maxima suppression applied to the boxes\n\t\trect = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rect])\n\t\tboxes = non_max_suppression(rect, probs=None, overlapThresh=0.3)\n\n\t\t# draw of the rectangles around people\n\t\thuman_boxes = []\n\t\tthickness = 1\n\t\tfor i in range(len(boxes)):\n\t\t\tx = boxes[i][0]\n\t\t\ty = boxes[i][1]\n\t\t\tw = boxes[i][2]\n\t\t\th = boxes[i][3]\n\t\t\tp_w, p_h = int(0.1*w), int(0.025*h)\n\t\t\t#cv2.rectangle(image, (x + p_w, y + p_h), (x + w - p_w, y + h - p_h), (0, 255, 0), thickness)\n\n\t\t\thuman_boxes.append([(x + p_w, y + p_h), (x + w - p_w, y + h - p_h)])\n\n\t\t# reformat and rearrange the boxes for convenience\n\t\treturn human_boxes", "def im_detect_bbox_aug(model, im, box_proposals=None):\n assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, \\\n 'Size dependent scaling not implemented'\n assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or \\\n cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', \\\n 'Coord heuristic must be union whenever score heuristic is union'\n assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or \\\n cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \\\n 'Score heuristic must be union whenever coord heuristic is union'\n assert not cfg.MODEL.FASTER_RCNN or \\\n cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \\\n 'Union heuristic must be used to combine Faster RCNN predictions'\n\n # Collect detections computed under different transformations\n scores_ts = []\n boxes_ts = []\n\n def add_preds_t(scores_t, boxes_t):\n scores_ts.append(scores_t)\n boxes_ts.append(boxes_t)\n\n # Perform detection on the horizontally flipped image\n if cfg.TEST.BBOX_AUG.H_FLIP:\n scores_hf, boxes_hf, _im_scales_hf = im_detect_bbox_hflip(\n model, im, box_proposals\n )\n add_preds_t(scores_hf, boxes_hf)\n\n # Compute detections at different scales\n for scale in cfg.TEST.BBOX_AUG.SCALES:\n 
max_size = cfg.TEST.BBOX_AUG.MAX_SIZE\n scores_scl, boxes_scl = im_detect_bbox_scale(\n model, im, scale, max_size, box_proposals\n )\n add_preds_t(scores_scl, boxes_scl)\n\n if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:\n scores_scl_hf, boxes_scl_hf = im_detect_bbox_scale(\n model, im, scale, max_size, box_proposals, hflip=True\n )\n add_preds_t(scores_scl_hf, boxes_scl_hf)\n\n # Perform detection at different aspect ratios\n for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS:\n scores_ar, boxes_ar = im_detect_bbox_aspect_ratio(\n model, im, aspect_ratio, box_proposals\n )\n add_preds_t(scores_ar, boxes_ar)\n\n if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP:\n scores_ar_hf, boxes_ar_hf = im_detect_bbox_aspect_ratio(\n model, im, aspect_ratio, box_proposals, hflip=True\n )\n add_preds_t(scores_ar_hf, boxes_ar_hf)\n\n # Compute detections for the original image (identity transform) last to\n # ensure that the Caffe2 workspace is populated with blobs corresponding\n # to the original image on return (postcondition of im_detect_bbox)\n scores_i, boxes_i, im_scales_i = im_detect_bbox(model, im, box_proposals)\n add_preds_t(scores_i, boxes_i)\n\n # Combine the predicted scores\n if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID':\n scores_c = scores_i\n elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG':\n scores_c = np.mean(scores_ts, axis=0)\n elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION':\n scores_c = np.vstack(scores_ts)\n else:\n raise NotImplementedError(\n 'Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)\n )\n\n # Combine the predicted boxes\n if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID':\n boxes_c = boxes_i\n elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG':\n boxes_c = np.mean(boxes_ts, axis=0)\n elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION':\n boxes_c = np.vstack(boxes_ts)\n else:\n raise NotImplementedError(\n 'Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)\n )\n\n return scores_c, boxes_c, im_scales_i", "def detect_fn(image) :\n image, shapes = detection_model.preprocess(image)\n prediction_dict = detection_model.predict(image, shapes)\n detections = detection_model.postprocess(prediction_dict, shapes)\n\n return detections, prediction_dict, tf.reshape(shapes, [-1])", "def run_onet(self, image, bounding_boxes):\n n, c, h, w = image.size()\n\n crops = crop_boxes(image, bounding_boxes, size=48)\n\n if len(crops) == 0:\n return []\n\n landmarks, offsets, scores = self.onet(crops)\n\n keep = (scores[:, 1] > self.score_thresholds[2]).nonzero(as_tuple=True)[0]\n bounding_boxes = bounding_boxes[keep, :]\n bounding_boxes[:, 5] = scores[keep, 1].view(-1)\n landmarks = landmarks[keep]\n\n # Rescale landmarks\n width = bounding_boxes[:, 3] - bounding_boxes[:, 1] + 1.0\n height = bounding_boxes[:, 4] - bounding_boxes[:, 2] + 1.0\n\n x_min, y_min = bounding_boxes[:, 1], bounding_boxes[:, 2]\n\n landmarks[:, 0:5] = x_min.unsqueeze(1) + width.unsqueeze(1) * landmarks[:, 0:5]\n landmarks[:, 5:10] = y_min.unsqueeze(1) + height.unsqueeze(1) * landmarks[:, 5:10]\n\n bounding_boxes = adjust_boxes(bounding_boxes)\n bounding_boxes = torch.cat((bounding_boxes, landmarks), dim=1)\n\n bounding_boxes = batched_nms(bounding_boxes, n,\n self.iou_thresholds[2], mode='min')\n\n return bounding_boxes", "def process_image(image):\n \n # (step 1) get gray image\n gray = grayscale(image)\n \n # (step 2) do gaussian blur with kernel size is 3\n blur_gray = gaussian_blur(gray, 3)\n \n # (step 3) do canny edge detction with low 50 and hight 150\n canny_edges = canny(blur_gray, 50, 150)\n \n # (step 4) region of interset\n imshape 
= image.shape\n left_bottom = (50,imshape[0])\n right_bottom = (imshape[1]-50,imshape[0])\n left_top = (420, 330)\n right_top = (imshape[1]-420, 330)\n # used later to discard lines which are out of the ROI\n polygon = Polygon([(50,imshape[0]+1),(imshape[1]-50,imshape[0]+1), (imshape[1]-420, 329), (420, 329)])\n vertices = np.array([[left_bottom,left_top, right_top, right_bottom]], dtype=np.int32)\n masked_edge = region_of_interest(canny_edges, vertices)\n \n # (step 5) get lane lines from hough transform\n rho = 2\n theta = np.pi/18 \n threshold = 15\n min_line_length = 10\n max_line_gap = 20\n lines = hough_lines(masked_edge, rho, theta, threshold, min_line_length, max_line_gap)\n \n # (step 6) seperate left and right lines\n left_lines = []\n right_lines = []\n for line in lines:\n for x1,y1,x2,y2 in line:\n if y1 > y2:\n temp_line = [x1,y1,x2,y2]\n if x2 != x1:\n m = (float(y2) - float(y1)) / (float(x2) - float(x1))\n else:\n m = 1000 # it will be dicarded, any high value will work\n temp_line.append(m)\n if x1 < x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n else:\n temp_line = [x2,y2,x1,y1]\n if x2 != x1:\n m = (float(y1) - float(y2)) / (float(x1) - float(x2))\n else:\n m = 1000\n temp_line.append(m)\n if x1 > x2:\n left_lines.append(temp_line)\n else:\n right_lines.append(temp_line)\n \n # (step 7) get left and right lines slopes, can be done with step 6 although\n left_slop = []\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; \n if x1 != x2:\n left_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_left_slop = sum(left_slop)/len(left_slop) # not used yet\n \n right_slop = []\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; \n if x1 != x2:\n right_slop.append( (float(y2) - float(y1)) / (float(x2) - float(x1)) )\n average_right_slope = sum(right_slop)/len(right_slop) # not used yet\n \n \n # (step 8) delete left lines which deviate from thersold_s slope\n thersold_s = 0.4\n delet_left_index = []\n i = 0\n for left_line in left_lines:\n x1 = left_line[0]; y1 = left_line[1]; x2 = left_line[2]; y2 = left_line[3]; m = left_line[4]; \n if abs(m) < thersold_s:\n delet_left_index.append(i)\n i=i+1\n for i in range((len(delet_left_index)-1), -1, -1):\n del left_lines[delet_left_index[i]]\n \n # (step 9) delete right lines which deviate from average slope\n delet_index_right = []\n i = 0\n for right_line in right_lines:\n x1 = right_line[0]; y1 = right_line[1]; x2 = right_line[2]; y2 = right_line[3]; m = right_line[4]; \n if abs(m) < thersold_s:\n delet_index_right.append(i)\n i=i+1\n for i in range((len(delet_index_right)-1), -1, -1):\n del right_lines[delet_index_right[i]]\n \n # (step 10) extrapolate left and right lines\n left_line_draw = True\n x_lefts = []\n y_lefts = []\n for line in left_lines:\n x1, y1, x2, y2, m = line\n x_lefts.append(x1)\n x_lefts.append(x2) \n y_lefts.append(y1)\n y_lefts.append(y2)\n \n if len(x_lefts) > 0:\n slope_left, c_left = np.polyfit(x_lefts, y_lefts, 1)\n else:\n slope_left, c_left = 1, 1\n left_line_draw = False\n \n right_line_draw = True\n x_rights = []\n y_rights = []\n for line in right_lines:\n x1, y1, x2, y2, m = line\n x_rights.append(x1)\n x_rights.append(x2)\n y_rights.append(y1)\n y_rights.append(y2)\n if len(x_rights) > 0:\n slope_right, c_right = np.polyfit(x_rights, y_rights, 1)\n else:\n slope_right, c_right = 1, 1\n right_line_draw = False\n \n y1_left = 530 # 
again hardcoded values, from ROI\n y2_left = 330 # again hardcoded values, from ROI\n x1_left = int((y1_left - c_left) / slope_left)\n x2_left = int((y2_left - c_left) / slope_left)\n \n y1_right = 530 # again hardcoded values, from ROI\n y2_right = 330 # again hardcoded values, from ROI \n x1_right = int((y1_right - c_right) / slope_right)\n x2_right = int((y2_right - c_right) / slope_right)\n \n # (step 11) check if left/right line is out of ROI\n left_point1 = Point(x1_left, y1_left)\n left_point2 = Point(x2_left, y2_left)\n \n right_point1 = Point(x1_right, y1_right)\n right_point2 = Point(x2_right, y2_right)\n \n if polygon.contains(left_point1) and polygon.contains(left_point2):\n left_line_draw = True\n else:\n #print (\"left line out\", left_point1, left_point2)\n left_line_draw = False\n \n if polygon.contains(right_point1) and polygon.contains(right_point2):\n right_line_draw = True\n else:\n #print (\"right line out\", right_point1, right_point2)\n right_line_draw = False\n \n \n # (step 12) draw lines\n line_image = np.copy(image)\n # Draw the right and left lines on image\n if left_line_draw:\n cv2.line(line_image, (x1_left, y1_left), (x2_left, y2_left), (255,0,0),5)\n if right_line_draw:\n cv2.line(line_image, (x1_right, y1_right), (x2_right, y2_right), (255,0,0),5)\n \n # Create a \"color\" binary image to combine with line image\n color_edges = np.dstack((masked_edge, masked_edge, masked_edge)) \n \n # Draw the lines on the edge image\n lines_edges = cv2.addWeighted(color_edges, 0.4, line_image, 1, 0) \n #plt.imshow(lines_edges)\n #plt.show()\n return lines_edges", "def create_predictions_blob(self):\n self.human_blob = cv2.dnn.blobFromImage(cv2.resize(self.human_blob,\n (MODEL_INPUT_SIZE, MODEL_INPUT_SIZE)), 1.0 / 255,\n (MODEL_INPUT_SIZE, MODEL_INPUT_SIZE), (0, 0, 0),\n swapRB=True, crop=False)", "def hard_nms(box_scores, iou_threshold, top_k=-1, candidate_size=10):\n # TOP_K was originally -1, to keep all faces, but trying to filter\n # CANDIDATE_SIZE was originally 200, trying to limit # of faces\n scores = box_scores[:, -1]\n boxes = box_scores[:, :-1]\n picked = []\n indexes = np.argsort(scores)\n indexes = indexes[-candidate_size:]\n while len(indexes) > 0:\n current = indexes[-1]\n picked.append(current)\n if 0 < top_k == len(picked) or len(indexes) == 1:\n break\n current_box = boxes[current, :]\n\n indexes = indexes[:-1]\n rest_boxes = boxes[indexes, :]\n iou = iou_of(\n rest_boxes,\n np.expand_dims(current_box, axis=0),\n )\n indexes = indexes[iou <= iou_threshold]\n \n # additional method of discrimination, only the boxes\n # with the largest areas are selected\n new_boxes = box_scores[picked, :]\n areas = []\n for box in new_boxes:\n left_top = np.asarray([box[0], box[1]])\n right_bottom = np.asarray([box[2], box[3]])\n area = area_of(left_top, right_bottom)\n areas.append(area)\n areas = np.asarray(areas)\n biggest = np.argsort(areas)\n last_index = len(biggest) - 1\n middle = max(len(biggest)// 2, 1)\n size = min(middle, candidate_size / 2)\n \n final_boxes = []\n for i in range(size):\n final_boxes.append(new_boxes[biggest[last_index-i]])\n final_boxes = np.asarray(final_boxes)\n \n return final_boxes\n #return box_scores[picked, :]", "def detect_objects(interpreter, image):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n #boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n #count = int(get_output_tensor(interpreter, 3))\n\n 
#results = []\n #for i in range(count):\n # if scores[i] >= threshold:\n # result = {\n # #'bounding_box': boxes[i],\n # 'class_id': classes[i],\n # 'score': scores[i]\n # }\n # results.append(result)\n \n \n #print(\"detection results:\\n\" + str(results))\n #return results\n return np.array([int(_class) for _class in classes]), np.array(scores)", "def classify_breed(self, image: LoadedImage, animal: AnimalType, bbox: yolo.BoundBox, top_n: int) \\\n -> Dict[BreedName, float]:\n predict_utils = self.models[animal]\n # get sub-image\n cropped_image = image[bbox.ymin:bbox.ymax, bbox.xmin:bbox.xmax, :]\n new_image_data = misc.imresize(cropped_image, (IMG_SIZE, IMG_SIZE))\n # pass it to the breed classifier\n breed_names = predict_utils.breed_modeler.predict_one_loaded(new_image_data,\n predict_utils.model, predict_utils.cls_names,\n top_n)\n return breed_names", "def yolo_show_img(image, class_ids, boxes, labels, confidences, colors):\n for i, box in enumerate(boxes):\n # extract the bounding box coordinates\n (x, y) = (box[0], box[1])\n (w, h) = (box[2], box[3])\n\n # draw a bounding box rectangle and label on the image\n color = [int(c) for c in colors[class_ids[i]]]\n cv2.rectangle(image, (x, y), (x + w, y + h), color, 3)\n text = '{}: {:.4f}'.format(labels[i], confidences[i])\n print(text)\n\n font_scale = 1.3\n # set the rectangle background to white\n rectangle_bgr = color\n # set some text\n # get the width and height of the text box\n (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=1)[0]\n # set the text start position\n text_offset_x = x\n text_offset_y = y - 3 \n # make the coords of the box with a small padding of two pixels\n box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 10, text_offset_y - text_height - 10 ))\n cv2.rectangle(image, box_coords[0], box_coords[1], rectangle_bgr, cv2.FILLED)\n cv2.putText(image, text, (text_offset_x, text_offset_y), cv2.FONT_HERSHEY_SIMPLEX, \n fontScale=font_scale, color=(255, 255, 255), thickness=2)\n\n cv2.imshow('yolo prediction', image)\n cv2.waitKey(0)", "def im_detect(net, target_data,im_data, im_info, features_given=True):\n\n cls_prob, rois = net(target_data, im_data, im_info,\n features_given=features_given)\n scores = cls_prob.data.cpu().numpy()[0,:,:]\n zs = np.zeros((scores.size, 1))\n scores = np.concatenate((zs,scores),1)\n boxes = rois.data.cpu().numpy()[0,:, :]\n\n return scores, boxes", "def im_detect_bbox_hflip(model, im, box_proposals=None):\n # Compute predictions on the flipped image\n im_hf = im[:, ::-1, :]\n im_width = im.shape[1]\n\n if not cfg.MODEL.FASTER_RCNN:\n box_proposals_hf = box_utils.flip_boxes(box_proposals, im_width)\n else:\n box_proposals_hf = None\n\n scores_hf, boxes_hf, im_scales = im_detect_bbox(\n model, im_hf, box_proposals_hf\n )\n\n # Invert the detections computed on the flipped image\n boxes_inv = box_utils.flip_boxes(boxes_hf, im_width)\n\n return scores_hf, boxes_inv, im_scales", "def getBoundingBoxes():\n allBoundingBoxes = BoundingBoxes()\n import glob\n import os\n # Read ground truths\n pth = \"../Wildtrack_dataset/annotations_positions/*.json\"\n files = glob.glob(pth)\n files.sort()\n # Class representing bounding boxes (ground truths and detections)\n allBoundingBoxes = BoundingBoxes()\n # Read GT detections from txt files\n # Each value of each line is \"class_id, x, y, width, height\" respectively\n # Class_id represents the class of the bounding box\n # x, y represents the most top-left coordinates of 
the bounding box\n # x2, y2 represents the most bottom-right coordinates of the bounding box\n for idx, f in enumerate(files):\n with open(f) as j:\n data = json.load(j)\n # print(f)\n for d in data:\n for v in d['views']:\n if v['viewNum'] != 0 or v['xmax'] == -1:\n continue\n\n idClass = 'person' # class\n x = float(v['xmin']) # confidence\n y = float(v['ymin'])\n w = float(v['xmax'])\n h = float(v['ymax'])\n bb = BoundingBox(\n str(idx),\n idClass,\n x,\n y,\n w,\n h,\n CoordinatesType.Absolute, (1920, 1080),\n BBType.GroundTruth,\n format=BBFormat.XYX2Y2)\n allBoundingBoxes.addBoundingBox(bb)\n \n\n\n # Read detections\n with open(\"wildtrack_yolo_tiny.out\", \"rb\") as fin:\n pred = pickle.load(fin)\n for idx, (k, value) in enumerate(pred.items()):\n for d in value:\n if d['tag'] != \"person\":\n continue\n \n box = d['box']\n idClass = 'person' # class\n x = float(box[0]) # confidence\n y = float(box[1])\n w = float(box[2])\n h = float(box[3])\n bb = BoundingBox(\n str(idx),\n idClass,\n x,\n y,\n w,\n h,\n CoordinatesType.Absolute, (1920, 1080),\n BBType.Detected,\n d['score'],\n format=BBFormat.XYX2Y2)\n allBoundingBoxes.addBoundingBox(bb)\n return allBoundingBoxes", "def __extract_rectangle(img: np.ndarray, rectangle: Rectangle):\n number_img = img[rectangle.y:rectangle.y+rectangle.h, rectangle.x:rectangle.x+rectangle.w]\n\n canvas = np.zeros((MNIST_PIXEL, MNIST_PIXEL), dtype=np.uint8)\n filled_max_side = MNIST_PIXEL - 4\n if number_img.shape[0] >= number_img.shape[1]:\n h_w_ratio = number_img.shape[0] / number_img.shape[1]\n\n if (max(int(filled_max_side / h_w_ratio), 1), filled_max_side) == (number_img.shape[1], number_img.shape[0]):\n number_img = cv2.resize(number_img, (number_img.shape[1] + 1, number_img.shape[0]))\n else:\n number_img = cv2.resize(number_img, (max(int(filled_max_side / h_w_ratio), 1), filled_max_side))\n\n x_start = int(MNIST_PIXEL / 2 - number_img.shape[1] / 2)\n x_end = x_start + number_img.shape[1]\n canvas[2:MNIST_PIXEL - 2, x_start:x_end] = number_img\n else:\n h_w_ratio = number_img.shape[0] / number_img.shape[1]\n\n if (filled_max_side, max(int(filled_max_side * h_w_ratio), 1)) == (number_img.shape[1], number_img.shape[0]):\n number_img = cv2.resize(number_img, (number_img.shape[1], number_img.shape[0] + 1))\n else:\n number_img = cv2.resize(number_img, (filled_max_side, max(int(filled_max_side * h_w_ratio), 1)))\n\n y_start = int(MNIST_PIXEL / 2 - number_img.shape[0] / 2)\n y_end = y_start + number_img.shape[0]\n canvas[y_start:y_end, 2:MNIST_PIXEL - 2] = number_img\n\n return canvas", "def encode_bboxes(ann, bboxes, img_name):\n\n ann_root = ann.getroot()\n\n folder = ET.Element(\"folder\")\n folder.text = ann_root.find('folder').text\n filename = ET.Element(\"filename\")\n filename.text = img_name\n path = ET.Element(\"path\")\n path.text = ann_root.find('folder').text + '/' + img_name\n source = ET.Element(\"source\")\n database = ET.Element(\"database\")\n database.text = ann_root.find(\"source\").find('database').text\n source.append(database)\n size = ET.Element(\"size\")\n width = ET.Element(\"width\")\n width.text = ann_root.find(\"size\").find('width').text\n height = ET.Element(\"height\")\n height.text = ann_root.find(\"size\").find('height').text\n depth = ET.Element(\"depth\")\n depth.text = ann_root.find(\"size\").find('depth').text\n size.append(width)\n size.append(height)\n size.append(depth)\n segmented = ET.Element(\"segmented\")\n segmented.text = ann_root.find('segmented').text\n\n new_root = ET.Element(\"annotation\")\n 
new_root.append(folder)\n new_root.append(filename)\n new_root.append(path)\n new_root.append(source)\n new_root.append(size)\n new_root.append(segmented)\n\n for b in bboxes:\n xmin = ET.Element(\"xmin\")\n xmin.text = str(int(b[0]))\n ymin = ET.Element(\"ymin\")\n ymin.text = str(int(b[1]))\n xmax = ET.Element(\"xmax\")\n xmax.text = str(int(b[2]))\n ymax = ET.Element(\"ymax\")\n ymax.text = str(int(b[3]))\n name = ET.Element(\"name\")\n name.text = self.classes[int(b[4])]\n bndbox = ET.Element(\"bndbox\")\n bndbox.append(xmin)\n bndbox.append(ymin)\n bndbox.append(xmax)\n bndbox.append(ymax)\n pose = ET.Element(\"pose\")\n truncated = ET.Element(\"truncated\")\n difficult = ET.Element(\"difficult\")\n pose.text = \"Unspecified\"\n truncated.text = \"0\"\n difficult.text = \"0\"\n obj = ET.Element(\"object\")\n obj.append(name)\n obj.append(pose)\n obj.append(truncated)\n obj.append(difficult)\n obj.append(bndbox)\n\n new_root.append(obj)\n\n new_tree = ET.ElementTree(new_root)\n\n return new_tree", "def bbox_eval(results,\n class_num,\n overlap_thresh=0.5,\n map_type='11point',\n is_bbox_normalized=False,\n evaluate_difficult=False):\n assert 'bbox' in results[0]\n logger.info(\"Start evaluate...\")\n\n detection_map = DetectionMAP(\n class_num=class_num,\n overlap_thresh=overlap_thresh,\n map_type=map_type,\n is_bbox_normalized=is_bbox_normalized,\n evaluate_difficult=evaluate_difficult)\n\n for t in results:\n bboxes = t['bbox'][0]\n bbox_lengths = t['bbox'][1][0]\n\n if bboxes.shape == (1, 1) or bboxes is None:\n continue\n gt_boxes = t['gt_bbox'][0]\n gt_labels = t['gt_class'][0]\n difficults = t['is_difficult'][0] if not evaluate_difficult \\\n else None\n\n if len(t['gt_bbox'][1]) == 0:\n # gt_bbox, gt_class, difficult read as zero padded Tensor\n bbox_idx = 0\n for i in range(len(gt_boxes)):\n gt_box = gt_boxes[i]\n gt_label = gt_labels[i]\n difficult = None if difficults is None \\\n else difficults[i]\n bbox_num = bbox_lengths[i]\n bbox = bboxes[bbox_idx:bbox_idx + bbox_num]\n gt_box, gt_label, difficult = prune_zero_padding(\n gt_box, gt_label, difficult)\n detection_map.update(bbox, gt_box, gt_label, difficult)\n bbox_idx += bbox_num\n else:\n # gt_box, gt_label, difficult read as LoDTensor\n gt_box_lengths = t['gt_bbox'][1][0]\n bbox_idx = 0\n gt_box_idx = 0\n for i in range(len(bbox_lengths)):\n bbox_num = bbox_lengths[i]\n gt_box_num = gt_box_lengths[i]\n bbox = bboxes[bbox_idx:bbox_idx + bbox_num]\n gt_box = gt_boxes[gt_box_idx:gt_box_idx + gt_box_num]\n gt_label = gt_labels[gt_box_idx:gt_box_idx + gt_box_num]\n difficult = None if difficults is None else \\\n difficults[gt_box_idx: gt_box_idx + gt_box_num]\n detection_map.update(bbox, gt_box, gt_label, difficult)\n bbox_idx += bbox_num\n gt_box_idx += gt_box_num\n\n logger.info(\"Accumulating evaluatation results...\")\n detection_map.accumulate()\n map_stat = 100. 
* detection_map.get_map()\n logger.info(\"mAP({:.2f}, {}) = {:.2f}%\".format(overlap_thresh, map_type,\n map_stat))\n return map_stat", "def get_bounding_box(im):\n coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])", "def predict(frame):\n cv_net = cv2.dnn.readNetFromTensorflow(PATH_TO_MODEL_WEIGHTS, PATH_TO_GRAPH)\n labels = coco_label_reader(PATH_TO_LABELS)\n\n rows, cols, _ = frame.shape\n blob = cv2.dnn.blobFromImage(frame, size=(rows, cols), swapRB=True, crop=False)\n cv_net.setInput(blob)\n cv_out = cv_net.forward()\n boxes = []\n classes = []\n for detection in cv_out[0, 0, :, :]:\n score = float(detection[2])\n if score > 0.3:\n left = detection[3] * cols\n top = detection[4] * rows\n right = detection[5] * cols\n bottom = detection[6] * rows\n class_ = int(detection[1])\n if left > right:\n left, right = right, left\n if top > bottom:\n top, bottom = bottom, top\n boxes.append([left, top, right, bottom])\n classes.append(labels[class_])\n return non_max_suppression(np.asarray(boxes), np.asarray(classes))", "def binarize(img):\n image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n image = cv.GaussianBlur(image, (3, 3), 0)\n ret, image = cv.threshold(image, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n return image", "def _predict_image(im, net, transformer):\n\n net.blobs[\"data\"].data[...] = transformer.preprocess(\"data\", im)\n out = net.forward()\n\n probs = out[\"prob\"][0]\n prob_cloud = probs[1] * 100.0\n return prob_cloud", "def get_upper_body_box(self, img_w, img_h):\n\n if not (img_w > 0 and img_h > 0):\n raise Exception(\"img size should be positive\")\n\n _NOSE = CocoPart.Nose.value\n _NECK = CocoPart.Neck.value\n _RSHOULDER = CocoPart.RShoulder.value\n _LSHOULDER = CocoPart.LShoulder.value\n _THRESHOLD_PART_CONFIDENCE = 0.3\n parts = [part for idx, part in self.body_parts.items() if part.score > _THRESHOLD_PART_CONFIDENCE]\n part_coords = [(img_w * part.x, img_h * part.y) for part in parts if\n part.part_idx in [0, 1, 2, 5, 8, 11, 14, 15, 16, 17]]\n\n if len(part_coords) < 5:\n return None\n\n # Initial Bounding Box\n x = min([part[0] for part in part_coords])\n y = min([part[1] for part in part_coords])\n x2 = max([part[0] for part in part_coords])\n y2 = max([part[1] for part in part_coords])\n\n # # ------ Adjust heuristically +\n # if face points are detcted, adjust y value\n\n is_nose, part_nose = _include_part(parts, _NOSE)\n is_neck, part_neck = _include_part(parts, _NECK)\n torso_height = 0\n if is_nose and is_neck:\n y -= (part_neck.y * img_h - y) * 0.8\n torso_height = max(0, (part_neck.y - part_nose.y) * img_h * 2.5)\n #\n # # by using shoulder position, adjust width\n is_rshoulder, part_rshoulder = _include_part(parts, _RSHOULDER)\n is_lshoulder, part_lshoulder = _include_part(parts, _LSHOULDER)\n if is_rshoulder and is_lshoulder:\n half_w = x2 - x\n dx = half_w * 0.15\n x -= dx\n x2 += dx\n elif is_neck:\n if is_lshoulder and not is_rshoulder:\n half_w = abs(part_lshoulder.x - part_neck.x) * img_w * 1.15\n x = min(part_neck.x * img_w - half_w, x)\n x2 = max(part_neck.x * img_w + half_w, x2)\n elif not is_lshoulder and is_rshoulder:\n half_w = abs(part_rshoulder.x - part_neck.x) * img_w * 1.15\n x = min(part_neck.x * img_w - half_w, x)\n x2 = max(part_neck.x * img_w + half_w, x2)\n\n # ------ Adjust heuristically -\n\n # fit into the image frame\n x = max(0, x)\n y = max(0, y)\n x2 = min(img_w - x, x2 - x) + x\n y2 = min(img_h - y, y2 - y) + y\n\n if _round(x2 - x) == 0.0 or _round(y2 - 
y) == 0.0:\n return None\n return {\"x\": _round((x + x2) / 2),\n \"y\": _round((y + y2) / 2),\n \"w\": _round(x2 - x),\n \"h\": _round(y2 - y)}" ]
[ "0.6780052", "0.6780052", "0.66179", "0.66036594", "0.6570653", "0.6464624", "0.6442197", "0.6430756", "0.63636744", "0.63617635", "0.629576", "0.6279707", "0.62130153", "0.6206307", "0.61893594", "0.61628205", "0.61508197", "0.6141496", "0.61407137", "0.61125773", "0.60966784", "0.6081802", "0.60804707", "0.60784763", "0.60652983", "0.60580313", "0.60145116", "0.6004682", "0.60036", "0.60027736", "0.5998906", "0.5993765", "0.5971346", "0.596498", "0.59589523", "0.59519523", "0.59412706", "0.5924828", "0.5922146", "0.5901741", "0.58879906", "0.5886848", "0.5869505", "0.58563286", "0.58422536", "0.5829505", "0.58224887", "0.582044", "0.58116794", "0.5806035", "0.5801506", "0.5799837", "0.5799019", "0.5794597", "0.5793048", "0.57927924", "0.57841146", "0.5777607", "0.57768875", "0.5774142", "0.5763937", "0.5763394", "0.5759403", "0.5756348", "0.5754179", "0.57540727", "0.57351303", "0.5734775", "0.5719478", "0.57117164", "0.5711409", "0.5697293", "0.5694198", "0.569294", "0.5687946", "0.5680407", "0.5678266", "0.5675781", "0.5671001", "0.56642914", "0.5663105", "0.5662965", "0.5659454", "0.56539834", "0.5653627", "0.56520724", "0.5622078", "0.5620352", "0.56187934", "0.56184", "0.56168324", "0.5616195", "0.5614753", "0.56146723", "0.5612598", "0.56105673", "0.55951506", "0.5589104", "0.55834806", "0.55808425", "0.5579692" ]
0.0
-1
loop through a dir and run all images
def predict_all_images():
    #Read config
    config = read_config()
    #read model
    model = read_model(config["model_path"], config)
    tifs = glob.glob(os.path.join("data","**","*.tif"))
    for tif in tifs:
        print(tif)
        prediction = predict_image(model, tif, score_threshold = 0.1, max_detections= 200, return_plot=False)
        #reshape and save to csv
        df = pd.DataFrame(prediction)
        df.columns = ["xmin","ymin","xmax","ymax"]
        #save boxes
        file_path = os.path.splitext(tif)[0] + ".csv"
        df.to_csv(file_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", "def display_imgs(img_dir,img_list):\n for img in img_list:\n display_img(img_dir, img)", "def process_files(self):\n for filename in self.temp_directory.iterdir():\n im = Image.open(str(filename))\n scaled = im.resize((640, 480))\n scaled.save(str(filename))", "def load_images(self, folder):\n cwd = os.getcwd()\n dir = cwd + '/' + folder\n files = os.listdir(dir)\n for file in files:\n img = pygame.image.load(dir + '/' + file)\n self.images.append(img)", "def main():\n base_dir = '/home/sjimenez/imagenes_prueba'\n out_dir = '/home/sjimenez/easy_analysis'\n for _, _, files in os.walk(base_dir, topdown=False):\n for f in files:\n print('--------- {} ---------'.format(f))\n act_dir = osp.join(base_dir, f)\n act_im = cv2.imread(act_dir)\n if act_im is not None:\n get_image_stats(act_im, out_dir, f)\n else:\n print('Not able to open the image')", "def run_yolo_indir(images_path):\n for filename in os.listdir(images_path):\n try:\n # print(filename)\n Image.open(os.path.join(images_path, filename))\n test_detector(b'cfg/voc.data', b'cfg/yolo.cfg', b'yolo.weights', os.path.join(\n images_path, filename).encode('utf-8'), parameters.YOLO_THRES, 0.5)\n w, h, o = read_bounding_boxes('bounding_boxes.txt')\n crop_all_bounding_boxes(o, filename, os.path.join, images_path)\n except:\n print('Cannot test image', filename)\n continue", "def main(vis_dirs, outdir):\n assert len(vis_dirs) == 4\n\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n\n for i, filename in enumerate(tqdm(os.listdir(vis_dirs[-1]))):\n # if i % 100 == 0:\n # print(i)\n\n files = [os.path.join(vis_dir, filename) for vis_dir in vis_dirs]\n outimg = os.path.join(outdir, filename)\n merge_four_images(files, outimg)\n\n print (\"Finished! 
Result dir is %s\" % outdir)", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def _process_images(self, docname: pathlib.Path, images: List[nodes.image]) -> None:\n logger.debug(\"[nbtutorial]: Processing images for %s\", docname)\n\n if len(images) == 0:\n return\n\n img_dir = pathlib.Path(self.outdir, docname.parent, RESOURCE_DIR)\n\n if not img_dir.exists():\n img_dir.mkdir(parents=True)\n\n for img in images:\n fname = pathlib.Path(img[\"uri\"]).name\n\n source = pathlib.Path(self.app.confdir, img[\"uri\"])\n destination = pathlib.Path(img_dir, fname)\n\n shutil.copy(source, destination)", "def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)", "def load_images(subdir):\n with perform(\n name='dbutils load_images',\n before='Loading images to gallery',\n fail='Error occured while loading images to gallery',\n after='Images succesfully loaded'\n ):\n load_dummy_images(subdir)", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def bulk_process_images(inputpath, outputpath, extension):\n\n for dirpath, dirnames, filenames in os.walk(inputpath):\n structure = os.path.join(outputpath, dirpath[len(inputpath) + 1:])\n for file in filenames:\n if file.endswith(extension):\n src = os.path.join(dirpath, file)\n dest = os.path.join(structure, file)\n img = load_and_preprocess_image(src)\n cv2.imwrite(dest, img)", "def load_images(self):\n self.img_paths = sorted(glob(self.img_pattern))\n self.imgs = []\n for idx, this_path in enumerate(self.img_paths):\n try:\n this_img = cv2.imread(this_path)\n if self.downscale > 1:\n this_img = cv2.resize(this_img, (0, 0),\n fx=1/float(self.downscale),\n fy=1/float(self.downscale),\n interpolation=cv2.INTER_LINEAR)\n except Exception as e:\n print(\"error loading img: %s\" % (this_path))\n if this_img is not None:\n self.imgs.append(this_img)\n print(\"loaded img %d size=(%d,%d): %s\" %\n (idx, this_img.shape[0], this_img.shape[1], this_path))\n print(\"loaded %d images\" % (len(self.imgs)))", "def walk_through_dir(dir_path):\n for dirpath, dirnames, filenames in os.walk(dir_path):\n print(f\"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.\")", "def open_images_in(directory):\n\n files = [\n filename\n for filename in os.listdir(directory)\n if \"_\" in filename and not filename.startswith(\"joined\")\n ]\n tiles = []\n if len(files) > 0:\n i = 0\n for file in files:\n pos = get_image_column_row(file)\n im = Image.open(os.path.join(directory, file))\n\n position_xy = [0, 0]\n count = 0\n for a, b in zip(pos, im.size):\n position_xy[count] = a * b\n count = count + 1\n tiles.append(\n Tile(\n image=im,\n position=pos,\n number=i + 1,\n 
coords=position_xy,\n filename=file,\n )\n )\n i = i + 1\n return tiles", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def walk_through_dir(dir_path):\n for dirpath, dirnames, filenames in os.walk(dir_path):\n print(f\"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.\")", "def process(directory):\n files = []\n\n options = [\"Load\", \"Create\"]\n choice = options[int(ui.prompt(options=options))]\n\n for item in os.listdir(directory):\n if os.path.isfile(os.path.join(directory, item)):\n filename = os.path.join(directory, item)\n if choice == \"Load\" and item.endswith(\".png\"):\n files.append(filename)\n elif choice == \"Create\" and item.endswith(\".file\"):\n files.append(filename)\n\n filenames, pageNames = imagePages(files, choice)\n \n targets = [name.split('/')[-1][:5] for name in filenames]\n return pageNames, targets, filenames", "def _iter_images(self):\n raise NotImplementedError", "def populate_image_lists():\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_a\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_a.append(path.path)\r\n with os.scandir(os.path.join(dir_path, \"inputs\", \"type_b\")) as filepaths:\r\n for path in filepaths:\r\n extension = os.path.splitext(path)[1].lower()\r\n if extension == \".png\" or extension == \".jpg\":\r\n images_b.append(path.path)", "def list_images(path=['.']):\n for image_dir in set(path):\n if not os.path.isdir(image_dir):\n continue\n for filename in os.listdir(image_dir):\n bname, ext = os.path.splitext(filename)\n if ext.lower() not in VALID_IMAGE_EXTS:\n continue\n\n filepath = os.path.join(image_dir, filename)\n yield strutils.decode(filepath)", "def run_images_analysis(filepath, ID, method):\n for path in filepath:\n try:\n Image.open(path)\n except IOError:\n msg = 'Please import images files, or just a single zip archive'\n else:\n filename, extension = get_file_name(path)\n\n # Save raw image to database\n msg = client.upload_file(ID, filename, extension, path)\n\n err, msg = check_msg(msg)\n\n if err is False: # if no error in uploading image\n # Request to process image\n client.process_image(ID, filename, method)\n return msg", "def createAllImageFiles(poly, name) :\n \n for i in range(len(poly.getPaths())):\n fileName = name + \"_\" + str(i) + \".dot\"\n imgName = name + \"_\" + str(i) + \".jpg\"\n \n Command = \"neato -Tjpeg \" + fileName + \" -o \" + imgName\n run(Command, shell=True)", "def index_files():\n\n print(\"Indexing files\")\n\n for root, _, files in os.walk(image_directory):\n for item in files:\n for file_type in file_types:\n if file_type in item:\n images_in_directory.append(os.path.join(root, item))\n\n print(f'Finished indexing {len(images_in_directory)} files')\n\n pass", "def process_imgdir(self,imgdir):\n #Write images into resultdir\n resultdir = os.path.join(imgdir, 'results')\n #Read images from input dir\n inputdir = os.path.join(imgdir, 'inputs')\n shutil.rmtree(resultdir)\n os.mkdir(resultdir)\n #Read files from input images\n for fullname in os.listdir(inputdir):\n filepath = os.path.join(inputdir, fullname)\n if os.path.isfile(filepath):\n basename = os.path.basename(filepath)\n image = cv2.imread(filepath, cv2.IMREAD_COLOR)\n if len(image.shape) == 3 and image.shape[2] == 3:\n print('Processing %s ...' 
% basename)\n else:\n sys.stderr.write('Skipping %s, not RGB' % basename)\n continue\n #Extract haze from the scene and then save the image\n dehazed = self.get_scene_radiance(image)\n cv2.imwrite(os.path.join(resultdir, basename), dehazed)\n return os.path.join(resultdir, basename)", "def loop_dir(dir_name: str, graph_ext: str) -> None:\n directory = fsencode(dir_name)\n for file in listdir(directory):\n filename = fsdecode(file)\n if filename.endswith(graph_ext):\n draw_graph(filename)", "def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def image_iter() -> iter:\r\n return ('Images/' + image for image in IMAGES)", "def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs", "def list_images(img_dir) -> Iterable[str]:\n extensions = (\".png\", \".jpg\", \".jpeg\", \".tif\", \".tiff\")\n\n paths = Path(img_dir).glob(\"**/*\")\n paths = filter(lambda p: p.is_file() and p.suffix.lower() in extensions, paths)\n return (str(p) for p in paths)", "def preprocessfolder(self):\n imgs, _ = getFilesAndHdf(str(self.in_directory.text()))\n self.img_list = sorted(imgs)\n self.updateImageGroups()", "def main():\n argvs = sys.argv\n argc = len(argvs)\n if argc == 1:\n print('usage: convert2png.py <path/to/*.ppm> ...')\n sys.exit(1)\n\n os.makedirs('result/convert2png', exist_ok=True)\n\n for i in range(1, argc):\n img = cv2.imread(argvs[i])\n\n # root, ext = os.path.splitext(argvs[i])\n # cv2.imwrite(root + '.png', img)\n\n root, ext = os.path.splitext(argvs[i])\n strImgName = root.split('/')[-1]\n cv2.imwrite('result/convert2png/' + strImgName + '.png', img)", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def main():\n print(\"For each image, type the new name of the file.\" +\n \" No extension necessary!\", end=\"\\n\\n\")\n file_list = input_path.glob(f\"*.{args.ext}\")\n plt.ion()\n\n for pic in file_list:\n img = io.imread(str(pic))\n img = rescale(img, 0.25)\n img = 
rotate(img, 90, resize = True)\n plt.draw()\n plt.pause(0.001)\n if args.vinyl:\n new_name = get_vinyl_name()\n else:\n print(\"\\n\")\n new_name = input(\n \"Please enter a new filename. Press [enter] to skip: \")\n if new_name:\n if not new_name.endswith(args.ext):\n new_name += \".\" + args.ext\n # io.imsave(output_path / new_name, img)\n shutil.copyfile(pic, output_path / new_name)\n if args.replace:\n os.remove(pic)", "def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list", "def fastset(self):\n for d in dirlist(os.path.join(self.datadir)):\n for f in imlist(d):\n yield ImageDetection(filename=f, category=filebase(d))", "def getimgs():", "def load_images(self, tmx):\n for image_data in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)", "def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)", "def image_generator(img_list):\n while True:\n img = random.choice(img_list)\n label = os.path.basename(os.path.dirname(img)) # add label function according to the dataset tree\n img = preprocess_image(img)\n yield img, label", "def main():\n os.makedirs(\"./img/event_generated/\", exist_ok=True)\n for category in categories():\n render_icon(category + \".png\")", "def load_images(path):\n images = []\n images_names = []\n \n for file_name in os.listdir(path):\n image_name = file_name\n images_names.append(image_name)\n images_names = sorted(images_names) #use sort to insure linux file sys behaves\n print(images_names) #check for proper order\n\n for file_name in images_names:\n image = pygame.image.load(path + os.sep + file_name).convert()\n images.append(image)\n return images", "def load_images_from_folder(folder):\n images = []\n for filename in os.listdir(folder):\n img = Image.open(os.path.join(folder,filename))\n images.append(img)\n return images", "def main():\n \n # for inserting other images, add tem to /input folder and list them here\n images = (\n 'image-0',\n 'image-1',\n 'image-2'\n )\n\n for image_name in images:\n print(image_name, \"image:\")\n\n image = open_image(image_name)\n display_image(image, \"Original input \" + image_name)\n\n grayscale_v = transform_colors(image)\n display_image(grayscale_v[:,:,0], \"Grayscale \" + image_name)\n save_image(image_name + \"-grayscale\", grayscale_v[:,:,0])\n\n contours_v, contours = get_contours(grayscale_v)\n display_image(contours_v, \"Contours \" + image_name)\n save_image(image_name + \"-contours\", contours_v)\n\n labeled_img, areas = get_measures(image, contours[1:])\n display_image(labeled_img, \"Labeled \" + image_name)\n save_image(image_name + \"-labeled\", labeled_img)\n\n areas_histogram(areas, image_name)", "def load_images(image_name_to_label):\n images = []\n labels = []\n\n image_names = os.listdir(DEFAULT_IMG_PATH_EDITED)\n\n # Remove directories\n image_names.remove(\"COVID-19\")\n 
image_names.remove(\"Normal\")\n image_names.remove(\"ViralPneumonia\")\n\n # Load images from specific image directories (COVID-19, normal, viral pneumonia)\n def load_directory(directory):\n notifier.send(\" Loading from directory: \" + directory + \"...\")\n directory_path = DEFAULT_IMG_PATH_EDITED + os.sep + directory\n directory_image_names = os.listdir(directory_path)\n for i, image_name in enumerate(directory_image_names):\n base_image_name = get_base_image_name(image_name)\n query_name = directory + \"/\" + base_image_name\n query_name = query_name.lower().replace(\" \", \"\")\n if query_name in image_name_to_label:\n print(f\" {i / len(directory_image_names) * 100}% - [{image_name}]\")\n image_path = directory_path + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[query_name])\n load_directory(\"COVID-19\")\n load_directory(\"Normal\")\n load_directory(\"ViralPneumonia\")\n\n # Load images from default directory\n if LOAD_ALL_IMAGES:\n notifier.send(\" Loading from directory: default...\")\n for i, image_name in enumerate(image_names):\n base_image_name = get_base_image_name(image_name)\n if base_image_name in image_name_to_label:\n print(f\" {i / len(image_names) * 100}% - [{image_name}]\")\n image_path = DEFAULT_IMG_PATH_EDITED + os.sep + image_name\n image = get_processed_image(image_path)\n images.append(image)\n labels.append(image_name_to_label[base_image_name])\n\n return images, labels", "def _iterate_over_files(self):\n stats = Statistics()\n\n args = arguments.Args()\n\n for file in args.files:\n\n if isimage(file):\n before_size = stats.calculate_before_optimization(file)\n\n puts(\"%s %s\" % (\n e(\"==>\"),\n os.path.basename(file))\n )\n\n if \"--lossy\" in args.flags:\n Optimize.lossy(file)\n if \"--lossless\" in args.flags:\n Optimize.lossless(file)\n after_size = stats.calculate_after_optimization(file)\n\n puts(\"%s %s (%s)\" % (\n p(\"<==\"),\n os.path.basename(file),\n s(after_size) if after_size < before_size else after_size\n ))\n\n stats.show_statistics()", "def getImages(path):\n files = list()\n\n for f in listdir(path):\n file = join(path, f)\n if isfile(file):\n files.append(getImage(file))\n\n return files", "def process_images():\n image_path = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/img/')\n static_images = os.path.join(settings.BASE_DIR, 'static/CMESH/img/')\n\n copy_files(image_path, static_images)", "def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape", "def get_images(directory=None): #import from mask.py\n \n if directory == None:\n directory = os.getcwd() # Use working directory if unspecified\n \n image_list = [] # Initialize aggregaotrs\n file_list = []\n \n directory_list = os.listdir(directory) # Get list of files\n for entry in directory_list:\n absolute_filename = os.path.join(directory, entry)\n try:\n image = PIL.Image.open(absolute_filename)\n file_list += [entry]\n image_list += [image]\n except IOError:\n pass # do nothing with errors tying to open non-images\n return image_list, file_list", "def 
_locate_images(self):\r\n extensions = '|'.join(self.valid_extensions)\r\n extension_re = re.compile('.+\\.(%s)$' % extensions, re.IGNORECASE)\r\n files = sorted(os.listdir(self.path))\r\n\r\n images = []\r\n for root, dirs, files in os.walk(self.path, followlinks=self.config['follow_links']):\r\n for filename in sorted(files):\r\n if not filename.startswith('.') and extension_re.match(filename):\r\n images.append(Image(path=os.path.join(root, filename), config=self.config))\r\n if not self.config['recursive']:\r\n break\r\n\r\n if not images:\r\n raise SourceImagesNotFoundError(self.path)\r\n\r\n images = sorted(images, reverse=self.config['algorithm_ordering'][0] != '-')\r\n\r\n return images", "def show_files(file_locations):\n for file_loc in file_locations:\n show_image(file_loc)", "def load_images(self, image_paths):\n \n fill_list = []\n \n for idx in tqdm(range(len(image_paths))):\n path = image_paths[idx]\n yield cv2.imread(path)", "def do_stage(self, images):\n\n for i, image in enumerate(images):\n pass\n # logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)", "def _iter_images(self):\n for image in self._images:\n yield image", "def directory_walker(start_dir):\n\n for root, dirs, files in os.walk(os.path.expanduser(start_dir)):\n for f in files:\n filename = os.path.join(root, f)\n # Only process if its a type of image\n file_type = mimetypes.guess_type(filename.lower())[0]\n if file_type is not None and file_type.startswith('image/'):\n yield filename", "async def extractimages(self, ctx):\n if self.extract_images_running:\n await ctx.send(inline('Extract images already running'))\n return\n\n event_loop = asyncio.get_event_loop()\n running_load = event_loop.run_in_executor(self.executor, self.do_extract_images)\n\n self.extract_images_running = True\n await ctx.send(inline('Running image extract pipeline: this could take a while'))\n await running_load\n self.extract_images_running = False\n await ctx.send(inline('Image extract finished'))", "def load_pic_in_directory(directory):\n return [Image.open(os.path.join(directory, img)) for img in os.listdir(directory)]", "def scale_all_images(image_dir, ratio):\n pool = Pool(1)\n pool.starmap(scale_image, zip(\n image_dir, itertools.repeat(ratio)))\n pool.close()\n pool.join()", "def all_image_paths(self):\n self.labels = [i for i in (self.get_immediate_subdirectories(self.root_dir))\n if not i.startswith('.')]\n\n for root, subFolders, files in os.walk(self.root_dir):\n files = [i for i in files if not i.startswith('.')]\n files = files[:self.img_num] # hard coded - will not read in\n for i in files:\n self.all_files.append(os.path.abspath(root) + '/'.join(subFolders) + '/' + i)", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def create_image_lists(image_dir):\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in os.walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n print('in sub loop')\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(image_dir)\n print(\"Looking for images in '\" + image_dir + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' 
+ extension)\n file_list.extend(glob.glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n testing_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n testing_images.append(base_name)\n return testing_images", "def get_images(fish):\n fish_dir = TRAIN_DIR+'{}'.format(fish)\n images = [fish+'/'+im for im in os.listdir(fish_dir)]\n return images", "def load_from_folder(path):\n images = []\n files = os.listdir(path)\n files.sort()\n for file in tqdm(files):\n images.append(io.imread(path + file))\n return images", "def preprocess_images(file_path, new_file_path):\n if not os.path.isdir(new_file_path):\n os.mkdir(new_file_path)\n i = 0\n for dir in listdir(file_path):\n j = 0\n for image_path in listdir(file_path + '/' + dir):\n image = open_image(image_path)\n cv2.imwrite(file_path + '/' + image_path + '/' str(i) + '/' +str(i) + '.jpg', image)\n j += 1\n i += 1", "def createAverageImages(self):\n for grabber in self.grabbers:\n callsign = grabber[\"ID\"]\n callMatch = \"%s/%s*\" % (self.downloadFolder, callsign)\n fnameOut = \"%s/%s.%s.jpg\" % (self.averagesFolder, callsign, self.timeCode())\n cmd = \"convert %s -evaluate-sequence Mean %s\" %(callMatch, fnameOut)\n print(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()", "def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list", "def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def execute(args, **kwargs):\n p = set_options()\n a = p.parse_args(args)\n # logging.info(str(a))\n\n ifiles = ImageFiles(a)\n\n if a.info:\n ifiles.describe()\n else:\n ifiles.write()", "def search_images(\n current_dir: str,\n exts={\"jpg\", \"png\", \"jpeg\", \"gif\"}\n) -> typing.Iterable[typing.Tuple[str, str]]:\n for root, _, files in os.walk(current_dir):\n for file_name in files:\n ext = file_name.rsplit('.', 1)[-1].lower()\n if ext in exts:\n yield os.path.join(root, file_name), file_name", 
"def get_video_as_images():\n experiments = ['me1.mp4']\n try:\n if (os.path.isdir(\"dump\")):\n shutil.rmtree('dump')\n except OSError:\n print (\"Deletion of the directory failed\")\n exit()\n os.system('mkdir dump')\n for experiment in experiments:\n exp_no_ext = experiment.split('.')[0]\n subdir_cmd = \"dump/{0}\".format(exp_no_ext)\n os.mkdir(subdir_cmd)\n os.system('ffmpeg -i videos/%s dump/%s/%s%%03d.jpg' % (experiment, exp_no_ext, exp_no_ext))\n run_all(exp_no_ext)", "def read_images(folder):\n distinct_frames = DistinctFrames()\n\n for file in sorted(sorted(os.listdir(folder)),\n key=len): # sorting files on basis of 1) length and 2) numerical order\n '''\n Sorting is done 2 times because\n if files in the folder are\n 1. image100.pkl\n 2. image22.pkl\n 3. image21.pkl\n firstly sort them to image100.pkl,image21.pkl,image22.pkl then according to length to image21.pkl,image22.pkl,image100.pkl\n '''\n try:\n img_obj = load_from_memory(file, folder)\n time_stamp = img_obj.get_time()\n distinct_frames.add_img_obj(img_obj)\n print(\"Reading image ..\" + str(time_stamp) + \" from \" + folder) # for debug purpose\n except:\n # exception will occur for files like .DS_Store and jpg directory\n continue\n\n if distinct_frames.no_of_frames() != 0:\n distinct_frames.calculate_time()\n\n return distinct_frames", "def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n self.images = []\n self.display_match = False\n self.useBlending = False\n print('found %d images' % len(self.files))", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def __init__(self, data_dir, file_prefix, num_images):\n self.file_prefix = file_prefix\n self.files = [os.path.join(data_dir, '%s%03d.jpg' % (file_prefix, i + 1)) for i in range(num_images)]\n self.files = list(filter(os.path.exists, self.files))\n self.panoramas = None\n self.homographies = None\n print('found %d images' % len(self.files))", "def load_images(self,im_paths,imlist,im_index):\n\n\t\timlist_arr = []\n\t\tj = 0\n\t\tfor im_path in im_paths:\n\t\t\tim = None\n\n\t\t\ttry:\n\t\t\t\tim = Image.open(im_path)\n\t\t\t\t#im = imread(im_path)\n\t\t\t\t#print im.shape\n\t\t\texcept Exception, e:\n\t\t\t\tprint e\n\t\t\t\n\t\t\tif im != None:\n\t\t\t\ttry:\n\t\t\t\t\tim_aux = np.array(im,dtype=theano.config.floatX)\n\t\t\t\t\tim_converted = True\n\t\t\t\texcept TypeError, e:\n\t\t\t\t\tim_converted = False\n\t\t\t\t\tprint e\n\t\t\t\t\n\t\t\t\tif im_converted == True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif im_aux.shape[2] == 4:\n\t\t\t\t\t\t\tbackground = Image.new(\"RGB\", im.size, (255, 255, 255))\n\t\t\t\t\t\t\tbackground.paste(im, mask=im.split()[3]) # 3 is the alpha channel\n\t\t\t\t\t\t\tim = background\n\t\t\t\t\t\t\tim_aux = np.array(background,dtype=theano.config.floatX)\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\tprint e\n\t\t\t\t\t\n\t\t\t\t\ttry:\n\n\t\t\t\t\t\tif im_aux.shape[2] == 3:\n\t\t\t\t\t\t\tbn_parsed = os.path.basename(im_path).split(\"_\")\n\t\t\t\t\t\t\tim_id = int(bn_parsed[0])\n\t\t\t\t\t\t\t#print im_id\n\t\t\t\t\t\t\t#Ignore potential duplicates\n\t\t\t\t\t\t\t#if im_id not in self.im_index:\n\t\t\t\t\t\t\tif im_id not in im_index:\n\t\t\t\t\t\t\t\tim_aux = self.scale_and_crop_img(im)\n\t\t\t\t\t\t\t\t# This is for 
multiprocessing\n\t\t\t\t\t\t\t\tim_index.append(im_id)\n\t\t\t\t\t\t\t\timlist.append(np.asarray(im_aux))\n\n\t\t\t\t\t\t\t\t# Uncomment this if you are not using multiprocessing\n\t\t\t\t\t\t\t\t# self.im_index.append(im_id)\n\t\t\t\t\t\t\t\t# self.imlist.append(np.asarray(im_aux))\n\t\t\t\t\t\t\t\t#self.imlist.append(im_aux)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \"invalid image: {} size:{}\".format(im.filename, im_aux.shape)\n\t\t\n\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t#raise e\n\t\t\t\t\t\tprint e\n\t\n\t\t\t# if self.verbose:\n\t\t\t# \tsys.stdout.write(\"\\r Process: {0}/{1}\".format(j, len(im_paths)))\n\t\t\t# \tsys.stdout.flush()\n\n\t\t\tj += 1", "def load_images(folder_path, num_images):\n imgs = np.zeros(shape=[num_images, 400, 400, 3])\n for i in range(1, num_images + 1):\n image_name = \"satImage_%.3d\" % i\n image_path = folder_path + image_name + \".png\"\n if os.path.isfile(image_path):\n print('Loading ' + image_path)\n img = mpimg.imread(image_path)\n\n #imgs[i - 1] = np.asarray(img).reshape(400, 400, 3)\n imgs[i - 1] = img.reshape(400, 400, 3)\n else:\n print('File ' + image_path + ' does not exist')\n return imgs", "def __call__(self, images, targets):\n pass", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def readImages(image_dir):\n extensions = ['bmp', 'pbm', 'pgm', 'ppm', 'sr', 'ras', 'jpeg',\n 'jpg', 'jpe', 'jp2', 'tiff', 'tif', 'png']\n\n search_paths = [os.path.join(image_dir, '*.' + ext) for ext in extensions]\n image_files = sorted(sum(map(glob, search_paths), []))\n images = [cv2.imread(f, cv2.IMREAD_UNCHANGED | cv2.IMREAD_COLOR) for f in image_files]\n\n bad_read = any([img is None for img in images])\n if bad_read:\n raise RuntimeError(\n \"Reading one or more files in {} failed - aborting.\"\n .format(image_dir))\n\n return images", "def process_images(image_folder: Path) -> List[Dict]:\n images = []\n files = image_folder.glob(\"*.jpg\")\n\n for file_path in files:\n file_name = file_path.name\n file_id = file_name.split(\".jpg\")[0]\n file_id = file_id.split(\"in\")[-1]\n file_id = int(file_id)\n file_id = f\"{file_path.parent.parent.name}_{str(file_id)}\"\n\n width, height = imagesize.get(str(file_path))\n\n image_data = {\"id\": file_id,\n \"width\": width,\n \"height\": height,\n \"filename\": str(file_path)}\n images.append(image_data)\n\n return images", "def main_one(string_path_to_folder, destination_folder):\n # .jpg and .JPG are the same\n # photos = glob.glob(\"C:/Personal/pp2_photo/dataBase/*.JPG\") # Examples of location format\n # pho = glob.glob(\"C:/Personal/pp2_photo/dataBase/*.jpg\")\n photos = glob.glob(string_path_to_folder+\"/*.JPG\")\n print(\"Number of files: \", len(photos))\n for k in photos:\n print(get_photo_date(k))\n process_all(k, destination_folder)", "def getImages(self,Project=\"\"):\n #images = [\"image1.jpg\",\"image2.jpg\",\"image3.jpg\"]\n \n os.chdir(self.dataDir)\n images = glob.glob(\"*.png\")\n \n return images", "def download_images(src_dir, dest_dir):\n # +++your code here+++\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n res=utility(src_dir)\n k=0\n f=file(dest_dir+\"/\"+\"index.html\", 'w')\n f.write(\"<html><body>\")\n for i in res:\n local_name='image'+str(k)\n print \"downloading image%d\" %(k)\n 
urllib.urlretrieve(i, os.path.join(dest_dir, local_name))\n f.write(\"<img src=\"+'\"'+os.path.join(dest_dir, local_name)+'\"'+\">\")\n k+=1\n f.write(\"</body></html>\")\n f.close()\n cmd=\"xdg-open\"+\" \"+'\"'+dest_dir+\"/\"+\"index.html\"+'\"'\n (status, output)=commands.getstatusoutput(cmd)\n sys.exit(1)", "def parse_dir_imgs(root_pth):\n def visit(imgpths, pth, names):\n # Appends detected image filenames to a list.\n imgpths.extend([os.path.join(pth, name) for name in names\n if os.path.splitext(name)[1].lower() in img_exts])\n # Walk down directory tree and get the image file paths\n imgpaths = []\n for dp, foo, names in os.walk(root_pth):\n visit(imgpaths, dp, names)\n # Make lowercased list of imagefilenames\n imgnames = [os.path.split(pth)[1].lower() for pth in imgpaths]\n return imgnames, imgpaths", "def getfiles_from_dir(self,dir):\n assert not os.path.isdir(dir),\"Invalid dir format\"+str(dir)\n print(\"-----Read Dir :\",dir)\n self.files=glob.glob(os.path.join(dir,\"./*.tif\"))", "def print_images_in_statistics(self):\n self._print_images_statistics(self._images_in_folder, self._pose_class_names)", "def my_generator(batch_size, img_dir):\n cat_dirs = glob.glob(img_dir + \"/*\")\n counter = 0\n while True:\n input_images = np.zeros(\n (batch_size, config.height, config.width, 3 * 5))\n output_images = np.zeros((batch_size, config.height, config.width, 3))\n random.shuffle(cat_dirs)\n if (counter+batch_size >= len(cat_dirs)):\n counter = 0\n for i in range(batch_size):\n input_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[0-4]*\") \n imgs = [Image.open(img) for img in sorted(input_imgs)]\n input_images[i] = np.concatenate(imgs, axis=2)\n output_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[5-7]*\")\n imgs = [Image.open(img) for img in sorted(output_imgs)]\n output_images[i] = np.concatenate(imgs, axis=1)\n input_images[i] /= 255.\n output_images[i] /= 255.\n yield (input_images, output_images)\n counter += batch_size", "def run():\n\n today = datetime.now().strftime(\"%Y-%m-%d\")\n log_file = os.path.abspath(\"logs/{}.log\".format(today))\n logger = RsmasLogger(\"pipeline\", log_file)\n\n images = get_list_of_images()\n # LOG: list of images to process\n logger.log(loglevel.INFO, [img.key for img in images])\n\n for im in images:\n\n logger.log(loglevel.INFO, \"Processing image: {}\".format(im.key))\n\n file_path = \"{}/{}\".format(im.bucket_name, im.key)\n full_path = \"{}_full.jpg\"\n mod_path = \"{}_mod.jpg\"\n aws_path = \"{}/{}/{}/{}\"\n try:\n haz_id, haz_name, sat_name, sat_dir, img_type, img_date, center = summary.pull_summary_data(\n \"/vsis3/{}\".format(file_path))\n sat_id = Satellite.from_params(sat_name, bool(sat_dir))\n except:\n # LOG: error in image metadata format\n logger.log(loglevel.ERROR, '\\tThere was an error in the metadata format of the image. Skipping.')\n continue\n\n aws_path = aws_path.format(haz_id, sat_id, img_type, img_date)\n full_path = full_path.format(img_date)\n mod_path = mod_path.format(img_date)\n\n # 1. Read in image file\n with rasterio.open(\"s3://{}\".format(file_path)) as data:\n band = data.read(1)\n img = plot.show(band)\n img.get_figure().savefig(full_path, dpi=300)\n\n # 3. Compress image\n compressed = immanip.compress_image(full_path, compression_amount=0.3)\n\n # 4 - 5. Pad image and add date on image\n text_image = immanip.add_text_to_image(compressed, img_date)\n\n # 6. 
Save image locally\n text_image.save(mod_path.format(img_date))\n mod_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, mod_path))\n full_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, full_path))\n\n tif_path_aws = save.get_s3_url(\"{}/{}\".format(aws_path, im.key))\n\n # LOG: images successfully moved to S3 bucket\n # LOG: mod_path_aws, full_path_aws, tif_path_aws\n\n hazard = Hazard(haz_id, haz_name, HazardType.VOLCANO, Location(center[0], center[1]), Date(img_date), 0)\n satellite = Satellite.from_params(sat_name, bool(sat_dir))\n image = Image(str(randint(1, 10000000)),\n haz_id,\n satellite,\n ImageType.from_string(img_type),\n Date(img_date),\n ImageURL(full_path_aws),\n ImageURL(tif_path_aws),\n ImageURL(mod_path_aws))\n\n try:\n db = Database()\n except ConnectionError:\n logger.log(loglevel.ERROR, \"\\tThere was an error while connecting to the database. Skipping this image.\")\n continue\n\n db.create_new_hazard(hazard)\n db.create_new_satellite(satellite)\n db.create_new_image(image)\n\n db.close()\n\n # LOG: database successfully updated\n logger.log(loglevel.INFO, \"\\tDatabase succesfully updated.\")\n\n save.save_image_s3(mod_path, \"{}/{}\".format(aws_path, mod_path))\n save.save_image_s3(full_path, \"{}/{}\".format(aws_path, full_path))\n save.move_tif(im.key, \"{}/{}\".format(aws_path, im.key))\n\n logger.log(loglevel.INFO, \"\\tImages were successfully uploaded to the S3 bucket\")\n logger.log(loglevel.INFO, \"\\t\\tmod_path_aws: {}\".format(mod_path_aws))\n logger.log(loglevel.INFO, \"\\t\\tfull_path_aws: {}\".format(full_path_aws))\n logger.log(loglevel.INFO, \"\\t\\ttif_path_aws: {}\".format(tif_path_aws))\n\n # LOG: image completed\n logger.log(loglevel.INFO, \"\\tProcessing of {} completed.\".format(im.key))\n\n # LOG: finished processing images\n logger.log(loglevel.INFO, \"Processing complete.\")", "def scan_instances(root_dir, omitsample=False):\n rp = Path(root_dir)\n for instance in rp.iterdir():\n if not instance.is_dir() or \\\n (omitsample and instance.name.startswith('_')):\n continue\n instance = instance.name\n inspath = Path(rp.joinpath(instance))\n versions = [x for x in inspath.iterdir() if x.is_dir()]\n for version in versions:\n version = version.name\n vpath = inspath.joinpath(version)\n imgpath = vpath.joinpath('images.yaml')\n if not imgpath.exists():\n # Omit subfolder without valid images yaml\n continue\n images = yaml.load(open(imgpath), Loader=yaml.FullLoader)\n\n # Validate images meta info\n validate_versioned_image(images, instance, version)", "def _process_image_files(name, cnts, roots, num_shards): \n \n # Break all images into batches with a [ranges[i][0], ranges[i][1]].\n spacing = np.linspace(0, sum(cnts), FLAGS.num_threads + 1).astype(np.int)\n ranges = []\n for i in range(len(spacing) - 1):\n ranges.append([spacing[i], spacing[i + 1]])\n\n # Launch a thread for each batch.\n print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))\n sys.stdout.flush()\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n\n threads = []\n for thread_index in range(len(ranges)):\n args = (thread_index, ranges, name, cnts, roots, num_shards)\n t = threading.Thread(target=_process_image_files_batch, args=args)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print('%s: Finished writing all %d images in data set.' 
%\n (datetime.now(), sum(cnts)))\n sys.stdout.flush()", "def get_images_name(folder):\n onlyfiles = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]\n for f in onlyfiles:\n yield f", "def _fetch_all_images(self, path) -> List[str]:\n files_all = []\n\n for ext in self.exts:\n files_all.extend(glob.glob(join(path, ext)))\n\n return files_all", "def environmentImages(dirPath):\n images = []\n for f in os.listdir(dirPath):\n if os.path.isfile(os.path.join(dirPath, f)):\n name, ext = os.path.splitext(f)\n if ext.lower().replace(\".\", \"\") in [\"hdr\", \"exr\", \"rad\", \"tif\", \"tiff\"]:\n images.append(f)\n return sorted(images)", "def get_images(path_list):\n images = []\n labels = []\n names = []\n i = 0\n for path in path_list:\n for fruit_dir_path in glob.glob(path):\n fruit_label = fruit_dir_path.split(\"/\")[-1]\n for image_path in glob.glob(os.path.join(fruit_dir_path, \"*.jpg\")):\n image = cv2.imread(image_path, cv2.IMREAD_COLOR)\n\n image = cv2.resize(image, (45, 45))\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n\n images.append(image)\n names.append(fruit_label)\n labels.append(i)\n i += 1\n\n images = np.array(images)\n print(images.shape)\n # add a new dimension here\n with np.nditer(images, op_flags=['readwrite']) as it:\n for x in it:\n x = np.expand_dims(x, axis=0)\n labels = np.array(labels)\n return images, labels, i", "def getimagelist(folder):\n imagefolder = Path(folder) \n imagelist = imagefolder.glob(\"**/*.png\") \n return list(imagelist)" ]
[ "0.78397024", "0.7330907", "0.7104293", "0.7006195", "0.691466", "0.68556714", "0.6756341", "0.66847354", "0.6673864", "0.6630209", "0.6581177", "0.65660363", "0.6560143", "0.6554627", "0.65188265", "0.6516782", "0.65092206", "0.65087897", "0.6484184", "0.6457262", "0.6429507", "0.64165634", "0.64098704", "0.6382306", "0.6376104", "0.6375898", "0.6374151", "0.63668245", "0.6362671", "0.6356825", "0.6302827", "0.62979335", "0.6277683", "0.62540597", "0.62536746", "0.62533265", "0.62431663", "0.62353224", "0.6232276", "0.62298965", "0.6217895", "0.6214223", "0.6212463", "0.62061447", "0.6169554", "0.6157002", "0.61516804", "0.61384207", "0.613174", "0.6127325", "0.6125333", "0.61196554", "0.6117453", "0.6113343", "0.6113209", "0.61130047", "0.610069", "0.60854304", "0.6076975", "0.60739297", "0.607039", "0.60700566", "0.60611457", "0.6053095", "0.60482824", "0.6029537", "0.60223204", "0.6004725", "0.6002858", "0.5999105", "0.5998781", "0.5991379", "0.598931", "0.59854674", "0.59779286", "0.5965758", "0.5951301", "0.5951301", "0.5951301", "0.5947858", "0.5943821", "0.5937228", "0.5931801", "0.5923654", "0.59219074", "0.5921137", "0.59187007", "0.59161127", "0.5902397", "0.5896746", "0.5892817", "0.588925", "0.58789515", "0.58787566", "0.58784", "0.5873766", "0.5873123", "0.58681256", "0.58652115", "0.58625835", "0.5861201" ]
0.0
-1
Fixture to clean up logging output before each test.
def before_and_after_each_test(self, caplog):
    #before each test
    # Set to capture logs above INFO
    caplog.set_level(logging.INFO)
    caplog.clear()
    yield
    #after each test
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n clean_temlogger_config()", "def setUp(self):\n self.path = tempfile.mkdtemp()\n self.log = log.Log(self.path)", "def beforeTest(self, test):\n self.setupLoghandler()", "def setUp(self):\n logging.disable(logging.ERROR)", "def setUp(self):\n logging.disable(logging.ERROR)", "def setUp(self):\n logging.disable(logging.ERROR)", "def setUp(self):\n logging.disable(logging.ERROR)", "def setUp(self):\n logging.disable(logging.ERROR)", "def setUp(self):\n self.logger = logging.getLogger(\"dbs test logger\")", "def log_util_fixture():\n with patch(\"camacq.bootstrap.log_util\"):\n yield", "def setUp(self):\n if os.path.isfile(LOGFILENAME):\n os.remove(LOGFILENAME)", "def tearDown(self):\n log.reset_instance()", "def teardown():\n enstools.core.set_behavior(log_level=\"ERROR\")", "def setUp(self):\n self.actualstdout = sys.stdout\n sys.stdout = StringIO.StringIO()", "def setUp(self):\n self._token_checker = TokenChecker()\n logging.disable(logging.CRITICAL)", "def setUp(self):\n # Disable log messages to silence expected warnings\n cfdm.log_level(\"DISABLE\")\n # Note: to enable all messages for given methods, lines or calls (those\n # without a 'verbose' option to do the same) e.g. to debug them, wrap\n # them (for methods, start-to-end internally) as follows:\n # cfdm.log_level('DEBUG')\n # < ... test code ... >\n # cfdm.log_level('DISABLE')", "def setUp(self):\n self.logger = logging.getLogger(glutil.root_package_name)\n self.orig_handlers = self.logger.handlers\n self.logger.handlers = []\n self.level = self.logger.level\n self.logger.level = logging.DEBUG\n\n self.rt_logger = logging.getLogger()\n self.orig_root_handlers = self.rt_logger.handlers\n self.rt_logger.handlers = []\n self.root_level = self.rt_logger.level\n self.rt_logger.level = logging.CRITICAL", "def setUp(self):\n self.log = message(name=\"SYSLOG\")\n self.tool = flow_common_tool()\n self.xml = xml_tool()\n self.ins = security_logging()", "def on_test_begin(self, logs=None):", "def setUp(self):\n # keep log messages from interfering with our tests\n logging.getLogger('COT').setLevel(logging.DEBUG)\n self.logging_handler.setLevel(logging.NOTSET)\n self.logging_handler.flush()\n logging.getLogger('COT').addHandler(self.logging_handler)\n\n self.start_time = time.time()\n # Set a temporary directory for us to write our OVF to\n self.temp_dir = tempfile.mkdtemp(prefix=\"cot_ut\")\n self.temp_file = os.path.join(self.temp_dir, \"out.ovf\")\n logger.debug(\"Created temp dir %s\", self.temp_dir)\n # Monitor the global temp directory to make sure COT cleans up\n self.tmps = set(glob.glob(os.path.join(tempfile.gettempdir(), 'cot*')))\n\n self.validate_output_with_ovftool = True", "def setUp(self):\n\n self.logger_stats = DataScreen()", "def tearDown(self):\n if os.path.isfile(LOGFILENAME):\n os.remove(LOGFILENAME)", "def setUp(self):\n self.tmp = TemporaryDirectory()", "def tearDown(self):\n self.logger.info(\"tearDown begin\")\n self.logger.info(\"tearDown end\\n\")", "def test_logging(self):\n self._verify_logging()", "def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables\n self.app = app.test_client()\n initialize_logging(logging.CRITICAL)", "def setUp(self):\n self._output = io.StringIO()", "def setUp(self):\n super(TestCase, self).setUp()\n # Change the default directory that the tempfile\n # module places temporary 
files and directories in\n self.useFixture(fixtures.NestedTempfile())\n # Create a temporary directory and set it as $HOME in the environment.\n self.useFixture(fixtures.TempHomeDir())\n self.useFixture(tools.StandardLogging())\n self.addCleanup(self._clear_attrs)", "def setUp(self):\n super(TestCase, self).setUp()\n # Change the default directory that the tempfile\n # module places temporary files and directories in\n self.useFixture(fixtures.NestedTempfile())\n # Create a temporary directory and set it as $HOME in the environment.\n self.useFixture(fixtures.TempHomeDir())\n self.useFixture(tools.StandardLogging())\n self.addCleanup(self._clear_attrs)", "def tearDown(self):\n self.logger.handlers = self.orig_handlers\n self.logger.level = self.level\n self.rt_logger.handlers = self.orig_root_handlers\n self.rt_logger.level = self.root_level", "def tearDownFixture(self):\n pass", "def testDefault(self):\n _baseLogger = logging.getLogger(BASE_LOGGER_NAME)\n\n _baseLogger.debug(\"_baseLogger.debug\")\n _baseLogger.info(\"_baseLogger.info\")\n _baseLogger.warning(\"_baseLogger.warning\")\n _baseLogger.error(\"_baseLogger.error\")\n\n rootOutput, baseOutput = self.getLogOutput()\n # Printed for debugging, when test fails:\n print(\"ROOT OUTPUT:\\n'{}'\\nBASE OUTPUT:\\n'{}'\".format(rootOutput, baseOutput))\n\n # No output should be generated in the root logger\n assert rootOutput == \"\"\n # The library logger should default to INFO level\n # (this output will not be visble, because the base logger only has a NullHandler)\n assert \".debug\" not in baseOutput\n assert \".info\" in baseOutput\n assert \".warning\" in baseOutput\n assert \".error\" in baseOutput", "def setUp(self):\n recorder = opentracing.tracer.recorder\n recorder.clear_spans()", "def setUp(self):\n\t\tself.output = self.switchstdout()", "def on_test_end(self, logs=None):", "def debug(self):\r\n self.setUp()\r\n getattr(self, self._testMethodName)()\r\n self.tearDown()\r\n while self._cleanups:\r\n function, args, kwargs = self._cleanups.pop(-1)\r\n function(*args, **kwargs)", "def setUp(self):\n coloredlogs.install(level='DEBUG')", "def test_fixture(request):\n def finalizer():\n teardown()\n request.addfinalizer(finalizer)\n setup()", "def setUp(self):\n # Disable log messages to silence expected warnings\n cfdm.LOG_LEVEL(\"DISABLE\")\n # Note: to enable all messages for given methods, lines or\n # calls (those without a 'verbose' option to do the same)\n # e.g. to debug them, wrap them (for methods, start-to-end\n # internally) as follows:\n #\n # cfdm.LOG_LEVEL('DEBUG')\n # < ... test code ... 
>\n # cfdm.log_level('DISABLE')\n\n self.filename = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_file_c.nc\"\n )", "def test_logs(self):\n # Purge all logs\n log_dir = self.test_config['LOG_DIR']\n pattern = re.compile('^nginx-access-ui.log-(?P<day_of_log>\\d{8})(\\.gz)?$')\n logs = [f for f in os.listdir(log_dir) if re.search(pattern, f)]\n map(os.remove, logs)\n\n # Try to make report without logs\n self.generate_report()\n self.assertTrue(self.check_in_log(\"Not found logs in directory {}\".format(self.test_config['LOG_DIR'])))", "def test_loggers(self):\n pass", "def pytest_logger_stdoutloggers(self, item):", "def test_write_on_loaded(self):\n # Run a first time 100 epochs\n logger = Logger(file=\"test_logs.csv\", nb_epochs=200)\n logger.__print_function__ = mock.Mock()\n for i in range(0, 100):\n i += 1\n logger.epoch(i, lambda: {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)})\n del logger\n\n # Run a second time 100 epochs\n logger_second = Logger(shell=False, file=\"test_logs.csv\", nb_epochs=200)\n logger_second.__print_function__ = mock.Mock()\n self.assertEqual(logger_second.__print_function__.called, False, \"Print should not be called\")\n self.assertEqual(\n logger_second.logs,\n [(i, {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)}) for i in range(1, 101)],\n \"Each first 100 lines should be well written\"\n )\n for i in range(100, 200):\n i += 1\n logger_second.epoch(i, lambda: {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)})\n del logger_second\n\n # Load and read\n logger_reader = Logger(shell=False, file=\"test_logs.csv\", nb_epochs=200)\n self.assertEqual(\n logger_reader.logs,\n [(i, {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)}) for i in range(1, 201)],\n \"Every line + the old one should be well written\"\n )", "def setUp(self):\n self.tmpdir = mkdtemp()", "def teardown(self, log, info):\n raise NotImplementedError", "def clean_up(self) -> None:\n print('Doing some clean-up work...')", "def cleanup():\n default_log_dir = Path(DEFAULT_LOGDIR)\n if default_log_dir.exists():\n shutil.rmtree(default_log_dir)\n yield \"Cleanup\"\n if default_log_dir.exists():\n shutil.rmtree(default_log_dir)", "def setUp(cls):\n\n cls.temp_file_output_series = tempfile.NamedTemporaryFile(delete=True, encoding='utf-8', mode='wt')\n # create empty mmp object each time\n cls.test_mmp_series_object = MMPSeriesObjectClass(cls.mmplogger)", "def teardown_class(cls):\n os.remove(logfilename)", "def tearDown(self):\n sys.stdout = sys.__stdout__\n os.remove(\"file.json\")", "def clean_up(request, tmpdir, monkeypatch, config):\n\n multiproc = tmpdir.mkdir('multiproc')\n monkeypatch.setenv('prometheus_multiproc_dir', str(multiproc))\n orig_client = talisker.sentry._client\n\n yield\n\n talisker.testing.clear_all()\n # some tests mess with the sentry client\n talisker.sentry.set_client(orig_client)\n\n # reset stdlib logging\n talisker.logs.reset_logging()\n talisker.logs.configure_test_logging(logging.FileHandler('/dev/null'))\n\n # reset metrics\n talisker.testing.reset_prometheus()", "def setUp(self):\n self.db_name = '_mongolog_test_dict'\n self.collection_name = 'log_test'\n\n self.configDict = {\n 'version': 1,\n 'handlers': {\n 'mongo': {\n 'class': 'mongolog.handlers.MongoHandler',\n 'db': self.db_name,\n 'collection': self.collection_name,\n 'level': 'INFO'\n }\n },\n 'root': {\n 'handlers': ['mongo'],\n 'level': 'INFO'\n }\n }\n\n self.conn = Connection('localhost')\n 
self.conn.drop_database(self.db_name)", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def test_setup_logging_info(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n setup_logging()\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertFalse(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertFalse(self.boto3_logger.isEnabledFor(LogLevels.INFO))\n self.assertFalse(self.botocore_logger.isEnabledFor(LogLevels.INFO))", "def tearDown(self):\n # Fail if any WARNING/ERROR/CRITICAL logs were generated\n self.logging_handler.assertNoLogsOver(logging.INFO)\n\n logging.getLogger('COT').removeHandler(self.logging_handler)\n\n self.validate_with_ovftool(self.temp_file)\n\n # Delete the temporary directory\n if os.path.exists(self.temp_dir):\n logger.debug(\"Deleting temp dir %s\", self.temp_dir)\n shutil.rmtree(self.temp_dir)\n self.temp_dir = None\n self.temp_file = None\n\n tmps2 = set(glob.glob(os.path.join(tempfile.gettempdir(), 'cot*')))\n delta = tmps2 - self.tmps\n if delta:\n self.fail(\"Temp directory(s) {0} left over after test!\"\n .format(delta))\n\n # Clear output caches for helper commands:\n for helper in helpers.values():\n helper.cached_output.clear()\n\n # Let's try to keep things lean...\n delta_t = time.time() - self.start_time\n if delta_t > 5.0:\n print(\"\\nWARNING: Test {0} took {1:.3f} seconds to execute. 
\"\n \"Consider refactoring it to be more efficient.\"\n .format(self.id(), delta_t))", "def setUp(self):\n\n PyFunceble.load_config()\n\n StdoutBase.setUp(self)\n\n self.file = \"the_file_is_a_ghost\"\n self.to_print = {\n \"basic\": {\"hello\": 5, \"world\": 6, \"here\": 7, \"is\": 8, \"PyFunceble\": 10},\n \"size_constructor\": [5, 6, 7, 8, 9, 10],\n \"basic_string\": \"Hello, World!\",\n \"hosts\": {\"0.0.0.0\": 7, \"hello.world\": 11},\n }\n\n self.file_instance = PyFunceble.helpers.File(self.file)\n self.file_instance.delete()", "def test_creation_logfile(self):\n log_file = os.path.join(DATA_DIR, 'sample_log.txt')\n manager = execution.LogManager('MainThread', log_file)\n LOGGER.debug('Log me!')\n manager.close()\n self.assertEqual(count_lines(log_file), 1)\n os.remove(log_file)", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_RMT_Util:\", self._testMethodName)", "def main():\n fixture_util = FixtureUtil()\n fixture_util.set_stream_handler_logging_level()\n fixture_util.execute()", "def before_run_tests(cls):\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def testSetupLogging(self):\n dftimewolf_recipes.SetupLogging(True)\n logger = logging.getLogger('dftimewolf')\n root_logger = logging.getLogger()\n self.assertEqual(len(logger.handlers), 2)\n self.assertEqual(len(root_logger.handlers), 1)", "def test_write(self):\n logger = Logger(file=\"test_logs.csv\", nb_epochs=100)\n logger.__print_function__ = mock.Mock()\n for i in range(100):\n i += 1\n logger.epoch(i, lambda: {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)})\n\n self.assertEqual(logger.__print_function__.called, True, \"Calling to print should have been done\")\n\n self.assertEqual(\n logger.__print_function__.call_args_list,\n [\n call\n for i in range(1, 101)\n for call in [\n mock.call(\"::: Train Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 0+i),\n mock.call('+\\tkno acc:', 1+i),\n mock.call('+\\tunk acc:', 2+i),\n mock.call(\"::: Dev Scores (lemma) :::\"),\n mock.call('+\\tall acc:', 1+i),\n mock.call('+\\tkno acc:', 2+i),\n mock.call('+\\tunk acc:', 3+i),\n ]\n ]\n )\n del logger\n logger_reader = Logger(file=\"test_logs.csv\", nb_epochs=100)\n self.assertEqual(\n logger_reader.logs,\n [(i, {\"train_lemma\": (0+i, 1+i, 2+i), \"dev_lemma\": (1+i, 2+i, 3+i)}) for i in range(1, 101)],\n \"Every line should be well written\"\n )", "def teardown(self):\n self.tcex.log.trace('teardown')", "def setUp(self):\n self.junk_file = '/tmp/asdfoowersdfuixlwles'", "def setUp(self):\n\n self.file = \"this_file_should_be_deleted\"\n self.data_to_write = [\"Hello World!\", \"Thanks for using PyFunceble\"]\n\n self.expected_hashed = {\n \"md5\": \"ba2e0e1774c2e60e2327f263402facd4\",\n \"sha1\": \"b5c8520cd2c422019997dc6fdbc9cb9d7002356e\",\n \"sha224\": \"863c46d5ed52b439da8f62a791e77c0cbbfb7d92af7c5549279f580d\",\n \"sha384\": \"6492f4b5732e0af4b9edf2c29ee4622c62ee418e5d6e0f34b13cb80560a28256c6e21e949119872d26d2327fc112a63b\", # pylint: disable=line-too-long\n \"sha512\": \"f193ad6ee2cfbecd580225d8e6bfb9df1910e5ca6135b21b03ae208a007f71e9b57b55e299d27157551a18ef4dfdde23c96aaea796064846edc6cd25ac7eaf7f\", # pylint: disable=line-too-long\n }", "def setUp(self):\n self.output = StringIO()\n self.error_listener = TyptErrorListener(self.output)", "def _cleanup_logger(self):\n logger = logging.getLogger('units-converter')\n self._log_handler.flush()\n logger.removeHandler(self._log_handler)", "def teardown_global_logging():\n\n global global_logging_started\n if 
not global_logging_started:\n return\n\n stdout_logger = logging.getLogger(__name__ + '.stdout')\n stderr_logger = logging.getLogger(__name__ + '.stderr')\n if sys.stdout is stdout_logger:\n sys.stdout = sys.stdout.stream\n if sys.stderr is stderr_logger:\n sys.stderr = sys.stderr.stream\n\n # If we still have an unhandled exception go ahead and handle it with the\n # replacement excepthook before deleting it\n exc_type, exc_value, exc_traceback = sys.exc_info()\n if exc_type is not None:\n sys.excepthook(exc_type, exc_value, exc_traceback)\n del exc_type\n del exc_value\n del exc_traceback\n\n del sys.excepthook\n logging.captureWarnings(False)\n\n rawinput = 'input'\n if hasattr(builtins, '_original_raw_input'):\n setattr(builtins, rawinput, builtins._original_raw_input)\n del builtins._original_raw_input\n\n global_logging_started = False", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def setUp(self):\n from logi_circle import LogiCircle\n\n self.logi = LogiCircle(client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n redirect_uri=REDIRECT_URI,\n cache_file=CACHE_FILE,\n api_key=API_KEY)\n self.fixtures = FIXTURES\n self.client_id = CLIENT_ID\n self.client_secret = CLIENT_SECRET\n self.redirect_uri = REDIRECT_URI\n self.cache_file = CACHE_FILE\n\n self.loop = asyncio.new_event_loop()", "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is a critical message\")\n self.info(\"LOGGING: Testing log messages COMPLETE\")\n return", "def setUp(self):\n\n self.level = 42\n self.format = Formatter('%(message)s')", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def test_root_logger(self):\n # root logs are Stream handled\n # log_path = self.log_paths['']\n # log = logging.getLogger('df')\n # ctrl = self.md5(log_path)\n # log.debug(\"test\")\n # assert self.md5(log_path) != ctrl", "def setUpClass(cls):\n server.app.debug = False\n server.initialize_logging(logging.ERROR)", "def teardown(self) -> None:", "def teardown(self) -> None:", "def teardown(self) -> None:", "def setUp(self):\n # Disable log messages to silence expected warnings\n cfdm.log_level(\"DISABLE\")\n # Note: to enable all messages for given methods, lines or\n # calls (those without a 'verbose' option to do the same)\n # e.g. to debug them, wrap them (for methods, start-to-end\n # internally) as follows:\n #\n # cfdm.log_level('DEBUG')\n # < ... test code ... 
>\n # cfdm.log_level('DISABLE')\n\n nc_group_structure_names = [\n None,\n \"/\",\n \"group/...\",\n \"group/\",\n \"group/.../\",\n \"/group/.../\",\n ]\n self.nc_grouped_dimension_names = [\n obj.replace(\"...\", \"ncdim\")\n for obj in nc_group_structure_names\n if obj is not None\n ]\n self.nc_grouped_variable_names = [\n obj.replace(\"...\", \"ncvar\")\n for obj in nc_group_structure_names\n if obj is not None\n ]", "def tearDown(self):\n self.clearTempDir()", "def safe_fixture(request):\n print \"\\n(Starting safe_fixture setup)\"\n\n request.addfinalizer(safe_cleanup)\n\n risky_function()\n\n print \"(Finishing safe_fixture setup)\"", "def pytest_logger_config(self, logger_config):", "def pytest_configure(config):\n disabled = ['gensim.models.word2vec', 'faker.factory']\n for name in disabled:\n logger = logging.getLogger(name)\n logger.propagate = False", "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")", "def setUpModule():\n os.environ['COLOREDLOGS_DEMO_DELAY'] = '0'\n coloredlogs.demo.DEMO_DELAY = 0", "def setUpFixture(self):\n pass", "def test_setup_logging_debug(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n with self.assertLogs(self.f_logger, LogLevels.DEBUG) as setup_ctx:\n setup_logging(LogLevels.DEBUG)\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertTrue(self.boto3_logger.isEnabledFor(LogLevels.DEBUG))\n self.assertTrue(self.botocore_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertEqual(setup_ctx.output,\n [f'DEBUG:f-cli:Initalized logging for f-cli version {__version__}'])", "def begin(self):\n os.mkdir(self.meta)\n\n self.logname = os.path.join(self.rundir, self.meta, 'log')\n self.logfile = open(self.logname, 'a')\n if settings.verbosity >= 3:\n self.logfile = Tee(self.logfile)\n\n if self.test.setup:\n self.setup_script = self._make_setup_script()\n self.steps_script = self._make_steps_script()\n if self.test.teardown:\n self.teardown_script = self._make_teardown_script()", "def setUp(self):\n self.module = os.path.abspath(inspect.getfile(self.__class__))\n self.module_path = os.path.dirname(self.module)\n self.log = logging.getLogger()\n self.temp = tempfile.mkdtemp()\n self.test_site = os.sep.join([self.module_path, \"test_site\"])\n self.test_eac = self.test_site + os.sep + 'eac' + os.sep", "def setUp(self):\n fixtures_dir = os.path.abspath(os.path.join(\n os.path.dirname(__file__), 'fixtures'))\n\n config = get_collector_config('NagiosPerfdataCollector', {\n 'perfdata_dir': fixtures_dir\n })\n\n self.collector = NagiosPerfdataCollector(config, None)\n self.fixtures = os.listdir(fixtures_dir)" ]
[ "0.7646536", "0.7646536", "0.7646536", "0.7646536", "0.7533063", "0.7408682", "0.73239565", "0.73239565", "0.73239565", "0.73239565", "0.73239565", "0.7239843", "0.7204147", "0.7201381", "0.7155247", "0.69071877", "0.6867982", "0.68438387", "0.6788774", "0.675041", "0.6707021", "0.6685629", "0.668506", "0.6614375", "0.6605575", "0.6602095", "0.6590222", "0.65548664", "0.65289927", "0.65284675", "0.64495915", "0.64495915", "0.6384666", "0.63699144", "0.63489294", "0.63424134", "0.6342305", "0.63323826", "0.63281494", "0.63134575", "0.63101035", "0.6305015", "0.628291", "0.62775713", "0.6272578", "0.62662596", "0.6264408", "0.6258929", "0.62574804", "0.624287", "0.62414503", "0.62410176", "0.6239686", "0.6227479", "0.622438", "0.6213858", "0.6213858", "0.6202457", "0.61967576", "0.6186759", "0.6168559", "0.6166105", "0.61645055", "0.61390376", "0.6125171", "0.6124168", "0.6123401", "0.6121141", "0.61139846", "0.61122876", "0.6102313", "0.60965836", "0.6095865", "0.60955507", "0.60955507", "0.60955507", "0.6086535", "0.60838985", "0.6082411", "0.60812235", "0.6077471", "0.6077471", "0.6077471", "0.6062966", "0.6053959", "0.60533756", "0.60533756", "0.60533756", "0.6053071", "0.6050881", "0.6048413", "0.60443604", "0.60382754", "0.6035818", "0.60341835", "0.60148954", "0.60040236", "0.6001138", "0.5999714", "0.59990203" ]
0.6735152
20
Description When is given a directory name that exist Expected Result returns True
def test_has_directory(self, check_fn_true): #setup has_directory = extractor.make_has_directory(os.path.isdir) #when test1 = has_directory("./data/observed") #result assert test1 is True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False", "def test_ensure_dir_exists(self):\n pass", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def _existDir(d):\n\treturn os.path.exists(d)", "def is_dir(self, path):", "def directory_exists(self, directory: str = None) -> bool:\n return os.access(directory if directory else self.get_directory(), os.R_OK)", "def check_dir(dname):\n direc = os.path.dirname(dname)\n try:\n os.stat(direc)\n except:\n os.mkdir(direc)\n print \"Made directory %s....\" % dname\n return dname", "def is_directory(path_name):\n if not is_file(path_name):\n return True\n else:\n return False", "def is_valid_directory(parser, arg):", "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"", "def testIsDir(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingDirPath=P(self.nonExistingDirPathStr)\r\n existingDirNoTrailingSlashPath=P(self.existingDirPathStr[:-1])\r\n existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingInvalidDirSymlinkPath=P(self.existingInvalidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n\r\n # 1\r\n self.assertEquals(existingDirPath.isDir(),True,\r\n '%r is a dir'%str(existingDirPath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingDirPath.isDir(),False,\r\n '%r does not exist'%str(nonExistingDirPath))\r\n\r\n # 3\r\n self.assertEquals(existingDirNoTrailingSlashPath.isDir(),True,\r\n '%r is a dir'%str(existingDirNoTrailingSlashPath))\r\n\r\n # 4\r\n self.assertEquals(existingValidDirSymlinkPath.isDir(),True,\r\n '%r is a dir'%str(existingValidDirSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingInvalidDirSymlinkPath.isDir(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidDirSymlinkPath))\r\n\r\n # 6\r\n self.assertEquals(existingFilePath.isDir(),False,\r\n '%r is a file'%str(existingFilePath))", "def dir_exists(dir: str) -> bool:\n return os.path.isdir(dir)", "def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False", "def checkDir(dirName=None):\r\n if not os.path.exists(dirName):\r\n os.makedirs(dirName)\r\n return 0", "def check_directory(self, directory: str) -> bool:\n return self.run(\"/\", \"root\", [\"test\", \"-d\", directory], check=False).returncode == 0", "def folder_exists(path: str) -> bool:\n\treturn os.path.isdir(path)", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! 
Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def is_dir(self, path: PathLike):", "def dir_exists(self, path):\n return self._dir_model.get_by_name(name=path) != []", "def path_exists(dir):\n if os.path.exists(dir): return 1\n else: return 0", "def isDir(self, fname):\n\t\tif fname in self.getAllDirs():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def _assert_dir_already_exists(dirname):\n\n if not dirname:\n return\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def test_has_directory_log(self, check_fn_true, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/observed\"\n \n #when\n test1 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It was found directory {directory_path}\"", "def dirCheck(dirPath):\n if not os.path.exists(dirPath):\n os.mkdir(dirPath)\n return dirPath", "def request_directory(self, name):\n fp = self.dir / str(name)\n create_dir(fp)\n if not fp.is_dir():\n return False\n return fp", "def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))", "def isdir (self, path):\r\n pass", "def check_folder_exists(location: str) -> bool:\n if os.path.isdir(location):\n return True\n else:\n return False", "def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)", "def dir_exists(self):\r\n if os.path.exists(self.csvdir):\r\n return True\r\n else:\r\n return False", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_dir():\n print('\\n' + err_string + '\\n')\n raise NotADirectoryError", "def check_is_directory(val, name):\n check_path_exists(val, name)\n if not os.path.isdir(val):\n raise ValueError(name + ' of value ' + val + '\" is not a legal directory.')", "def assert_is_dir_and_exists(self):\n if not self.is_dir():\n msg = \"'%s' is not a file or doesn't exists!\" % self\n raise EnvironmentError(msg)", "def is_dir(self, path):\n return self.dir_exists(path)", "def checkDirExists(dirPath):\n if not MyFile.checkFileExists(dirPath):\n MyFile.makeDir(dirPath)", "def _is_directory(input_data) -> bool:\n # TODO(cezequiel): Implement in phase 2.\n _ = input_data\n return False", "def check_dir(path, create = True):\n if os.path.exists(path):\n if os.path.isdir(path):\n return path\n else:\n return False\n if create:\n msg = \"Creating directory: '%s'\" % (path)\n print msg\n log.info(msg)\n os.mkdir(path)\n else:\n return False", "def _is_folder_exists() -> bool:\n\n pwd: str = os.getcwd()\n data_folder: str = os.path.join(pwd, \"data\")\n return os.path.isdir(data_folder)", "def DirExists(folder):\n return os.path.isdir(folder)", "def dir_is_empty(dir):\n if os.path.exists(dir) and os.path.isdir(dir):\n if not os.listdir(dir):\n return True\n else:\n return False\n else:\n print(\"Given Directory don't exists\")", "def directory_exists(destination):\n\n if not os.path.isdir(destination):\n raise RuntimeError('Directory %s does not exists' % (destination))\n\n return True", "def check_charm_dir_exists(charm_dir: Path) -> None:\n assert charm_dir.is_dir()", "def is_directory(filename):\n\n return os.path.isdir(filename)", "def _directory(self):\n dir = 
self.target\n\n if not os.path.exists(dir):\n return os.makedirs(dir)\n return True", "def empty_dir(value):\n return not os.listdir(value)", "def hisdir(file_path: str) -> bool:\n return os.path.isdir(file_path)", "def test_test_directory_identifer_exists(self):\n self.logger.info(\"STEP: Initialize the workspace.\")\n with Workspace(Mock()) as workspace:\n self.workspace = workspace\n\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with identifier \"\n \"'dir1'.\"\n )\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Check that test directory was created and exit the \"\n \"context.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n first_stat = directory.stat()\n\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with the \"\n \"same identifer.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n\n self.logger.info(\"STEP: Verify that the folder was re-used.\")\n self.assertEqual(\n first_stat,\n directory.stat(),\n \"Second directory is not the same as the first directory.\",\n )", "def is_directory(path: str) -> bool:\n return os.path.isdir(path)", "def check_directory_valid(self):\n Util.print_standout(\"check is there haven`t empty directory.\")\n for p, dirs, filename_list in os.walk(self.data_dir):\n for dir_name in dirs:\n if not os.listdir(os.path.join(p, dir_name)):\n Util.print_error(\"There shouldn't be a empty directory in [%s] of [%s]\" % (dir_name, self.data_dir))\n return False\n return True", "def assertDirPresent(self, root_path, path):\n full_path = os.path.join(root_path, path)\n self.assertTrue(os.path.exists(full_path))\n self.assertTrue(os.path.isdir(full_path))", "def __is_dir(path):\n if path[-2:] == \"..\":\n return False\n try:\n os.listdir(path)\n return True\n except OSError:\n return False", "def is_directory(self, directory):\n mgm, directory = self._safe_split_mgm(directory)\n cmd = [ 'xrdfs', mgm, 'stat', '-q', 'IsDir', directory ]\n status = (subprocess.check_output(cmd) == 0)\n if not status:\n logger.info('Directory {0} does not exist'.format(self._join_mgm_lfn(mgm, directory)))\n return status", "def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError", "def checkExistenceDir(path):\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n logger.warning(\n \"Directory {} does not seem to exist, creating one.\".format(path)\n )\n os.mkdir(path)", "def check_dir(path):\n \n if not os.path.exists(path):\n os.makedirs(path)\n print path", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' 
+ os.sep + 'db'} failed\")", "def _assert_dir_exists(dirname):\n\n if not dirname:\n return\n\n if not os.path.exists(dirname):\n text = \"directory %s doesn't exist, so creating\"\n print(\"\\033[93m\" + text % dirname + \"\\033[0m\")\n\n os.makedirs(dirname)\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def is_dir(filename):\n return os.path.isdir(filename)", "def is_dir(argstr):\n arg = Path(argstr)\n return arg.exists() and arg.is_dir()", "def contains_dir_path(file_name: str) -> bool:\n return os.path.sep in file_name", "def is_folder_empty(cls, dir_name: str) -> bool:\n\t\tif os.path.exists(dir_name) and os.path.isdir(dir_name):\n\t\t\treturn not os.listdir(dir_name)\n\t\telse:\n\t\t\traise Exception(f\"Directory {dir_name} doesn't exist\")", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def is_directory(self):\n return self._security_class == \"dir\"", "def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR", "def dir_exists(dirname):\n global datapath\n current_dir = './' + dirname + '/'\n parent_dir = '../' + dirname + '/'\n grandparent_dir = '../../' + dirname + '/'\n if os.path.isdir(current_dir):\n datapath = current_dir\n elif os.path.isdir(parent_dir):\n datapath = parent_dir\n elif os.path.isdir(grandparent_dir):\n datapath = grandparent_dir\n else:\n response = raw_input(\"'{}' directory does not exist. \"\n \"Create it [Y/n]? \".format(dirname))\n if not response or response[0].lower() == 'y':\n directory = os.getcwd()\n if os.path.basename(directory) == 'caribou-data-collection':\n # E.g. /caribou-data-collection/scraper.py\n datapath = current_dir\n os.makedirs(datapath)\n print 'Created', datapath\n elif os.path.basename(directory) in COUNTRIES:\n # E.g. /caribou-data-collection/country/scraper.py\n datapath = parent_dir\n os.makedirs(datapath)\n print 'Created', datapath\n elif os.path.basename(os.path.dirname(directory)) in COUNTRIES:\n # E.g. /caribou-data-collection/country/region/scraper.py\n datapath = grandparent_dir\n os.makedirs(datapath)\n print 'Created', datapath\n else:\n raise RuntimeError('Directory not created. '\n 'Please switch to the caribou-data-collection'\n ' directory and try again.')\n else:\n return False\n return True", "def _dir_empty(path):\n try:\n next(os.scandir(str(path)))\n except StopIteration:\n return True\n return False", "def test_empty(self):\n self.assertFalse(os.path.exists('/'))", "def exist(name: str) -> bool:\n return bool(os.path.exists(name))", "def _check_is_dir(self, path):\n if os.path.isdir(path) and os.path.exists(path):\n self.__save_dir = path\n else:\n print(f'No existing directory found. 
Creating new directory at {path}')\n os.mkdir(path)\n self.__save_dir = path", "def check_exists(self, directory):\n if self.name == 'dropbox':\n directory = dropbox.normalise_path(directory)\n dbx = dropbox.get_dropbox()\n return dropbox.is_folder(dbx, directory)", "def _check_directories(self, dist, component):\n path = join(self.repository, 'dists', dist, component, 'source')\n\n if not isdir(path):\n makedirs(path)", "def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)", "def _is_dir(path: str)->bool:\n if _is_s3(path):\n return path.endswith(\"/\")\n else:\n return os.path.isdir(os.path.abspath(path))", "def check_test_dir(self, dir_name):\n\n # assume that the directory is an absolute path\n orig_name = dir_name\n dir_name = os.path.normpath(os.path.abspath(dir_name)) + \"/\"\n\n if os.path.isdir(dir_name):\n return dir_name\n\n # instead check if it is relative to test top dir?\n dir_name = os.path.normpath(os.path.join(self.testTopDir, dir_name))\n\n if os.path.isdir(dir_name):\n return dir_name\n\n # we failed :(\n self.log.fail(f\"ERROR: {orig_name} is not a valid directory\")", "def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)", "def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")", "def test_verify_path_3(self):\n result = basic.verify_path(str(self.test_directory1), \"dir\")\n self.assertTrue(result)", "def isDirectory( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.isDirectory: Attempting to determine whether %s paths are directories.\" % len( urls ) )\n serviceClient = RPCClient( self.url )\n for url in urls:\n res = serviceClient.getMetadata( url )\n if res['OK']:\n if res['Value']['Exists']:\n if res['Value']['Type'] == 'Directory':\n gLogger.debug( \"DIPStorage.isDirectory: Successfully obtained metadata for %s.\" % url )\n successful[url] = True\n else:\n successful[url] = False\n else:\n failed[url] = 'Directory does not exist'\n else:\n gLogger.error( \"DIPStorage.isDirectory: Failed to get metdata for %s.\" % url, res['Message'] )\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def isDir(self,v):\n v = self.expandvars(v)\n if os.path.isdir(v): return v\n return False", "def dir_empty(dir: str) -> bool:\n if dir_exists(dir) and len(list_files_recursively(dir)) > 0:\n return False\n return True", "def exists(self, path):\n path = path.strip(\"/\")\n if not path: # it's a directory, for all narratives\n return True\n return self.file_exists(path)", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def print_is_directory(dir_name):\n print('pwgrep: {}: is a directory'.format(dir_name))", "def datafolderexist(name):\n folderpath = os.path.join(pathtofolder(), name)\n return os.path.exists(folderpath)", "def check_folder(directory):\n global path_checked\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n path_checked = True", "def checkDirectory(path,logger):\n newPath = completePath(path)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n if (logger):\n print(\"Did not found required directories. 
Creating them...\")\n else:\n if (logger):\n print(\"Found the required directories!\")", "def test_is_summary_directory(self):\n summary_base_dir = tempfile.mkdtemp()\n file_count = 1\n directory_count = 1\n gen_directories_and_files(summary_base_dir, file_count, directory_count)\n\n summary_watcher = SummaryWatcher()\n flag = summary_watcher.is_summary_directory(summary_base_dir, './')\n assert flag\n flag = summary_watcher.is_summary_directory(summary_base_dir, './\\x00')\n assert not flag\n shutil.rmtree(summary_base_dir)", "def check_folder(dir, folder_name): \n \n items = os.listdir(dir)\n if folder_name in items: \n print(' has a folder named: ' + folder_name)\n return True\n else:\n print(' does not have a folder named: ' + folder_name)\n return False", "def IsADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.EISDIR", "def exists(self, prefix, args=()):\n dir_path = self.path(prefix, args)\n return os.path.isdir(dir_path)", "def check_is_dir(path):\n if not os.path.isdir(path):\n raise DirectoryNotFoundError(path)", "def valid(self):\r\n if self.dir_exists and self.files_exist:\r\n return True\r\n else:\r\n return False", "def check_if_dir_exists(path):\n\n # From http://stackoverflow.com/questions/8933237/how-to-find-if-directory-exists-in-python\n return os.path.isdir(path)", "def handle_directory_pre(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n dir = os.path.join(prefix, dir)\n try:\n dir_lstats = os.lstat(dir)\n except WindowsError as e:\n if e.winerror == 3 and len(dir) > hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH:\n self.log.error('Unable to stat dir due to path length > %d characters. Try setting HKLM\\System\\CurrentControlSet\\Control\\FileSystem\\LongPathsEnabled to 1'%hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH)\n else:\n if hydra.is_invalid_windows_filename(dir):\n self.log.error('Directory contains invalid characters or invalid names for Windows: %s'%dir)\n else:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n return True\n except Exception as e:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n if stat.S_ISLNK(dir_lstats.st_mode):\n # We do not want to process a symlink so account for it here as a symlink\n self.stats['symlink_dirs'] += 1\n return True\n return False", "def Exists(self, path: str) -> bool:\n ...", "def check_path(dir_path):\n if not os.path.exists(dir_path):\n os.mkdir(dir_path, 0755)", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)" ]
[ "0.7946343", "0.774155", "0.7725892", "0.7708306", "0.75825566", "0.749166", "0.7445757", "0.74027336", "0.73993707", "0.73776567", "0.73469585", "0.7326901", "0.7297167", "0.7291639", "0.7277154", "0.7249648", "0.72335654", "0.72275263", "0.72248363", "0.71985984", "0.7189949", "0.71663827", "0.71466213", "0.7128672", "0.71263987", "0.71078753", "0.7103186", "0.70789146", "0.7078326", "0.7073804", "0.7043973", "0.7043799", "0.7002266", "0.6990277", "0.69902116", "0.69841796", "0.6978254", "0.69626147", "0.6957187", "0.69452727", "0.69247645", "0.691246", "0.69099337", "0.6908573", "0.6897232", "0.68840265", "0.68825406", "0.68691105", "0.6866902", "0.68610346", "0.6858662", "0.6857859", "0.68227816", "0.6816555", "0.68164504", "0.68110955", "0.6789761", "0.6781115", "0.67619354", "0.6735388", "0.67340386", "0.6733992", "0.67309266", "0.67240804", "0.6719911", "0.6716277", "0.67156166", "0.6714873", "0.67146677", "0.6703393", "0.66946596", "0.66920984", "0.66826445", "0.66814315", "0.6678369", "0.6672053", "0.6668259", "0.66665703", "0.66585666", "0.66538393", "0.66536856", "0.66531515", "0.66484004", "0.6639978", "0.6638394", "0.6637164", "0.66361505", "0.66324705", "0.66230667", "0.66230553", "0.66218597", "0.6610575", "0.6605081", "0.658969", "0.65877897", "0.65761864", "0.65691876", "0.6566334", "0.6558779", "0.6542255" ]
0.7547904
5
Description When is given a directory name that exist Expected Result Shows log that directory was found
def test_has_directory_log(self, check_fn_true, caplog): #setup records = caplog.records has_directory = extractor.make_has_directory(os.path.isdir) directory_path = "./data/observed" #when test1 = has_directory(directory_path) #result assert len(records) == 1 assert records[0].message == f"It was found directory {directory_path}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"", "def checkDirectory(path,logger):\n newPath = completePath(path)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n if (logger):\n print(\"Did not found required directories. Creating them...\")\n else:\n if (logger):\n print(\"Found the required directories!\")", "def test_ensure_dir_exists(self):\n pass", "def checkDir(directory):\n ## test if directory is there\n if not os.path.exists(directory):\n os.mkdir(directory)\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Making new directory: \" + directory + \"\\n\")\n else:\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Found directory: \" + directory + \"\\n\")", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_dir():\n print('\\n' + err_string + '\\n')\n raise NotADirectoryError", "def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)", "def print_is_directory(dir_name):\n print('pwgrep: {}: is a directory'.format(dir_name))", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))", "def check_dir(dname):\n direc = os.path.dirname(dname)\n try:\n os.stat(direc)\n except:\n os.mkdir(direc)\n print \"Made directory %s....\" % dname\n return dname", "def Directory(self) -> str:", "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def test_has_directory(self, check_fn_true):\n\n #setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n \n #when\n test1 = has_directory(\"./data/observed\")\n\n #result\n 
assert test1 is True", "def test_is_summary_directory(self):\n summary_base_dir = tempfile.mkdtemp()\n file_count = 1\n directory_count = 1\n gen_directories_and_files(summary_base_dir, file_count, directory_count)\n\n summary_watcher = SummaryWatcher()\n flag = summary_watcher.is_summary_directory(summary_base_dir, './')\n assert flag\n flag = summary_watcher.is_summary_directory(summary_base_dir, './\\x00')\n assert not flag\n shutil.rmtree(summary_base_dir)", "def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()", "def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False", "def dir_filter(x):\n return os.path.isdir('logs/{}'.format(x))", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def test_test_directory_identifer_exists(self):\n self.logger.info(\"STEP: Initialize the workspace.\")\n with Workspace(Mock()) as workspace:\n self.workspace = workspace\n\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with identifier \"\n \"'dir1'.\"\n )\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Check that test directory was created and exit the \"\n \"context.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n first_stat = directory.stat()\n\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with the \"\n \"same identifer.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n\n self.logger.info(\"STEP: Verify that the folder was re-used.\")\n self.assertEqual(\n first_stat,\n directory.stat(),\n \"Second directory is not the same as the first directory.\",\n )", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")", "def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! 
Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir", "def is_dir(self, path):", "def is_valid_directory(parser, arg):", "def scan_sample_directory(sample_dir: Path) -> None:\n if not (sample_dir / 'README.md').is_file():\n print(f\"WARNING ({sample_dir}): No README.md file\")\n if not (sample_dir / 'main.py').is_file():\n print(f\"ERROR ({sample_dir}): No main.py file\")", "def _check_directories(self, dist, component):\n path = join(self.repository, 'dists', dist, component, 'source')\n\n if not isdir(path):\n makedirs(path)", "def test_error_is_thrown_if_directory_does_not_exist(fs):\n\n output_dir = 'user_specified_directory'\n assert not os.path.exists(output_dir)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(output_dir)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'", "def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)", "def _existDir(d):\n\treturn os.path.exists(d)", "def checkDir(dirName=None):\r\n if not os.path.exists(dirName):\r\n os.makedirs(dirName)\r\n return 0", "def check_dir(path):\n \n if not os.path.exists(path):\n os.makedirs(path)\n print path", "def valid_directory(self, directory):\n\n if os.path.isdir(directory):\n return directory\n else:\n msg = f\"The write directory provided by the user does not exist: {directory}\"\n logging.exception(msg)\n self.close_logger()\n raise NotADirectoryError(msg)", "def scan_directory(self, dirname):\n if not dirname:\n dirname = os.getcwd()\n\n if os.path.exists(dirname):\n for item in os.listdir(dirname):\n item_path = os.path.join(dirname, item)\n if os.path.isfile(item_path):\n self.file_confidence.append(self.confidence(item_path))\n else:\n raise FileNotFoundError('Directory does not exist. Change your path and try again')", "def test_get_result_directories(self):\n pass", "def assert_is_dir_and_exists(self):\n if not self.is_dir():\n msg = \"'%s' is not a file or doesn't exists!\" % self\n raise EnvironmentError(msg)", "def test_isdir(self, remote_mock_dir):\n\n with HdfsHook() as hook:\n assert hook.isdir(posixpath.join(remote_mock_dir, \"subdir\"))\n assert not hook.isdir(posixpath.join(remote_mock_dir, \"test.txt\"))", "def directory_exists(self, directory: str = None) -> bool:\n return os.access(directory if directory else self.get_directory(), os.R_OK)", "def handle_directory_pre(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n dir = os.path.join(prefix, dir)\n try:\n dir_lstats = os.lstat(dir)\n except WindowsError as e:\n if e.winerror == 3 and len(dir) > hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH:\n self.log.error('Unable to stat dir due to path length > %d characters. 
Try setting HKLM\\System\\CurrentControlSet\\Control\\FileSystem\\LongPathsEnabled to 1'%hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH)\n else:\n if hydra.is_invalid_windows_filename(dir):\n self.log.error('Directory contains invalid characters or invalid names for Windows: %s'%dir)\n else:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n return True\n except Exception as e:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n if stat.S_ISLNK(dir_lstats.st_mode):\n # We do not want to process a symlink so account for it here as a symlink\n self.stats['symlink_dirs'] += 1\n return True\n return False", "def logs_directory(self):", "def list_dir(self):\n x = [x for x in os.listdir(self.spath) if os.path.isdir(os.path.join(self.spath, x))]\n if x != [] :\n print (f\"choose one of these : {x}\")", "def test_exists(self, remote_mock_dir):\n\n with HdfsHook() as hook:\n assert hook.exists(posixpath.join(remote_mock_dir, \"subdir\"))\n assert hook.exists(posixpath.join(remote_mock_dir, \"test.txt\"))\n assert not hook.exists(posixpath.join(remote_mock_dir, \"non-existing.txt\"))", "def isdir (self, path):\r\n pass", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname", "def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError", "def test_get_result_directory(self):\n pass", "def check_directory(self, directory: str) -> bool:\n return self.run(\"/\", \"root\", [\"test\", \"-d\", directory], check=False).returncode == 0", "def checkDirExists(dirPath):\n if not MyFile.checkFileExists(dirPath):\n MyFile.makeDir(dirPath)", "def dir_exists(self):\r\n if os.path.exists(self.csvdir):\r\n return True\r\n else:\r\n return False", "def test_nonExistentDir(self):\n e = self.assertRaises(\n IOError, logfile.LogFile, self.name, \"this_dir_does_not_exist\"\n )\n self.assertEqual(e.errno, errno.ENOENT)", "def test_does_static_directory_exist(self):\n does_static_dir_exist = os.path.isdir(self.static_dir)\n does_css_static_dir_exist = os.path.isdir(os.path.join(self.static_dir, 'css'))\n does_js_static_dir_exist = os.path.isdir(os.path.join(self.static_dir, 'js'))\n \n self.assertTrue(does_static_dir_exist, f\"{FAILURE_HEADER}The static directory was not found in the expected location. 
Check and try again.{FAILURE_FOOTER}\")\n self.assertTrue(does_css_static_dir_exist, f\"{FAILURE_HEADER}The css subdirectory was not found in your static directory.{FAILURE_FOOTER}\")\n self.assertTrue(does_js_static_dir_exist, f\"{FAILURE_HEADER}The js subdirectory was not found in your static directory.{FAILURE_FOOTER}\")", "def testDirectoryReturn(self):\n self.assertEqual(\n self.directory,\n self.mr.directory\n )\n\n self.mr._dir = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.directory\n )", "def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)", "def path_exists(dir):\n if os.path.exists(dir): return 1\n else: return 0", "def folderExistsWithTimeOut(dirToCheck, waitIntervalSec, sleepInterSec, outStream):\n\twtime=0\n\tfound=0\n\t#wait for the directory to appear\n\twhile ((wtime<=waitIntervalSec) and (found==0)):\n\t\tif not (os.access (dirToCheck, os.F_OK)):\n\t\t\twtime+=sleepInterSec\n\t\t\ttime.sleep(sleepInterSec)\n\t\t\toutStream.write (\".\")\n\t\telse:\n\t\t\tfound=1\n\t#exit if it times out\n\treturn os.access (dirToCheck, os.F_OK)", "def testIsDir(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingDirPath=P(self.nonExistingDirPathStr)\r\n existingDirNoTrailingSlashPath=P(self.existingDirPathStr[:-1])\r\n existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingInvalidDirSymlinkPath=P(self.existingInvalidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n\r\n # 1\r\n self.assertEquals(existingDirPath.isDir(),True,\r\n '%r is a dir'%str(existingDirPath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingDirPath.isDir(),False,\r\n '%r does not exist'%str(nonExistingDirPath))\r\n\r\n # 3\r\n self.assertEquals(existingDirNoTrailingSlashPath.isDir(),True,\r\n '%r is a dir'%str(existingDirNoTrailingSlashPath))\r\n\r\n # 4\r\n self.assertEquals(existingValidDirSymlinkPath.isDir(),True,\r\n '%r is a dir'%str(existingValidDirSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingInvalidDirSymlinkPath.isDir(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidDirSymlinkPath))\r\n\r\n # 6\r\n self.assertEquals(existingFilePath.isDir(),False,\r\n '%r is a file'%str(existingFilePath))", "def test_scan_dir_not_found(self, dir_path):\n with self.assertRaises(FileNotFoundError):\n self.file_scanner.scan(dir_path)", "def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)", "def dir_exception_handler(dpath: str,\n dryrun: bool,\n dirs_created: list = [],\n overwrite: bool = False) -> bool:\n # If this dir was created during this session, do not create it again\n if dpath in dirs_created:\n return False\n elif os.path.exists(dpath):\n if dryrun == False:\n # Get user input\n while overwrite not in ['Y', 'y', 'N', 'n', True]:\n overwrite = input(f\"\\n*** WARNING: Your directory {dpath} already exists. Overwrite? 
Y/N: \")\n if overwrite == True or overwrite.lower() == 'y':\n print(f\"Your directory {dpath} will be overwritten\")\n shutil.rmtree(dpath)\n return True\n else:\n return False\n else: # If dry run:\n print(f\"\\n*** WARNING: This is a dry run but if you run cp_packager in normal mode,\")\n print(f\"*** your directory {dpath} may be overwritten\")\n else:\n return True", "def check_charm_dir_exists(charm_dir: Path) -> None:\n assert charm_dir.is_dir()", "def test_nodelog_missing_files(self):\n build_dir = self.BUILD_DIR + 'nodelog?pod=abc'\n response = app.get('/build' + build_dir, status=404)\n self.assertIn('Unable to find', response)", "def check_dir(path, create = True):\n if os.path.exists(path):\n if os.path.isdir(path):\n return path\n else:\n return False\n if create:\n msg = \"Creating directory: '%s'\" % (path)\n print msg\n log.info(msg)\n os.mkdir(path)\n else:\n return False", "def test_add1_dir(self):\n try:\n TempfileManager.add_tempfile(tempdir + 'add1')\n self.fail(\n \"Expected IOError because directory 'add1' does not exist\")\n except IOError:\n pass", "def displayPathInfo():\n # TODO: Remove unwanted / unused functions\n\n dirpath = os.getcwd()\n logging.info(\"Current Directory is : \" + dirpath)\n foldername = os.path.basename(dirpath)\n logging.info(\"Directory name is : \" + foldername)", "def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)", "def isPath(self,pin,head=\"check path exist\",exit_on_error=False,logmsg=False):\n p = os.path.abspath(self.expandvars(pin))\n if os.path.isdir(p):\n if logmsg:\n logger.info(head + \"\\n --> dir exist: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n return p\n #--- error no such file\n logger.error(head + \"\\n --> no such directory: {}\\n -> abs dir{:>18} {}\".format(pin,':',p))\n if exit_on_error:\n raise SystemError(self.__MSG_CODE_PATH_NOT_EXIST)\n return False", "def report_dir (self, dir_path):\n print len(os.listdir(dir_path)), 'in archive directory'\n dupset = self.find_dups_for_directory (dir_path)\n keys = dupset.keys()\n keys.sort()\n print '- ', len(keys), 'dups found'\n for key in keys:\n # print '\\n', key.replace(archives_base_path, '')\n dedup_key_path = self.make_deduped_path(key)\n # print '\\n', '{}{}'.format(dedup_key_path, os.path.exists(dedup_key_path) and ' *' or '')\n print '\\n', '{}{}'.format(self.get_dup_display_path(dedup_key_path), os.path.exists(dedup_key_path) and ' *' or '')\n dups = dupset[key]\n for dup in dups:\n dedup_path = self.make_deduped_path(dup)\n # print ' - {}{}'.format(dedup_path, os.path.exists(dedup_path) and ' *' or '')\n print ' - {}{}'.format(self.get_dup_display_path(dedup_path), os.path.exists(dedup_path) and ' *' or '')", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def testListDirectory(self):\n test_file_path = self._GetTestFilePath(['unified_logging'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n expected_directory_entries = [\n '0000000000000030.tracev3',\n '0000000000000f85.tracev3',\n 'timesync',\n 'uuidtext']\n\n directory_entries = sorted(test_helper.ListDirectory(test_file_path))\n 
self.assertEqual(directory_entries, expected_directory_entries)", "def dirCheck(dirPath):\n if not os.path.exists(dirPath):\n os.mkdir(dirPath)\n return dirPath", "def dir_is_empty(dir):\n if os.path.exists(dir) and os.path.isdir(dir):\n if not os.listdir(dir):\n return True\n else:\n return False\n else:\n print(\"Given Directory don't exists\")", "def test_verify_path_3(self):\n result = basic.verify_path(str(self.test_directory1), \"dir\")\n self.assertTrue(result)", "def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )", "def testDirExists(self, mock_dir, mock_exists, mock_listdir):\n mock_dir.return_value = True\n mock_exists.return_value = True\n mock_listdir.return_value = self.files\n\n self.assertEqual(\n self.is_seq,\n self.mr.is_seq\n )\n\n if len(self.seqs) > 0:\n self.assertEqual(\n self.seqs[0],\n self.mr.seq\n )\n else:\n self.assertEqual(\n None,\n self.mr.seq\n )\n\n self.assertEqual(\n self.seqs,\n self.mr.seqs\n )\n\n mock_listdir.assert_called_once_with(self.mr.path)", "def checkExistenceDir(path):\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n logger.warning(\n \"Directory {} does not seem to exist, creating one.\".format(path)\n )\n os.mkdir(path)", "def santityCheckInitialization(self):\r\n\r\n for obj in self.config[\"repos\"]:\r\n if not isdir(obj[\"path\"]):\r\n print(\"ERROR : Initialization Failed missing {} at path {}\".format(obj[\"name\"], obj[\"path\"]))", "def _assert_dir_already_exists(dirname):\n\n if not dirname:\n return\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def directory_exists(destination):\n\n if not os.path.isdir(destination):\n raise RuntimeError('Directory %s does not exists' % (destination))\n\n return True", "def _Run(self, dir_exists):\n with patch(os.path, 'isdir', return_value=dir_exists):\n self.RunStage()", "def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False", "def test_missing_dir_in_custom_log_path(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir) / \"another_dir\" / \"main.log\"\n log_path_matcher = LogPathCorrectnessMatcher(custom_log_path)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n tasks = run_n_simple_tasks(1)\n\n log_path = UsedLogPath(tasks[0])\n assert log_path == log_path_matcher", "def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR", "def _look_for_stat(self, dir):\n\n if os.path.exists('stat.json'):\n self.stat_files.append(os.path.join(dir,'stat.json'))\n return True\n else:\n return False", "def check_directory_valid(self):\n Util.print_standout(\"check is there haven`t empty directory.\")\n for p, dirs, filename_list in os.walk(self.data_dir):\n for dir_name in dirs:\n if not os.listdir(os.path.join(p, dir_name)):\n Util.print_error(\"There shouldn't be a empty directory in [%s] of [%s]\" % (dir_name, self.data_dir))\n return False\n return True", "def _dodir ( self, dirpath, mkdir_p ):\n return", "def test_empty_directory(self):\n\n # create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/empty_directory)\n indexer.index(_path_prefix + 'empty_directory')\n\n # search for few words and check that 
the result is empty\n result = indexer.search(\"\")\n print(result)\n self.assertTrue(result == {})\n\n result = indexer.search(\"hello\")\n self.assertTrue(result == {})\n\n result = indexer.search(\"world\")\n self.assertTrue(result == {})", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)", "def _directory(self):\n dir = self.target\n\n if not os.path.exists(dir):\n return os.makedirs(dir)\n return True", "def pytest_logger_logsdir(self, config):", "def is_dir(self, path: PathLike):", "def test_infodir(self):\n self.chck_triple('infodir')", "def on_dir_changed(self, event):\r\n\r\n if not self.searchin_update:\r\n pth = event.directory\r\n if pth is not None and exists(pth):\r\n self.searchin_update = True\r\n self.m_searchin_text.safe_set_value(pth)\r\n self.searchin_update = False\r\n event.Skip()", "def _check_is_dir(self, path):\n if os.path.isdir(path) and os.path.exists(path):\n self.__save_dir = path\n else:\n print(f'No existing directory found. Creating new directory at {path}')\n os.mkdir(path)\n self.__save_dir = path", "def _assert_dir_exists(dirname):\n\n if not dirname:\n return\n\n if not os.path.exists(dirname):\n text = \"directory %s doesn't exist, so creating\"\n print(\"\\033[93m\" + text % dirname + \"\\033[0m\")\n\n os.makedirs(dirname)\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def __check_exist_path(self):\n if 'path_out' not in self.params:\n raise ValueError('missing \"path_out\" among parameters')\n self.params['path_out'] = update_path(self.params.get('path_out'))\n list_names = [n for n in self.params if any(m in n.lower() for m in ['path', 'dir', 'file'])]\n for n in list_names:\n p = os.path.abspath(os.path.expanduser(self.params[n]))\n if not os.path.exists(p):\n raise FileNotFoundError('given path/file/dir \"%s\" does not exist!' % p)\n self.params[n] = p\n for n in [n for n in self.params if 'exec' in n]:\n # in case you define executable in your home\n if os.path.expanduser(self.params[n]) != self.params[n]:\n self.params[n] = os.path.expanduser(self.params[n])", "def test_base_dir(self):\n old_base_dir = self.path_translator.BASE_REAL_DIR\n self.path_translator.BASE_REAL_DIR = \"/tmp/study\"\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1\".format(self.search.instance))]\n self.assertEqual(result, expected)\n self.path_translator.BASE_REAL_DIR = old_base_dir", "def info_directory(self) -> Optional[str]:\n raise NotImplementedError()" ]
[ "0.74612963", "0.66516936", "0.6648649", "0.6637668", "0.6592905", "0.65451527", "0.6507091", "0.6476768", "0.6445445", "0.64397144", "0.641731", "0.6323137", "0.6317305", "0.6291415", "0.6287829", "0.6264551", "0.62638617", "0.62398607", "0.6232754", "0.6224753", "0.62079227", "0.62067014", "0.62065524", "0.61675984", "0.6151805", "0.61463827", "0.6136915", "0.6132248", "0.61214143", "0.6103149", "0.6100163", "0.6097386", "0.60957223", "0.608083", "0.6065482", "0.60518456", "0.6046728", "0.6035486", "0.60215497", "0.6014043", "0.60073876", "0.600359", "0.59953797", "0.59937644", "0.59937644", "0.59833133", "0.59664774", "0.59642905", "0.59635735", "0.59602094", "0.5958913", "0.5956306", "0.59530866", "0.59317225", "0.5926092", "0.592229", "0.5909389", "0.5897723", "0.58751345", "0.58719367", "0.5862506", "0.5856225", "0.5846284", "0.58240646", "0.58218735", "0.58081585", "0.5803799", "0.58012646", "0.5791056", "0.5790843", "0.57822883", "0.57788306", "0.577749", "0.5776033", "0.57600355", "0.57569546", "0.57556874", "0.5751005", "0.5747697", "0.5743646", "0.57378113", "0.5737012", "0.57324004", "0.57300735", "0.5725536", "0.57232285", "0.571928", "0.57178175", "0.57173747", "0.57130146", "0.5711905", "0.5706118", "0.570385", "0.56910104", "0.568738", "0.5681685", "0.5678763", "0.5678495", "0.56776744", "0.56748974" ]
0.7359447
1
Description: When given a directory name that doesn't exist. Expected Result: returns False.
def test_doesnt_have_directory(self, check_fn_false):
    # setup
    has_directory = extractor.make_has_directory(os.path.isdir)

    # when
    test2 = has_directory("./data/tests")

    # result
    assert test2 is False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False", "def is_directory(path_name):\n if not is_file(path_name):\n return True\n else:\n return False", "def is_valid_directory(parser, arg):", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def test_doesnt_have_directory_log(self, check_fn_false, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/tests\"\n \n #when\n test2 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It wasn't found directory {directory_path}\"", "def is_dir(self, path):", "def testIsDir(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingDirPath=P(self.nonExistingDirPathStr)\r\n existingDirNoTrailingSlashPath=P(self.existingDirPathStr[:-1])\r\n existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingInvalidDirSymlinkPath=P(self.existingInvalidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n\r\n # 1\r\n self.assertEquals(existingDirPath.isDir(),True,\r\n '%r is a dir'%str(existingDirPath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingDirPath.isDir(),False,\r\n '%r does not exist'%str(nonExistingDirPath))\r\n\r\n # 3\r\n self.assertEquals(existingDirNoTrailingSlashPath.isDir(),True,\r\n '%r is a dir'%str(existingDirNoTrailingSlashPath))\r\n\r\n # 4\r\n self.assertEquals(existingValidDirSymlinkPath.isDir(),True,\r\n '%r is a dir'%str(existingValidDirSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingInvalidDirSymlinkPath.isDir(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidDirSymlinkPath))\r\n\r\n # 6\r\n self.assertEquals(existingFilePath.isDir(),False,\r\n '%r is a file'%str(existingFilePath))", "def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! 
Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir", "def test_ensure_dir_exists(self):\n pass", "def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR", "def _is_directory(input_data) -> bool:\n # TODO(cezequiel): Implement in phase 2.\n _ = input_data\n return False", "def _existDir(d):\n\treturn os.path.exists(d)", "def check_dir(dname):\n direc = os.path.dirname(dname)\n try:\n os.stat(direc)\n except:\n os.mkdir(direc)\n print \"Made directory %s....\" % dname\n return dname", "def isDir(self, fname):\n\t\tif fname in self.getAllDirs():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def empty_dir(value):\n return not os.listdir(value)", "def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError", "def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_dir():\n print('\\n' + err_string + '\\n')\n raise NotADirectoryError", "def directory_exists(self, directory: str = None) -> bool:\n return os.access(directory if directory else self.get_directory(), os.R_OK)", "def check_directory(self, directory: str) -> bool:\n return self.run(\"/\", \"root\", [\"test\", \"-d\", directory], check=False).returncode == 0", "def check_is_directory(val, name):\n check_path_exists(val, name)\n if not os.path.isdir(val):\n raise ValueError(name + ' of value ' + val + '\" is not a legal directory.')", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! 
Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def check_directory_valid(self):\n Util.print_standout(\"check is there haven`t empty directory.\")\n for p, dirs, filename_list in os.walk(self.data_dir):\n for dir_name in dirs:\n if not os.listdir(os.path.join(p, dir_name)):\n Util.print_error(\"There shouldn't be a empty directory in [%s] of [%s]\" % (dir_name, self.data_dir))\n return False\n return True", "def test_has_directory(self, check_fn_true):\n\n #setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n \n #when\n test1 = has_directory(\"./data/observed\")\n\n #result\n assert test1 is True", "def IsADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.EISDIR", "def checkDir(dirName=None):\r\n if not os.path.exists(dirName):\r\n os.makedirs(dirName)\r\n return 0", "def isdir (self, path):\r\n pass", "def is_dir(self, path: PathLike):", "def dir_exists(dir: str) -> bool:\n return os.path.isdir(dir)", "def is_directory(self):\n return self._security_class == \"dir\"", "def check_test_dir(self, dir_name):\n\n # assume that the directory is an absolute path\n orig_name = dir_name\n dir_name = os.path.normpath(os.path.abspath(dir_name)) + \"/\"\n\n if os.path.isdir(dir_name):\n return dir_name\n\n # instead check if it is relative to test top dir?\n dir_name = os.path.normpath(os.path.join(self.testTopDir, dir_name))\n\n if os.path.isdir(dir_name):\n return dir_name\n\n # we failed :(\n self.log.fail(f\"ERROR: {orig_name} is not a valid directory\")", "def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")", "def folder_exists(path: str) -> bool:\n\treturn os.path.isdir(path)", "def is_directory(filename):\n\n return os.path.isdir(filename)", "def dir_is_empty(dir):\n if os.path.exists(dir) and os.path.isdir(dir):\n if not os.listdir(dir):\n return True\n else:\n return False\n else:\n print(\"Given Directory don't exists\")", "def _assert_dir_already_exists(dirname):\n\n if not dirname:\n return\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def dirCheck(dirPath):\n if not os.path.exists(dirPath):\n os.mkdir(dirPath)\n return dirPath", "def task_dir_is_valid(task_dir: str) -> bool:\n return True", "def _dir_empty(path):\n try:\n next(os.scandir(str(path)))\n except StopIteration:\n return True\n return False", "def request_directory(self, name):\n fp = self.dir / str(name)\n create_dir(fp)\n if not fp.is_dir():\n return False\n return fp", "def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)", "def __is_dir(path):\n if path[-2:] == \"..\":\n return False\n try:\n os.listdir(path)\n return True\n except OSError:\n return False", "def is_folder_empty(cls, dir_name: str) -> bool:\n\t\tif os.path.exists(dir_name) and os.path.isdir(dir_name):\n\t\t\treturn not os.listdir(dir_name)\n\t\telse:\n\t\t\traise Exception(f\"Directory {dir_name} doesn't exist\")", "def is_directory(path: str) -> bool:\n return os.path.isdir(path)", "def valid_directory(self, directory):\n\n if os.path.isdir(directory):\n return directory\n else:\n msg = f\"The write directory provided by the user does not exist: {directory}\"\n logging.exception(msg)\n self.close_logger()\n raise NotADirectoryError(msg)", "def assert_is_dir_and_exists(self):\n if not self.is_dir():\n msg = \"'%s' is not a file or doesn't exists!\" % self\n 
raise EnvironmentError(msg)", "def dir_exists(self, path):\n return self._dir_model.get_by_name(name=path) != []", "def is_valid(self, value) -> 'True | str':\n err_str = super().is_valid()\n if isinstance(err_str, str):\n return err_str\n if self.must_exists and not os.path.isdir(value):\n return f'The directory \"{value}\" does not exist.'\n return True", "def isDir(self,v):\n v = self.expandvars(v)\n if os.path.isdir(v): return v\n return False", "def check_folder_exists(location: str) -> bool:\n if os.path.isdir(location):\n return True\n else:\n return False", "def test_empty(self):\n self.assertFalse(os.path.exists('/'))", "def hisdir(file_path: str) -> bool:\n return os.path.isdir(file_path)", "def is_directory(self, directory):\n mgm, directory = self._safe_split_mgm(directory)\n cmd = [ 'xrdfs', mgm, 'stat', '-q', 'IsDir', directory ]\n status = (subprocess.check_output(cmd) == 0)\n if not status:\n logger.info('Directory {0} does not exist'.format(self._join_mgm_lfn(mgm, directory)))\n return status", "def dir_exists(self):\r\n if os.path.exists(self.csvdir):\r\n return True\r\n else:\r\n return False", "def emptySegmentDir(recordedVideoDir):\n try:\n if not os.listdir(recordedVideoDir):\n return True\n except:\n return True\n return False", "def is_dir(value):\n if not (type(value) is str and os.path.isdir(value)):\n return False\n else:\n return True", "def is_dir(filename):\n return os.path.isdir(filename)", "def test_matlab_install_dir_absent(self):\n directories = (\"/\", \"/tmp\")\n for dirname in directories:\n with self.subTest(dirname=dirname):\n self.assertNotIn(\"matlab-install\", self.host.file(dirname).listdir())", "def _is_dir(path: str)->bool:\n if _is_s3(path):\n return path.endswith(\"/\")\n else:\n return os.path.isdir(os.path.abspath(path))", "def _directory(self):\n dir = self.target\n\n if not os.path.exists(dir):\n return os.makedirs(dir)\n return True", "def contains_dir_path(file_name: str) -> bool:\n return os.path.sep in file_name", "def is_dir(self, path):\n return self.dir_exists(path)", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def is_directory(self):\n return bool(self.flags & 2)", "def check_is_dir(path):\n if not os.path.isdir(path):\n raise DirectoryNotFoundError(path)", "def handle_directory_pre(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n dir = os.path.join(prefix, dir)\n try:\n dir_lstats = os.lstat(dir)\n except WindowsError as e:\n if e.winerror == 3 and len(dir) > hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH:\n self.log.error('Unable to stat dir due to path length > %d characters. 
Try setting HKLM\\System\\CurrentControlSet\\Control\\FileSystem\\LongPathsEnabled to 1'%hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH)\n else:\n if hydra.is_invalid_windows_filename(dir):\n self.log.error('Directory contains invalid characters or invalid names for Windows: %s'%dir)\n else:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n return True\n except Exception as e:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n if stat.S_ISLNK(dir_lstats.st_mode):\n # We do not want to process a symlink so account for it here as a symlink\n self.stats['symlink_dirs'] += 1\n return True\n return False", "def dir_empty(dir: str) -> bool:\n if dir_exists(dir) and len(list_files_recursively(dir)) > 0:\n return False\n return True", "def _valid_dir(path, description):\r\n _path_exists(path, description)\r\n\r\n if not os.path.isdir(path):\r\n raise ValueError('{0} is not directory.'.format(description))", "def _is_folder_exists() -> bool:\n\n pwd: str = os.getcwd()\n data_folder: str = os.path.join(pwd, \"data\")\n return os.path.isdir(data_folder)", "def checkDirExists(dirPath):\n if not MyFile.checkFileExists(dirPath):\n MyFile.makeDir(dirPath)", "def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)", "def _check_directories(self, dist, component):\n path = join(self.repository, 'dists', dist, component, 'source')\n\n if not isdir(path):\n makedirs(path)", "def directory_exists(destination):\n\n if not os.path.isdir(destination):\n raise RuntimeError('Directory %s does not exists' % (destination))\n\n return True", "def testNotADirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"not_a_directory\")", "def isDirNotSymlink(d):\n try:\n return stat.S_ISDIR(os.lstat(d).st_mode)\n except OSError, e:\n if e.errno == errno.ENOENT:\n return False\n raise", "def DirExists(folder):\n return os.path.isdir(folder)", "def path_exists(dir):\n if os.path.exists(dir): return 1\n else: return 0", "def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)", "def _check_is_dir(self, path):\n if os.path.isdir(path) and os.path.exists(path):\n self.__save_dir = path\n else:\n print(f'No existing directory found. 
Creating new directory at {path}')\n os.mkdir(path)\n self.__save_dir = path", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def check_path_to_directory(path_to_directory, logger):\n logger.info('Checking the entered path...')\n if os.path.isdir(path_to_directory) is False:\n logger.error('Entered path is invalid: not a folder')\n raise NotADirectoryError('Entered path is invalid: folder does not exist')\n else:\n return True", "def check_charm_dir_exists(charm_dir: Path) -> None:\n assert charm_dir.is_dir()", "def _isdir(dirname):\n if sys.platform[:3] == 'win' and dirname[:2] == r'\\\\':\n if os.path.exists(dirname):\n return os.path.isdir(dirname)\n try:\n os.listdir(dirname)\n except WindowsError:\n return 0\n else:\n return os.path.ismount(dirname)\n else:\n return os.path.isdir(dirname)", "def is_folder_not_found(self):\n return self._tag == 'folder_not_found'", "def is_folder_not_found(self):\n return self._tag == 'folder_not_found'", "def non_empty_folder(folder):\n if not os.path.exists(folder):\n return False\n if not os.path.isdir(folder):\n return False\n if not os.listdir(folder):\n return False\n return True", "def test_has_directory_log(self, check_fn_true, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/observed\"\n \n #when\n test1 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It was found directory {directory_path}\"", "def test_error_is_thrown_if_directory_does_not_exist(fs):\n\n output_dir = 'user_specified_directory'\n assert not os.path.exists(output_dir)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(output_dir)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)", "def is_directory(dirarg):\n if not os.path.isdir(dirarg):\n raise argparse.ArgumentError(\n \"The directory '{0}' does not exist!\".format(dirarg))\n return dirarg", "def print_is_directory(dir_name):\n print('pwgrep: {}: is a directory'.format(dir_name))", "def is_directory_empty(directory: Path) -> bool:\n has_contents = next(directory.iterdir(), None)\n return has_contents is None", "def is_dir(argstr):\n arg = Path(argstr)\n return arg.exists() and arg.is_dir()", "def test_is_summary_directory(self):\n summary_base_dir = tempfile.mkdtemp()\n file_count = 1\n directory_count = 1\n gen_directories_and_files(summary_base_dir, file_count, directory_count)\n\n summary_watcher = SummaryWatcher()\n flag = summary_watcher.is_summary_directory(summary_base_dir, './')\n assert flag\n flag = summary_watcher.is_summary_directory(summary_base_dir, './\\x00')\n assert not flag\n shutil.rmtree(summary_base_dir)", "def valid(self):\r\n if self.dir_exists and self.files_exist:\r\n return True\r\n else:\r\n return False", "def _check_path(path):\n os.system(\"if [ ! -d \" + path + \" ]; then mkdir -p \" + path + \"; fi\")" ]
[ "0.7637506", "0.7587286", "0.75317335", "0.7509946", "0.7493378", "0.74204826", "0.74024594", "0.7333082", "0.7327076", "0.7321167", "0.73203564", "0.72724897", "0.7250266", "0.7245775", "0.7242602", "0.7218288", "0.7178371", "0.71603537", "0.71599776", "0.714316", "0.7106386", "0.71054476", "0.70886093", "0.7052941", "0.7050756", "0.7017541", "0.70102763", "0.7010186", "0.7003531", "0.6987885", "0.6962981", "0.69626087", "0.6960103", "0.69271445", "0.6920309", "0.69097805", "0.68982005", "0.68937576", "0.68815905", "0.68810666", "0.68777394", "0.68640804", "0.68524444", "0.6850589", "0.68487895", "0.6839872", "0.6837959", "0.6822182", "0.6820661", "0.6816999", "0.681273", "0.68083817", "0.6798223", "0.67909884", "0.6790386", "0.67677057", "0.67645013", "0.67552924", "0.6752958", "0.67418253", "0.6730052", "0.6719804", "0.6717316", "0.6702772", "0.66984016", "0.66984016", "0.66959393", "0.66915596", "0.6675447", "0.66689634", "0.6662592", "0.6662357", "0.6659558", "0.6657937", "0.66551", "0.6649732", "0.6645088", "0.66400856", "0.66259056", "0.6610219", "0.6605192", "0.65898365", "0.65891063", "0.6572891", "0.6548014", "0.6538496", "0.65306693", "0.65306693", "0.65268475", "0.6526733", "0.6522761", "0.6522006", "0.65175223", "0.651525", "0.6512375", "0.65007377", "0.6491481", "0.648983", "0.64830875", "0.64667934" ]
0.7918296
0
Description: When given a directory name that doesn't exist. Expected Result: shows a log that the directory wasn't found.
def test_doesnt_have_directory_log(self, check_fn_false, caplog):
    #setup
    records = caplog.records
    has_directory = extractor.make_has_directory(os.path.isdir)
    directory_path = "./data/tests"

    #when
    test2 = has_directory(directory_path)

    #result
    assert len(records) == 1
    assert records[0].message == f"It wasn't found directory {directory_path}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testNoSuchDirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"no_such_directory\")", "def test_ensure_dir_exists(self):\n pass", "def check_dir(directory: str, err_string: str) -> None:\n if not pathlib.Path(directory).is_dir():\n print('\\n' + err_string + '\\n')\n raise NotADirectoryError", "def test_error_is_thrown_if_directory_does_not_exist(fs):\n\n output_dir = 'user_specified_directory'\n assert not os.path.exists(output_dir)\n\n with pytest.raises(NotADirectoryError) as exception_info:\n verify_output_dir(output_dir)\n\n assert exception_info.value.args[0] == 'The \"user_specified_directory\" directory, which was specified by ' \\\n 'the --output-dir command-line argument, is not an existing directory. ' \\\n 'Please either create that directory or specify a different one.'", "def check_dir(dirname):\n print('Checking directory...{}'.format(dirname))\n if dirname is not None and not is_dir(dirname):\n raise FileNotFoundError('{} is not a valid directory'.format(dirname))", "def test_nonExistentDir(self):\n e = self.assertRaises(\n IOError, logfile.LogFile, self.name, \"this_dir_does_not_exist\"\n )\n self.assertEqual(e.errno, errno.ENOENT)", "def test_doesnt_have_directory(self, check_fn_false):\n\n # setup\n has_directory = extractor.make_has_directory(os.path.isdir)\n\n # when\n test2 = has_directory(\"./data/tests\")\n\n # result\n assert test2 is False", "def test_has_directory_log(self, check_fn_true, caplog):\n\n #setup\n records = caplog.records\n has_directory = extractor.make_has_directory(os.path.isdir)\n directory_path = \"./data/observed\"\n \n #when\n test1 = has_directory(directory_path)\n\n #result\n assert len(records) == 1\n assert records[0].message == f\"It was found directory {directory_path}\"", "def check_if_dir_exists():\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"logs\")\n logger.debug(\"Dir for logs has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'logs'} failed\")\n\n if not os.path.exists(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\"):\n try:\n os.mkdir(str(__CURRENT_DIRECTORY) + os.sep + \"..\" + os.sep + \"db\")\n logger.debug(\"Dir for DB has been created\")\n except OSError:\n logger.debug(f\"Creation of the directory {str(__CURRENT_DIRECTORY) + os.sep + '..' + os.sep + 'db'} failed\")", "def check_dir(dname):\n direc = os.path.dirname(dname)\n try:\n os.stat(direc)\n except:\n os.mkdir(direc)\n print \"Made directory %s....\" % dname\n return dname", "def validate(self):\n if not self.path.is_dir() or not self.path.exists():\n raise NotADirectoryError", "def check_dir_exist(scheme):\n if os.path.exists(scheme.prefix) is False:\n print(\"ERROR: Required directory '{}' is missing! Exiting!\").format(scheme.prefix)\n sys.exit(1)", "def test_scan_dir_not_found(self, dir_path):\n with self.assertRaises(FileNotFoundError):\n self.file_scanner.scan(dir_path)", "def testNotADirectory(self):\n\n self.assertRaises(OSError,\n parse_package,\n \"not_a_directory\")", "def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR", "def check_dir(filedir, olddir): # Yasemin's code\r\n\tgoodname = False\r\n\twhile goodname == False:\r\n\t\tif exists(filedir + olddir):\r\n\t\t\tprint(\"Directory already exists! 
Please pick a knew directory name for old lab files:\")\r\n\t\t\tolddir = input(\"> \")\r\n\t\t\tolddir = name_check(olddir)\r\n\t\telse:\r\n\t\t\tgoodname = True\r\n\treturn olddir", "def checkDir(directory):\n ## test if directory is there\n if not os.path.exists(directory):\n os.mkdir(directory)\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Making new directory: \" + directory + \"\\n\")\n else:\n sys.out = open(directory + '/' + str(time.time()) + '.log', 'w')\n print(\"Found directory: \" + directory + \"\\n\")", "def test_log_dir(self):\n false_dir = '/tmp/any'\n self.test_config['LOG_DIR'] = false_dir\n self.write_config_to_file()\n self.log = nginx_log_generator()\n self.generate_report()\n # Check our log\n path_to_log = '{}/assets/{}'.format(self.test_dir, 'analyzer.log')\n with open(path_to_log) as f:\n log_content = f.read()\n self.assertTrue(\"Sorry, directory {} wasn't found\".format(false_dir) in log_content)", "def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )", "def valid_directory(self, directory):\n\n if os.path.isdir(directory):\n return directory\n else:\n msg = f\"The write directory provided by the user does not exist: {directory}\"\n logging.exception(msg)\n self.close_logger()\n raise NotADirectoryError(msg)", "def assert_is_dir_and_exists(self):\n if not self.is_dir():\n msg = \"'%s' is not a file or doesn't exists!\" % self\n raise EnvironmentError(msg)", "def test_non_existing_directory_raises_when_metavar_is_dir_for_db_export_cleaned(self):\n with contextlib.redirect_stderr(io.StringIO()) as stderr:\n with pytest.raises(SystemExit):\n parser = cli_parser.get_parser()\n parser.parse_args([\"db\", \"export-archived\", \"--output-path\", \"/non/existing/directory\"])\n error_msg = stderr.getvalue()\n\n assert error_msg == (\n \"\\nairflow db export-archived command error: The directory \"\n \"'/non/existing/directory' does not exist!, see help above.\\n\"\n )", "def checkDirectory(path,logger):\n newPath = completePath(path)\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n if (logger):\n print(\"Did not found required directories. 
Creating them...\")\n else:\n if (logger):\n print(\"Found the required directories!\")", "def test_process_args_should_reject_non_existent_input_directory(self, arg_dict):\n self.use_source_path(arg_dict, 'sample/directory_does_not_exist/')\n self.use_resolution_val(arg_dict, 600)\n\n with pytest.raises(FileNotFoundError):\n change_resolution.process_args(arg_dict)", "def _check_directories(self, dist, component):\n path = join(self.repository, 'dists', dist, component, 'source')\n\n if not isdir(path):\n makedirs(path)", "def is_valid_directory(parser, arg):", "def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def _is_valid_log_dir(log_dir):\n return os.path.isdir(log_dir)", "def checkDir(dirName=None):\r\n if not os.path.exists(dirName):\r\n os.makedirs(dirName)\r\n return 0", "def test_matlab_install_dir_absent(self):\n directories = (\"/\", \"/tmp\")\n for dirname in directories:\n with self.subTest(dirname=dirname):\n self.assertNotIn(\"matlab-install\", self.host.file(dirname).listdir())", "def check_dir(self):\n\n if not os.path.isdir(self.directory):\n os.mkdir(self.directory, 755)\n if not os.path.exists(self.log_file):\n from pathlib import Path\n Path(self.log_file).touch()", "def test_invalid_dir(self):\n self.assertRaises(OSError, awstats_reader.AwstatsReader, '/tmp/XYZ', 'example.com')", "def test_missing_dir(self):\n testargs = [\"python\", \"scripts/smac\", \"--restore_state\",\n \"nonsense_test_dir\", \"--scenario_file\",\n self.scenario_two, \"--verbose\", \"DEBUG\"]\n with mock.patch.object(sys, 'argv', testargs):\n self.assertRaises(FileNotFoundError, self.smaccli.main_cli)", "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def dir_exception_handler(dpath: str,\n dryrun: bool,\n dirs_created: list = [],\n overwrite: bool = False) -> bool:\n # If this dir was created during this session, do not create it again\n if dpath in dirs_created:\n return False\n elif os.path.exists(dpath):\n if dryrun == False:\n # Get user input\n while overwrite not in ['Y', 'y', 'N', 'n', True]:\n overwrite = input(f\"\\n*** WARNING: Your directory {dpath} already exists. Overwrite? 
Y/N: \")\n if overwrite == True or overwrite.lower() == 'y':\n print(f\"Your directory {dpath} will be overwritten\")\n shutil.rmtree(dpath)\n return True\n else:\n return False\n else: # If dry run:\n print(f\"\\n*** WARNING: This is a dry run but if you run cp_packager in normal mode,\")\n print(f\"*** your directory {dpath} may be overwritten\")\n else:\n return True", "def dir_exists(self, path):\n if not path:\n return True\n return False", "def Directory(self) -> str:", "def test_is_summary_directory(self):\n summary_base_dir = tempfile.mkdtemp()\n file_count = 1\n directory_count = 1\n gen_directories_and_files(summary_base_dir, file_count, directory_count)\n\n summary_watcher = SummaryWatcher()\n flag = summary_watcher.is_summary_directory(summary_base_dir, './')\n assert flag\n flag = summary_watcher.is_summary_directory(summary_base_dir, './\\x00')\n assert not flag\n shutil.rmtree(summary_base_dir)", "def test_add1_dir(self):\n try:\n TempfileManager.add_tempfile(tempdir + 'add1')\n self.fail(\n \"Expected IOError because directory 'add1' does not exist\")\n except IOError:\n pass", "def check_dir(dir):\n if not os.path.exists(dir):\n print(\"[+] Creating directory for target..\")\n os.makedirs(dir)", "def testDirDoesNotExist(self, mock_dir, mock_exists):\n mock_dir.return_value = True\n mock_exists.return_value = False\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n self.mr._get_sequences\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n self.assertEqual(\n False,\n self.mr.is_seq\n )\n\n self.assertEqual(\n [],\n self.mr.seqs\n )\n\n self.assertEqual(\n None,\n self.mr.seq\n )", "def is_folder_not_found(self):\n return self._tag == 'folder_not_found'", "def is_folder_not_found(self):\n return self._tag == 'folder_not_found'", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def dir_filter(x):\n return os.path.isdir('logs/{}'.format(x))", "def check_test_dir(self, dir_name):\n\n # assume that the directory is an absolute path\n orig_name = dir_name\n dir_name = os.path.normpath(os.path.abspath(dir_name)) + \"/\"\n\n if os.path.isdir(dir_name):\n return dir_name\n\n # instead check if it is relative to test top dir?\n dir_name = os.path.normpath(os.path.join(self.testTopDir, dir_name))\n\n if os.path.isdir(dir_name):\n return dir_name\n\n # we failed :(\n self.log.fail(f\"ERROR: {orig_name} is not a valid directory\")", "def test_empty(self):\n self.assertFalse(os.path.exists('/'))", "def _assert_dir_already_exists(dirname):\n\n if not dirname:\n return\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def checkDirExists(dirPath):\n if not MyFile.checkFileExists(dirPath):\n MyFile.makeDir(dirPath)", "def print_is_directory(dir_name):\n print('pwgrep: {}: is a directory'.format(dir_name))", "def test_missing_dir_in_custom_log_path(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir) / \"another_dir\" / \"main.log\"\n log_path_matcher = LogPathCorrectnessMatcher(custom_log_path)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n tasks = run_n_simple_tasks(1)\n\n 
log_path = UsedLogPath(tasks[0])\n assert log_path == log_path_matcher", "def test_supply_directory(self):\n supplied_value = '/tmp'\n returned_value = generic.check_path(supplied_value)\n\n self.assertEqual(supplied_value, returned_value)", "def handle_directory_pre(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n dir = os.path.join(prefix, dir)\n try:\n dir_lstats = os.lstat(dir)\n except WindowsError as e:\n if e.winerror == 3 and len(dir) > hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH:\n self.log.error('Unable to stat dir due to path length > %d characters. Try setting HKLM\\System\\CurrentControlSet\\Control\\FileSystem\\LongPathsEnabled to 1'%hydra.Utils.MAX_WINDOWS_FILEPATH_LENGTH)\n else:\n if hydra.is_invalid_windows_filename(dir):\n self.log.error('Directory contains invalid characters or invalid names for Windows: %s'%dir)\n else:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n return True\n except Exception as e:\n self.log.exception(e)\n self.stats['error_stat_dirs'] += 1\n if stat.S_ISLNK(dir_lstats.st_mode):\n # We do not want to process a symlink so account for it here as a symlink\n self.stats['symlink_dirs'] += 1\n return True\n return False", "def checkPath(self, filename):\r\n if (not os.path.exists(filename)):\r\n filename = os.getenv('MDLROOT')+'/'+filename\r\n if (not os.path.exists(filename)):\r\n print \"[MDL] ERROR, FILE\", filename, \"DOES NOT EXIST.\"\r\n sys.exit(1)\r\n return filename", "def test_reference_test_dir_has_no_subdir_for_repo(self, default_hooks):\n result = default_hooks.act_on_cloned_repo(NO_TEST_DIR_REPO)\n\n assert result.status == Status.ERROR\n assert \"no reference test directory for\" in result.msg", "def __is_directory_name(filename):\n return filename[-1] == '/'", "def is_valid_directory(args):\n if args.directory is not None:\n return True\n return False", "def IsADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.EISDIR", "def test_nodelog_missing_files(self):\n build_dir = self.BUILD_DIR + 'nodelog?pod=abc'\n response = app.get('/build' + build_dir, status=404)\n self.assertIn('Unable to find', response)", "def test_test_directory_identifer_exists(self):\n self.logger.info(\"STEP: Initialize the workspace.\")\n with Workspace(Mock()) as workspace:\n self.workspace = workspace\n\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with identifier \"\n \"'dir1'.\"\n )\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Check that test directory was created and exit the \"\n \"context.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n first_stat = directory.stat()\n\n with workspace.test_directory(\"dir1\") as directory:\n self.logger.info(\n \"STEP: Enter a test directory in a context manager with the \"\n \"same identifer.\"\n )\n if not directory.exists() and directory.is_dir():\n raise Exception(\"Test directory was not properly created.\")\n\n self.logger.info(\"STEP: Verify that the folder was re-used.\")\n self.assertEqual(\n first_stat,\n directory.stat(),\n \"Second directory is not the same as the first directory.\",\n )", "def testIsDir(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n existingDirPath=P(self.existingDirPathStr)\r\n nonExistingDirPath=P(self.nonExistingDirPathStr)\r\n existingDirNoTrailingSlashPath=P(self.existingDirPathStr[:-1])\r\n 
existingValidDirSymlinkPath=P(self.existingValidSymlinkDirPathStr)\r\n existingInvalidDirSymlinkPath=P(self.existingInvalidSymlinkDirPathStr)\r\n existingFilePath=P(self.existingFilePathStr)\r\n\r\n # 1\r\n self.assertEquals(existingDirPath.isDir(),True,\r\n '%r is a dir'%str(existingDirPath))\r\n\r\n # 2\r\n self.assertEquals(nonExistingDirPath.isDir(),False,\r\n '%r does not exist'%str(nonExistingDirPath))\r\n\r\n # 3\r\n self.assertEquals(existingDirNoTrailingSlashPath.isDir(),True,\r\n '%r is a dir'%str(existingDirNoTrailingSlashPath))\r\n\r\n # 4\r\n self.assertEquals(existingValidDirSymlinkPath.isDir(),True,\r\n '%r is a dir'%str(existingValidDirSymlinkPath))\r\n\r\n # 5\r\n self.assertEquals(existingInvalidDirSymlinkPath.isDir(),False,\r\n '%r is an invalid symlink'\r\n %str(existingInvalidDirSymlinkPath))\r\n\r\n # 6\r\n self.assertEquals(existingFilePath.isDir(),False,\r\n '%r is a file'%str(existingFilePath))", "def test_valid_dir_raises():\n with pytest.raises(ValueError):\n assert cli._valid_dir(__file__)", "def check_charm_dir_exists(charm_dir: Path) -> None:\n assert charm_dir.is_dir()", "def check_dir(path):\n \n if not os.path.exists(path):\n os.makedirs(path)\n print path", "def test_test_directory_no_workspace(self):\n self.logger.info(\"STEP: Enter a test directory without a workspace.\")\n self.workspace = Workspace(Mock())\n self.logger.info(\"STEP: Verify that an exception was raised.\")\n with self.assertRaises(Exception):\n with self.workspace.test_directory(\"dir1\"):\n pass", "def check_directory_valid(self):\n Util.print_standout(\"check is there haven`t empty directory.\")\n for p, dirs, filename_list in os.walk(self.data_dir):\n for dir_name in dirs:\n if not os.listdir(os.path.join(p, dir_name)):\n Util.print_error(\"There shouldn't be a empty directory in [%s] of [%s]\" % (dir_name, self.data_dir))\n return False\n return True", "def test_bad_log_dir():\n with pytest.warns(LoggerWarning):\n log_file = '/abc/log.log'\n logger = init_logger(__name__, log_file=log_file)\n assert len(logger.handlers) == 1\n assert logger.handlers[0].name == 'stream'\n assert LOGGERS.loggers[__name__]['log_file'] is None\n\n LOGGERS.clear()", "def dirCheck(dirPath):\n if not os.path.exists(dirPath):\n os.mkdir(dirPath)\n return dirPath", "def checkExistenceDir(path):\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n logger.warning(\n \"Directory {} does not seem to exist, creating one.\".format(path)\n )\n os.mkdir(path)", "def test_does_static_directory_exist(self):\n does_static_dir_exist = os.path.isdir(self.static_dir)\n does_css_static_dir_exist = os.path.isdir(os.path.join(self.static_dir, 'css'))\n does_js_static_dir_exist = os.path.isdir(os.path.join(self.static_dir, 'js'))\n \n self.assertTrue(does_static_dir_exist, f\"{FAILURE_HEADER}The static directory was not found in the expected location. 
Check and try again.{FAILURE_FOOTER}\")\n self.assertTrue(does_css_static_dir_exist, f\"{FAILURE_HEADER}The css subdirectory was not found in your static directory.{FAILURE_FOOTER}\")\n self.assertTrue(does_js_static_dir_exist, f\"{FAILURE_HEADER}The js subdirectory was not found in your static directory.{FAILURE_FOOTER}\")", "def create_unexisted_dir(directory, element):\n directory += \"/\" + element\n if get_file_type(directory) == 0:\n mkdir(directory)\n return directory", "def test_default_log_dir(self, cleanup_local_folder):\n test_trainer = pl.Trainer(checkpoint_callback=False, logger=False)\n\n log_dir = exp_manager(test_trainer, {\"create_tensorboard_logger\": False, \"create_checkpoint_callback\": False})\n assert (log_dir / \"..\").resolve() == Path(\"./nemo_experiments/default/\").resolve()\n assert Path(\"./nemo_experiments\").exists()\n assert Path(\"./nemo_experiments/default/\").exists()\n sub_dirs = [x for x in Path(\"./nemo_experiments/default/\").iterdir() if x.is_dir()]\n assert len(sub_dirs) == 1\n assert re.match(r\"[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}\", sub_dirs[0].name)", "def scan_sample_directory(sample_dir: Path) -> None:\n if not (sample_dir / 'README.md').is_file():\n print(f\"WARNING ({sample_dir}): No README.md file\")\n if not (sample_dir / 'main.py').is_file():\n print(f\"ERROR ({sample_dir}): No main.py file\")", "def test_get_result_directories(self):\n pass", "def _existDir(d):\n\treturn os.path.exists(d)", "def test_ls_no_shareddir():\n\n with bad_fixture() as root:\n assert next(pipeline.ls(root=root), None) is None", "def _validate_path(dir_path: str) -> None:\n if os.path.exists(dir_path):\n return\n\n logger.info('Creating directory: %s', dir_path)\n os.mkdir(dir_path)", "def empty_dir(value):\n return not os.listdir(value)", "def check_is_dir(path):\n if not os.path.isdir(path):\n raise DirectoryNotFoundError(path)", "def dirChecking(dir):\n if not os.path.exists(dir):\n os.mkdir(dir)", "def test_missing_repo_dir(self):\r\n\r\n self._setstaff_login()\r\n\r\n if os.path.isdir(getattr(settings, 'GIT_REPO_DIR')):\r\n shutil.rmtree(getattr(settings, 'GIT_REPO_DIR'))\r\n\r\n # Create git loaded course\r\n response = self._add_edx4edx()\r\n self.assertIn(GitImportError.NO_DIR,\r\n response.content.decode('UTF-8'))", "def test_isdir(self, remote_mock_dir):\n\n with HdfsHook() as hook:\n assert hook.isdir(posixpath.join(remote_mock_dir, \"subdir\"))\n assert not hook.isdir(posixpath.join(remote_mock_dir, \"test.txt\"))", "def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname", "def _raise_if_not_dir(cls, isdir, spec, client=None):\n if not isdir:\n if client:\n # Check if exists\n cls.head_obj(client, spec)\n raise ObjectNotADirectoryError(path=spec[\"full_path\"])", "def is_dir(self, path):", "def test_get_result_directory(self):\n pass", "def _assert_dir_exists(dirname):\n\n if not dirname:\n return\n\n if not os.path.exists(dirname):\n text = \"directory %s doesn't exist, so creating\"\n print(\"\\033[93m\" + text % dirname + \"\\033[0m\")\n\n os.makedirs(dirname)\n\n assert os.path.isdir(dirname), dirname\n assert os.access(dirname, os.R_OK), dirname\n assert os.access(dirname, os.W_OK), dirname", "def emptySegmentDir(recordedVideoDir):\n try:\n if not os.listdir(recordedVideoDir):\n return True\n except:\n return True\n return False", "def test_empty_directory(self):\n\n # 
create indexer object\n indexer = indexing_module.IndexModule()\n\n # index the location (storage/data/test/empty_directory)\n indexer.index(_path_prefix + 'empty_directory')\n\n # search for few words and check that the result is empty\n result = indexer.search(\"\")\n print(result)\n self.assertTrue(result == {})\n\n result = indexer.search(\"hello\")\n self.assertTrue(result == {})\n\n result = indexer.search(\"world\")\n self.assertTrue(result == {})", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def test_get_denoiser_data_dir(self):\r\n\r\n obs = get_denoiser_data_dir()\r\n\r\n self.assertTrue(exists(obs))\r\n self.assertTrue(exists(obs + 'FLX_error_profile.dat'))", "def dir_is_empty(dir):\n if os.path.exists(dir) and os.path.isdir(dir):\n if not os.listdir(dir):\n return True\n else:\n return False\n else:\n print(\"Given Directory don't exists\")", "def _mkdir_if_not_exist(path):\n if not(os.path.isdir(path)):\n os.mkdir(path)\n else:\n _logger.info('Skipping existing directory %s' % path)", "def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)", "def check_directory(self, directory: str) -> bool:\n return self.run(\"/\", \"root\", [\"test\", \"-d\", directory], check=False).returncode == 0", "def santityCheckInitialization(self):\r\n\r\n for obj in self.config[\"repos\"]:\r\n if not isdir(obj[\"path\"]):\r\n print(\"ERROR : Initialization Failed missing {} at path {}\".format(obj[\"name\"], obj[\"path\"]))", "def check_path(path: str, check_for: str = None, check_as_dir: bool = True) -> None:\n if not os.path.isdir(path):\n raise Exception(path + \" is not a valid directory\")\n if check_for is not None:\n if check_for not in os.listdir(path):\n raise Exception(check_for + \" not found in:\" + path)\n if check_as_dir:\n if not os.path.isdir(path + check_for):\n raise Exception(path + \" is not a valid directory\")", "def test_find_failed_at_root(self):\n directory = os.path.dirname(os.path.realpath(__file__))\n subdirectory = os.path.join(directory, 'fake')\n\n with patch('os.path.dirname', return_value=subdirectory) as func:\n with self.assertRaises(FileNotFoundError):\n steptest.find_project_directory(subdirectory)\n func.assert_called_once_with(subdirectory)", "def dir_noaccess(self,fullname):\n pass" ]
[ "0.7097761", "0.69815487", "0.6958049", "0.69345635", "0.68600756", "0.6768691", "0.67403984", "0.67001784", "0.66480124", "0.65875924", "0.6568089", "0.6561216", "0.6556799", "0.6535799", "0.6534498", "0.6532836", "0.6514325", "0.65090376", "0.64824975", "0.6481356", "0.6477626", "0.64698684", "0.63911563", "0.6369664", "0.6364753", "0.6346357", "0.63405997", "0.63157886", "0.63157886", "0.6312728", "0.6293222", "0.6278287", "0.62748426", "0.62629396", "0.6251241", "0.62261134", "0.6223201", "0.6212459", "0.6212344", "0.62015915", "0.6193822", "0.6192791", "0.6188722", "0.6188722", "0.6185872", "0.61762893", "0.61699516", "0.61653703", "0.614461", "0.6143514", "0.6134418", "0.612808", "0.61148554", "0.61148506", "0.6097388", "0.6087242", "0.60822594", "0.6080642", "0.60754466", "0.60732275", "0.6072413", "0.6064484", "0.6059166", "0.6054581", "0.60477597", "0.6047176", "0.60420233", "0.60333455", "0.6027965", "0.6024235", "0.6022742", "0.60168505", "0.6016276", "0.6012839", "0.60107046", "0.6008008", "0.6006419", "0.60061383", "0.60008734", "0.6000785", "0.59932905", "0.59931666", "0.59818316", "0.5968953", "0.5966919", "0.59637", "0.5958216", "0.5955629", "0.59462774", "0.5945013", "0.59290653", "0.5927068", "0.5923047", "0.5920572", "0.59143305", "0.5905707", "0.5902348", "0.5899696", "0.58948636", "0.5887783" ]
0.7614185
0
Description: When given a directory path that exists and has CSV files. Expected Result: returns an array with the paths of the files.
def test_get_filepaths(self):
    #setup
    get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn)

    #when
    test1 = get_filepaths("./dir1", ".csv")

    #result
    assert len(test1) == 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def list_file(csv_directory):\n list_of_files = [os.path.join(dirpath, file_name)\n for dirpath, dirnames, files in os.walk(csv_directory)\n for file_name in fnmatch.filter(files, '*.csv')]\n return list_of_files", "def list_files(path=None):\n if path == None:\n return glob.glob('Data/*.csv')\n else:\n return glob.glob(path+'*.csv')", "def read_csv(folder):\n csv_paths = [(f, os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith('.csv') and '刑事' in f and '司法院-刑事補償_刑事' not in f and '最高' not in f]\n return csv_paths", "def get_descendant_file_path(parent_path):\n csv_relative_path = []\n for root, dirs, files in os.walk(parent_path):\n for file in files:\n words = file.split(r'.')\n if words[-1] == 'csv':\n file_path = os.path.join(parent_path, file)\n csv_relative_path.append(file_path)\n return csv_relative_path", "def tdfiles(self):\r\n _tdfiles = []\r\n if self.dir_exists:\r\n files = os.listdir(self.csvdir)\r\n _tdfiles = fnmatch.filter(files, self.search_pattern)\r\n return _tdfiles", "def __setupPaths(self):\n self.csv_file_names = filter(\n (lambda x: not re.match(\".*\\\\.csv$\", x) is None),\n os.listdir(self.path))", "def read_files(path, file_name):\n\n if os.path.exists(\n r'{}\\{}_dynamic.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_static.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_ego.csv'.format(path, file_name)):\n with open(r'{}\\{}_dynamic.csv'.format(path, file_name)) as tmp_dynamic:\n dynamic_csv = pd.read_csv(tmp_dynamic)\n print('Dynamic csv file found')\n with open(r'{}\\{}_static.csv'.format(path, file_name)) as tmp_static:\n static_csv = pd.read_csv(tmp_static)\n print('Static csv file found')\n with open(r'{}\\{}_ego.csv'.format(path, file_name)) as tmp_ego:\n ego_csv = pd.read_csv(tmp_ego)\n print('Ego csv file found')\n return ego_csv, dynamic_csv, static_csv\n\n else:\n print('No available data')\n sys.exit(0)", "def find_files(main_path, column_names = []):\n main_path = main_path\n files = []\n onlyfiles = [f for f in listdir(main_path) if isfile(join(main_path, f))]\n for file in onlyfiles:\n files.append(File(main_path, file[:-4], \".csv\", column_names))\n return files", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def getPathfromCSV(flist, full_csv_list, outfile=None):\n # Get the file list\n fils, csvfils = [], []\n with open(flist, 'r') as fIn:\n for line in fIn:\n if line:\n fils.append(line.split('.')[0].strip())\n with open(full_csv_list, 'r') as fIn:\n for line in fIn:\n if line:\n csvfils.append([line.split('/')[-1].split('_')[0].strip(), # Filename only\n line.strip()]) # File path only\n \n # replace it with the path list\n paths = []\n for f in fils:\n if f in [o[0] for o in csvfils]:\n idx = [o[0] for o in csvfils].index(f)\n paths.append(csvfils[idx][1])\n else:\n print('Could not find: %s' %f)\n \n print('Retrieved %i paths (of %i)' %(len(paths), len(fils)))\n if outfile is not None:\n with open(outfile, 'w') as fOut:\n for p in paths:\n fOut.write(p)\n fOut.write('\\n')\n \n return paths", "def list_csv_files():\n # See README.txt Ref#2.\n return [filename for filename in glob.glob(\"*.csv\")]", "def 
get_twitter_files(input_filepath):\n log_main = logging.getLogger(__name__)\n log_import = log_main.getChild('import_files')\n log_files = log_import.getChild('find_files')\n log_files.info('Gathering files to import.')\n\n filtered_files = []\n\n try:\n files = os.listdir(input_filepath)\n\n # Filter log files keeping only csv/non-hidden files\n filtered_files = [f for f in files if \\\n (f.endswith('.csv') and not f.startswith('.'))]\n\n # Create file paths by combining it with our passed directory\n filtered_files = [os.path.join(input_filepath, f) for f in filtered_files]\n\n except Exception as error:\n print ('\\tCould not find directory!')\n print ('\\tError: ', error)\n \n finally:\n return filtered_files", "def _path_files(self):\n\n if not os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = []\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files", "def scan_folder(folder):\n LOGGER.debug(\"Scanning folder: %s\", folder)\n for file in os.listdir(folder):\n if file.endswith(\".csv\"):\n yield os.path.join(folder, file)", "def find_files(path, extension = 'csv', min = 2):\n try:\n os.chdir(path) # go to the directory of the path\n except FileNotFoundError:\n print(\"ERROR - Couldn't find file \" + path)\n exit()\n files = [i for i in glob.glob('*.{}'.format(extension))] # place all the csv files in this array\n\n if len(files) < min:\n print(\"ERROR - Couldn't find at least \" + str(min) + \" \" + extension + \" file(s)\")\n exit()\n\n return files", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def update_csv():\n return os.listdir('./data')", "def get_currency_file_paths(self, dir_path=\"*/*\", extension=\".csv\"):\n csv_files = [\n csv_file for csv_file in glob(dir_path + extension, recursive=True)\n ]\n return [\n currency_file for currency_file in csv_files if \"currency\" in currency_file\n ]", "def get_all_csv_files_in_directory(directory):\n return filter(lambda x: x[-4:] == \".csv\", os.listdir(directory))", "def csvp(startingPath, csv_ext='.csv'):\n print 'walking up path=', startingPath\n csvfn = [os.path.join(root, filename)\n for root, dirnames, filenames in os.walk(startingPath)\n for filename in filenames if filename.endswith(csv_ext)]\n print 'list is ', len(csvfn), ' images long'\n print 'starting with', csvfn[0]\n print 'ending with', csvfn[-1]\n return csvfn", "def get_csv_paths(top_path):\n # exclude is a set holding all dirnames to be excluded\n exclude = {\"fails\", \"archive\", \"exclude\", \"fail\", \"backup\"}\n # files is a dict that defaults to lists, so values can be appended to keys\n files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n dirnames[:] = [d for d in dirnames if d.lower() not in exclude]\n\n for filename in filenames:\n\n # gather .csv and .tsv files\n if \".csv\" in str(filename).lower() or \".tsv\" in str(filename).lower():\n # Add filename to the key of dirpath\n files[dirpath].append(filename)\n return files", "def scandir(path_):\n return 
os.listdir", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def get_processed_csv_file_names(directory_path):\n\n\t__supported_extensions = ['.csv', ]\n\n\tprocessed_csv_file_names = list()\n\tlistdir = os.listdir(directory_path)\n\tfor file in listdir:\n\t\tif os.path.splitext(file)[1] in __supported_extensions:\n\t\t\tprocessed_csv_file_names.append(file)\n\n\t# sort so that we always read in a predefined order\n\t# key: smallest file first\n\tprocessed_csv_file_names.sort(key = lambda f: os.path.getsize(os.path.join(directory_path, f)))\n\treturn processed_csv_file_names", "def get_file_names(self):\n return glob.glob(os.path.join(self.path, '*.csv'))", "def get_input_files(dir_path):\n return [os.path.join(dir_path,f) for f in os.listdir(dir_path)\n if os.path.isfile(os.path.join(dir_path,f))]", "def get_files_by_name(self, pilot_indir, file_name):\n\n directory_files = glob.glob(pilot_indir + '\\\\*\\\\')\n os.chdir(pilot_indir)\n files = []\n\n for directoryFile in directory_files:\n file = glob.glob(('{}\\\\{}*.csv'.format(directoryFile, file_name)))\n files.append(file[0])\n\n os.chdir('..\\\\..')\n return files", "def GetFilesInDirectory(dir_path):\n\treturn [f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path,f))]", "def parse_csv(csv_path):\n song_list = []\n\n try:\n with open(csv_path, encoding='utf-8') as playlist:\n print(\"Parsing \" + csv_path)\n reader = csv.reader(playlist, delimiter=',')\n next(reader) # skip csv header\n for row in reader:\n song_list.append(row[2] + \" - \" + row[1])\n # todo: parse CSV, then check to see which songs already exist in current dir\n # move non-existent results to new list and return that\n except IndexError as error:\n # consider validating playlists when parsing\n # from API on web server instead\n print(str(error))\n \n return song_list", "def read_data(path: str = None, files: List[str] = None) -> list:\n\n if path == None:\n path = \"../input\"\n\n try:\n if files == None:\n all_files = glob.glob(path + \"/*.csv\")\n else:\n all_files = [path + '/' + s for s in files]\n li = []\n for filename in all_files:\n df = pd.read_csv(filename, index_col=None, header=0)\n li.append(df)\n return li\n except FileNotFoundError:\n print(\"Files not found. 
Check the path variable\")", "def getDataFiles(directoryName):\r\n \r\n return listdir(directoryName)", "def files_in_dir(path):\n return os.listdir(path)", "def dir_to_paths(data_dir, data_type):\n file_paths = []\n\n if data_dir:\n tf.logging.info(\"=\" * 120)\n\n case_str = \"uncased.\" if uncased else \"\"\n glob_base = \"data.{}.{}.{}tfrecord*\".format(split, data_type, case_str)\n\n for idx, dir_path in enumerate(data_dir.split(\",\")):\n glob = os.path.join(dir_path, glob_base)\n cur_file_paths = sorted(tf.io.gfile.glob(glob))\n file_paths += cur_file_paths\n\n tf.logging.info(\"[%d] Data glob: %s\", idx, glob)\n tf.logging.info(\"[%d] Num of file path: %d\", idx, len(cur_file_paths))\n\n tf.logging.info(\"[%s] Total number of file path: %d\", data_type,\n len(file_paths))\n\n return file_paths", "def get_files(self, dir):\n path = os.path.join(self.loc, dir)\n return [f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))]", "def test_GetFilesInDirectory_empty_dir(tempdir: pathlib.Path):\n assert not dpack.GetFilesInDirectory(tempdir, [])", "def get_data_from_files(path, filename):\n\n data_files = []\n\n if path:\n list_of_files = os.listdir(path)\n print(\"List of data files:\", list_of_files)\n\n for file in list_of_files:\n if filename in file:\n full_filepath = path + \"/\" + file\n data_files.append(full_filepath)\n #print(data_files)\n\n else:\n data_files = []\n #print(data_files)\n return data_files", "def search_files(filename, search_path, pathsep=os.pathsep):\n clidFiles = []\n for path in search_path.split(pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): clidFiles.append(os.path.abspath(candidate))\n return clidFiles", "def read_csv_file(csv_file_path):\n file_names = []\n file_labels = []\n with open(csv_file_path, 'r') as files_path:\n path_list = csv.DictReader(files_path)\n fieldnames = path_list.fieldnames\n for path in path_list:\n file_names.append(path[fieldnames[0]])\n file_labels.append(path[fieldnames[1]])\n return file_names, file_labels", "def test_findCSV(self,\n filename=\"page-views.csv\",\n input_folder='../../input/raw-data/'):\n\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = input_folder + filename\n self.assertEqual(csv_file, expected_output)", "def _get_files(self, path):\n result = []\n for f in os.listdir(path):\n if os.path.isdir(os.path.join(path, f)):\n result += self._get_files(os.path.join(path, f))\n else:\n result.append(os.path.join(path, f))\n return result", "def _list_datasets_from_dir(path: github_api.GithubPath) -> List[str]:\n if not path.exists():\n # Should be fault-tolerant in the future\n raise FileNotFoundError(f'Could not find datasets at {path}')\n return sorted([ds for ds in path.iterdir() if _is_dataset_path(ds)])", "def stack_walks(direc):\n files = os.listdir(direc)\n csvs = []\n for x in files:\n if '.csv' in x:\n csvs.append(x)\n complete = np.vstack([get_nx10(direc+'/'+x) for x in csvs])\n return complete", "def get_file_names():\n all_file_names = []\n cwd = os.getcwd()\n # Change to dir with result files to analyze\n os.chdir(args.dir)\n \n for file in glob.glob(\"*.csv\"):\n all_file_names.append(file)\n\n # Return to current working directory\n os.chdir(cwd)\n return all_file_names", "def list_dir(self, path):", "def get_test_files(dirname):\n if not os.path.isdir(dirname):\n return []\n path = dirname + \"/{}\"\n return list(map(path.format, sorted(os.listdir(dirname))))", "def list_files(directory):\r\n try:\r\n abpath = 
os.path.abspath(directory)\r\n filepaths = os.listdir(abpath)\r\n filepaths = [\r\n os.path.join(abpath, f)\r\n for f\r\n in filepaths\r\n if f.endswith('fa')\r\n ]\r\n except (OSError, IOError):\r\n print('The directory supplied is not readable, or does not exist!')\r\n exit(1)\r\n return filepaths", "def get_all_metrics(dir):\r\n file_lst = os.listdir(dir)\r\n file_lst = list(filter(lambda x: re.findall(r'\\.csv$',x), file_lst))\r\n return file_lst", "def get_file_paths_from_directory(directory_path):\n file_paths = [join(directory_path, file) for file in listdir(directory_path)\n if isfile(join(directory_path, file)) and file[-2:] == 'h5']\n return file_paths", "def get_files_from_directory(path):\n return [f for f in listdir(path) if isfile(join(path, f))]", "def filePaths(directory_with_files):\n\n # get a list of file names in directory\n list_of_files = os.listdir(directory_with_files) \n\n # join directory path and file name to get full paths to files\n filepaths = [os.path.join(directory_with_files, filename) for filename in list_of_files]\n\n return filepaths", "def _listdir(self, path):\n return self.__call_with_parser_retry(self._real_listdir, path)", "def dir_exists(self):\r\n if os.path.exists(self.csvdir):\r\n return True\r\n else:\r\n return False", "def get_dataset_filelist(dataset):\n\n query = {\n \"_source\": {\n \"includes\": [\"info.directory\", \"info.name\"]\n },\n \"query\": {\n \"match_phrase_prefix\": {\n \"info.directory.analyzed\": dataset\n }\n }\n }\n\n es = CEDAElasticsearchClient()\n results = scan(es, query=query, index='opensearch-files')\n\n file_list = [\n os.path.join(\n item['_source']['info']['directory'],\n item['_source']['info']['name']\n ) for item in results\n ]\n\n return file_list", "def get_parsed_files(output_path, directory):\n parsed_files = set(os.listdir(os.path.join(output_path, directory)))\n \n return parsed_files", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def read_filepaths(self, directory):\n folder_paths = [os.path.join(directory, folder) for folder in os.listdir(directory) if not folder.startswith('.')]\n filepaths = [[os.path.join(cur_folder, cur_file) for cur_file in os.listdir(cur_folder)] for cur_folder in folder_paths]\n return filepaths", "def find_picard_files(file_and_dir_names):\n filenames = []\n for tgt in file_and_dir_names:\n if os.path.isdir(tgt):\n # Collect the target coverage files from this directory tree\n fnames = subprocess.check_output(['find', tgt,\n '-name', '*targetcoverage.csv']\n ).splitlines()\n if not fnames:\n raise RuntimeError(\"Given directory %s does not contain any \"\n \"'*targetcoverage.csv' files.\"\n % tgt)\n filenames.extend(fnames)\n elif os.path.isfile(tgt):\n filenames.append(tgt)\n else:\n raise ValueError(\"Given path is neither a file nor a directory: %s\"\n % tgt)\n filenames.sort()\n return filenames", "def get_csv(self):\n all_csvs = [each for each in listdir(self.cur_dir) if each.endswith('.csv')]\n return all_csvs", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def get_filepaths(extract_dir):\n\n index = []\n labels = []\n _extract_dir = os.path.join(extract_dir, 'UCF-101')\n for folder in os.listdir(_extract_dir):\n labels.append(folder)\n folderpath = os.path.join(_extract_dir, folder)\n\n if not os.path.isdir(folderpath):\n continue\n\n for filename in os.listdir(folderpath):\n if 'avi' not in filename:\n continue\n\n if filename[0] == '.':\n continue\n\n filepath = os.path.join(folderpath, filename)\n\n if 
os.path.exists(filepath):\n index.append(filepath)\n else:\n print(filepath)\n return index, labels", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def files_in_dir(dir_path):\r\n files = [f for f in os.listdir(\r\n dir_path) if os.path.isfile(os.path.join(dir_path, f))]\r\n return files", "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def directory_to_df(path,filetype = '.csv',ignore_index = True):\n filenames = []\n file_column = []\n frames = []\n test_index = 1\n for filename in os.listdir(path):\n if filetype in filename:\n curr_df = pd.read_csv(path+filename)\n frames.append(curr_df)\n filenames.append(filename.replace(filetype,''))\n for i in range(curr_df.shape[0]):\n file_column.append(test_index)\n test_index+=1\n \n df = pd.concat(frames,ignore_index = ignore_index)\n df['files'] = file_column\n return df, filenames", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def input_files_from_path(path):\n import glob\n input_files = None\n if type(path) is list:\n input_files = []\n for p in path:\n if '*' in p:\n input_files.extend(glob.glob(p))\n else: # neither wildcard nor comma separated list\n input_files.append(p)\n else:\n if ',' in path:\n input_files = path.split(',')\n elif '*' in path:\n input_files = glob.glob(path)\n else: # neither wildcard nor comma separated list\n input_files = [path]\n input_files = [os.path.abspath(f) for f in input_files]\n return [f for f in input_files if os.path.exists(f) or f.startswith('/store')]", "def parse_path(self) -> list:\n metadata = []\n for f in listdir(self.__path):\n inner_path = join(self.__path, f)\n if len(listdir(inner_path)) > 1:\n LOGGER.error(\"Unwanted files found at {}.\".format(inner_path))\n sys.exit(-1)\n try:\n inner_file = join(inner_path, listdir(inner_path)[0])\n except IndexError as ie:\n LOGGER.error(\"{} does not have any solution file.\".format(f))\n sys.exit(-1)\n if isdir(inner_path) and isfile(inner_file) and \"solution.\" in inner_file:\n metadata.append((f, inner_file))\n else:\n LOGGER.error(\"Unwanted files found at {} or {}.\".format(f, inner_path))\n 
sys.exit(-1)\n return metadata", "def __getFilesAndExpectedValues(fileToExpected, dir):\n files, expected = [], []\n with open(fileToExpected, \"r\") as filestream:\n for line in filestream:\n row = line.split(\",\")\n files.append(os.path.join(dir, row[0]))\n expected.append(row[1].replace('\\\"', '').rstrip())\n return files, expected", "def csv_dir(self):\n return op.join(self.root_dir, 'csv')", "def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []", "def get_file_list(dir_path):\n onlyfiles = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]\n return onlyfiles", "def pathfinder(Input):\n while True:\n if Input[-4::] == '.csv':\n return Input\n else:\n Input = input('Please enter a valid csv file: ')", "def read_csv(path):\n csv_data =[]\n \n with open(path, 'r') as csv_file:\n csv_read = csv.reader(csv_file, dialect='excel')\n for row in csv_read:\n csv_data.append(row)\n\n return(csv_data)", "def list_sar_directory(directory_path: str) -> list:\n path_generator = Path(directory_path).rglob('*.tif')\n return sorted([path.name for path in path_generator if path.is_file()])", "def csv_path(name):\n return \"./data/%s\" % name", "def file_path_maker(filepath, zlocs):\n files = []\n for i in zlocs:\n files.append(filepath + str(i) + '.csv')\n return files", "def load_multiple_csv(self, path, column):\n df = pd.concat([pd.read_csv(f\"{path}/{f}\") for f in tqdm(os.listdir(f\"{path}/\"))], ignore_index=True)\n return df[column]", "def list_of_files(path):\r\n files_list=[]\r\n path = os.path.abspath(path)\r\n\r\n #if the path is a file name, returns a list of a single file name\r\n if os.path.isfile(path):\r\n files_list.append(path)\r\n #if the path is a directory name, returns a list of all the file names anded with .asm\r\n else:\r\n for file in os.listdir(path):\r\n if file.endswith(\".asm\"):\r\n files_list.append(os.path.join(path, file))\r\n return files_list", "def _get_files_list(self):\n ts_filepaths = []\n conn_filepaths = []\n ts_filepaths_from_dir = sorted(os.listdir(self.ts_dir))\n conn_filepaths_from_dir = sorted(os.listdir(self.conn_dir))\n for sub_id in self.ids:\n for ts_file in ts_filepaths_from_dir:\n if sub_id in ts_file:\n ts_filepaths += [os.path.join(self.ts_dir, ts_file)]\n ts_filepaths_from_dir.remove(ts_file)\n break\n for conn_file in conn_filepaths_from_dir:\n if sub_id in conn_file:\n conn_filepaths += [os.path.join(self.conn_dir, conn_file)]\n conn_filepaths_from_dir.remove(conn_file)\n break\n\n return ts_filepaths, conn_filepaths", "def get_files_from_directory(path):\n files = [f for f in listdir(path) if isfile(join(path, f))]\n return files", "def _create_file_paths(folder):\n debut = \"chroma-nnls\"\n instrument = [\"piano\", \"orchestra\"]\n style = [\"baroque\", \"classical\", \"romantic\", \"modern\", \"addon\"]\n file_names = [\"_\".join([debut, i, s]) for i in instrument for s in style]\n # file_names = [\"test0\"]\n\n return [folder + fn + \".csv\" for fn in file_names]", "def list_of_files(self, dirname): \n\n list_of_files = os.listdir(dirname)\n all_files = []\n\n for entry in list_of_files:\n full_path = os.path.join(dirname, entry)\n\n if os.path.isdir(full_path):\n all_files = all_files + self.list_of_files(full_path)\n else:\n all_files.append(full_path)\n\n return all_files", "def list_files_into_directory(directory_path: str) -> [str]:\n for root, directory_names, file_names in walk(directory_path):\n return file_names", "def 
_complete_path(path=None):\r\n if not path:\r\n return _listdir('.')\r\n dirname, rest = os.path.split(path)\r\n tmp = dirname if dirname else '.'\r\n res = [p for p in _listdir(tmp) if p.startswith(rest)]\r\n # more than one match, or single match which does not exist (typo)\r\n if len(res) > 1 or not os.path.exists(path):\r\n return res\r\n # resolved to a single directory, so return list of files below it\r\n if os.path.isdir(path):\r\n return [p for p in _listdir(path)]\r\n # exact file match terminates this completion\r\n return [path + ' ']", "def process_directory(dir_path, items):\n result = []\n for item in items:\n name = os.path.join(dir_path, item)\n if os.path.isfile(name) and not os.path.islink(name):\n for mask in masks:\n if fnmatch.fnmatch(name, mask):\n result.append(os.path.abspath(name))\n break\n return result", "def getImmediateFiles(aDir):\n return [name for name in os.listdir(aDir)\n if os.path.isfile(os.path.join(aDir,name))]", "def get_files(path: str) -> List[str]:\n if not isdir(path):\n return [path] # its expected to return a list each time even if its a single element\n return [file for fileOrDir in listdir(path) for file in get_files(path + '/' + fileOrDir)]\n # return list of each file returned by the recursive call getFiles(fileOrDir) on\n # each fileOrDir in listdir(path)", "def parse_csv_file(file_path):\n\n complete_data_list = []\n\n try:\n import_file = open(file_path, \"rb\")\n\n except IOError:\n print 'An error occured trying to read the file.'\n\n else:\n reader_file = csv.DictReader(import_file)\n complete_data_list = get_file_data(reader_file)\n import_file.close()\n\n return complete_data_list", "def flatfiles(cam):\n return fullpathlist(flatpath(cam))", "def _find_files(research_structure, raise_on_all_missing=True):\n found = []\n filenames = []\n paths_searched = []\n ## config file lookup resolution\n for enforce_file_existence, cascaded, fun in research_structure:\n candidate = fun()\n if candidate is None:\n continue\n paths_searched.append(candidate)\n filenames.append((cascaded, candidate))\n if os.path.exists(candidate):\n found.append(candidate)\n if cascaded is False:\n break\n else:\n if enforce_file_existence:\n raise ValueError(\"File %r does not exists.\" % candidate)\n if not found and raise_on_all_missing:\n raise ValueError(\"No config file was found in those paths: %s.\"\n % ', '.join(paths_searched))\n return filenames", "def get_files_from_path(path=None):\n\n abspath = os.path.abspath(path)\n if os.path.isfile(abspath):\n files = [abspath]\n elif os.path.isdir(abspath):\n files = [\n os.path.join(abspath, fname)\n for fname in os.listdir(abspath)\n ]\n else:\n raise RuntimeError(f\"[-] '{path}' must be a file or directory\")\n return files", "def get_input_files(workflow_id):\n logger = fsurfer.log.get_logger()\n input_files = []\n conn = None\n try:\n conn = fsurfer.helpers.get_db_client()\n cursor = conn.cursor()\n input_query = \"SELECT path \" \\\n \"FROM freesurfer_interface.input_files \" \\\n \"WHERE job_id = %s\"\n cursor.execute(input_query, [workflow_id])\n for row in cursor.fetchall():\n input_files.append(row[0])\n input_files.append(os.path.dirname(row[0]))\n except psycopg2.Error as e:\n logger.exception(\"Error: {0}\".format(e))\n return None\n finally:\n if conn:\n conn.close()\n return input_files", "def get_result_filenames(self,directory):\n return [os.path.join(directory,name) for name in os.listdir(directory)\n if os.path.isfile(os.path.join(directory,name)) and\n os.path.splitext(name)[1].lower() == 
'.trf']", "def load_template_path_from_csv(csv_file_path, col_name=None):\n try:\n data = pd.read_excel(csv_file_path)\n data[col_name].dropna(inplace=True)\n return data[col_name].tolist()\n except Exception as e:\n sys.stderr.write('Template Loading from csv Failed: %s\\n' % e.message)\n exit(1)", "def get_all_filenames_from_dir(directory,suffex, filename_allowed_list = None):\n\n files_list = list()\n if filename_allowed_list == None:\n for item in glob.glob(directory+'*'+suffex): # Example /datasets/Stock_dataset/Stocks/*.txt\n files_list.append(item) \n else:\n filename_allowed_list = [v.lower() for v in filename_allowed_list] # To avoid case sensitve\n for item in glob.glob(directory+'*'+suffex):\n if item.split(\"/\")[-1].split('.')[0].lower() in filename_allowed_list: # Since linux is case sensitive, then so is this function, make sure the names match correctly\n files_list.append(item)\n if not len(files_list) == len(filename_allowed_list):\n print 'Some Stocks files are missing'\n return files_list", "def get_filenames(self, bucket, directory, delimiter=''):\n b = self.conn.get_bucket(bucket)\n rs = b.list(directory, delimiter)\n return [key.name for key in rs if '$folder$' not in key.name]", "def get_files_in_dir(dir: str):\n\n file_list = [f\"{dir}/{file}\" for file in listdir(dir) if isfile(join(dir, file))]\n\n return file_list" ]
[ "0.73693734", "0.70882964", "0.6973099", "0.69681597", "0.6596682", "0.6555726", "0.6549086", "0.6546537", "0.64897746", "0.64868534", "0.6455237", "0.64520305", "0.63143605", "0.6301336", "0.62846804", "0.62800604", "0.6277007", "0.62760013", "0.62456584", "0.6236947", "0.6218519", "0.62012434", "0.6198465", "0.613152", "0.6130295", "0.60975", "0.6093361", "0.60917205", "0.6079256", "0.6077363", "0.60726833", "0.60376376", "0.601793", "0.6003076", "0.5998027", "0.59836733", "0.59800595", "0.5968424", "0.5962084", "0.5959707", "0.59523004", "0.5948048", "0.59475833", "0.5923774", "0.59234136", "0.58970267", "0.5881596", "0.5873817", "0.58677036", "0.58571506", "0.5846965", "0.5829588", "0.58240795", "0.5818753", "0.5810984", "0.5808031", "0.5806005", "0.5798202", "0.579737", "0.57945836", "0.5791718", "0.57866573", "0.57861847", "0.57829124", "0.5780219", "0.5778542", "0.57759917", "0.5766567", "0.57615155", "0.5753289", "0.5752625", "0.5744262", "0.5743705", "0.5735343", "0.57350177", "0.571773", "0.5716486", "0.57140136", "0.5698958", "0.56777877", "0.5671899", "0.5664626", "0.5658232", "0.5650937", "0.5650211", "0.5642649", "0.56405306", "0.5639635", "0.56284475", "0.5627317", "0.56228083", "0.56213266", "0.56208783", "0.5619116", "0.56172013", "0.56031746", "0.56030095", "0.55993235", "0.55928624", "0.55844283" ]
0.6765397
4
Description When given a directory path that exists and doesn't have csv files Expected Result returns an array with paths of files
def test_get_filepaths_empty(self): #setup get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn) #when test2 = get_filepaths("./dir2", ".c") #result assert len(test2) == 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files", "def list_files(path=None):\n if path == None:\n return glob.glob('Data/*.csv')\n else:\n return glob.glob(path+'*.csv')", "def test_GetFilesInDirectory_empty_dir(tempdir: pathlib.Path):\n assert not dpack.GetFilesInDirectory(tempdir, [])", "def test_get_filepaths(self):\n\n #setup\n get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn)\n \n #when\n test1 = get_filepaths(\"./dir1\", \".csv\")\n\n #result\n assert len(test1) == 2", "def list_file(csv_directory):\n list_of_files = [os.path.join(dirpath, file_name)\n for dirpath, dirnames, files in os.walk(csv_directory)\n for file_name in fnmatch.filter(files, '*.csv')]\n return list_of_files", "def tdfiles(self):\r\n _tdfiles = []\r\n if self.dir_exists:\r\n files = os.listdir(self.csvdir)\r\n _tdfiles = fnmatch.filter(files, self.search_pattern)\r\n return _tdfiles", "def __setupPaths(self):\n self.csv_file_names = filter(\n (lambda x: not re.match(\".*\\\\.csv$\", x) is None),\n os.listdir(self.path))", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def read_files(path, file_name):\n\n if os.path.exists(\n r'{}\\{}_dynamic.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_static.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_ego.csv'.format(path, file_name)):\n with open(r'{}\\{}_dynamic.csv'.format(path, file_name)) as tmp_dynamic:\n dynamic_csv = pd.read_csv(tmp_dynamic)\n print('Dynamic csv file found')\n with open(r'{}\\{}_static.csv'.format(path, file_name)) as tmp_static:\n static_csv = pd.read_csv(tmp_static)\n print('Static csv file found')\n with open(r'{}\\{}_ego.csv'.format(path, file_name)) as tmp_ego:\n ego_csv = pd.read_csv(tmp_ego)\n print('Ego csv file found')\n return ego_csv, dynamic_csv, static_csv\n\n else:\n print('No available data')\n sys.exit(0)", "def find_files(main_path, column_names = []):\n main_path = main_path\n files = []\n onlyfiles = [f for f in listdir(main_path) if isfile(join(main_path, f))]\n for file in onlyfiles:\n files.append(File(main_path, file[:-4], \".csv\", column_names))\n return files", "def read_csv(folder):\n csv_paths = [(f, os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith('.csv') and '刑事' in f and '司法院-刑事補償_刑事' not in f and '最高' not in f]\n return csv_paths", "def _path_files(self):\n\n if not os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = []\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files", "def get_twitter_files(input_filepath):\n log_main = logging.getLogger(__name__)\n log_import = log_main.getChild('import_files')\n log_files = 
log_import.getChild('find_files')\n log_files.info('Gathering files to import.')\n\n filtered_files = []\n\n try:\n files = os.listdir(input_filepath)\n\n # Filter log files keeping only csv/non-hidden files\n filtered_files = [f for f in files if \\\n (f.endswith('.csv') and not f.startswith('.'))]\n\n # Create file paths by combining it with our passed directory\n filtered_files = [os.path.join(input_filepath, f) for f in filtered_files]\n\n except Exception as error:\n print ('\\tCould not find directory!')\n print ('\\tError: ', error)\n \n finally:\n return filtered_files", "def GetFilesInDirectory(dir_path):\n\treturn [f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path,f))]", "def get_input_files(dir_path):\n return [os.path.join(dir_path,f) for f in os.listdir(dir_path)\n if os.path.isfile(os.path.join(dir_path,f))]", "def get_csv_paths(top_path):\n # exclude is a set holding all dirnames to be excluded\n exclude = {\"fails\", \"archive\", \"exclude\", \"fail\", \"backup\"}\n # files is a dict that defaults to lists, so values can be appended to keys\n files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n dirnames[:] = [d for d in dirnames if d.lower() not in exclude]\n\n for filename in filenames:\n\n # gather .csv and .tsv files\n if \".csv\" in str(filename).lower() or \".tsv\" in str(filename).lower():\n # Add filename to the key of dirpath\n files[dirpath].append(filename)\n return files", "def find_files(path, extension = 'csv', min = 2):\n try:\n os.chdir(path) # go to the directory of the path\n except FileNotFoundError:\n print(\"ERROR - Couldn't find file \" + path)\n exit()\n files = [i for i in glob.glob('*.{}'.format(extension))] # place all the csv files in this array\n\n if len(files) < min:\n print(\"ERROR - Couldn't find at least \" + str(min) + \" \" + extension + \" file(s)\")\n exit()\n\n return files", "def get_descendant_file_path(parent_path):\n csv_relative_path = []\n for root, dirs, files in os.walk(parent_path):\n for file in files:\n words = file.split(r'.')\n if words[-1] == 'csv':\n file_path = os.path.join(parent_path, file)\n csv_relative_path.append(file_path)\n return csv_relative_path", "def scandir(path_):\n return os.listdir", "def get_all_csv_files_in_directory(directory):\n return filter(lambda x: x[-4:] == \".csv\", os.listdir(directory))", "def get_file_list(dir_path):\n onlyfiles = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]\n return onlyfiles", "def dir_exists(self):\r\n if os.path.exists(self.csvdir):\r\n return True\r\n else:\r\n return False", "def test_list_directory(self):\n import os\n stat_f = lambda x: FakeStat(33188, 16398844, 65024L, 1, 1049, 1049, 0,\n 1409046988, 1409046988, 1409046988)\n os.stat = stat_f\n os.lstat = stat_f\n expected = [\"subdir1\", \"subdir2\"]\n result = [r[0] for r in self.path_translator.list_directory(\n \"/{0}/search1/tmp/study\".format(self.search.instance))]\n self.assertEqual(result, expected)", "def scan_folder(folder):\n LOGGER.debug(\"Scanning folder: %s\", folder)\n for file in os.listdir(folder):\n if file.endswith(\".csv\"):\n yield os.path.join(folder, file)", "def get_files(self, dir):\n path = os.path.join(self.loc, dir)\n return [f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))]", "def files_in( d ):\n return [ join(d,f) for f in os.listdir(d) if isfile(join(d,f)) ]", "def get_currency_file_paths(self, dir_path=\"*/*\", extension=\".csv\"):\n csv_files = [\n csv_file for csv_file in 
glob(dir_path + extension, recursive=True)\n ]\n return [\n currency_file for currency_file in csv_files if \"currency\" in currency_file\n ]", "def get_files_from_directory(path):\n return [f for f in listdir(path) if isfile(join(path, f))]", "def getPathfromCSV(flist, full_csv_list, outfile=None):\n # Get the file list\n fils, csvfils = [], []\n with open(flist, 'r') as fIn:\n for line in fIn:\n if line:\n fils.append(line.split('.')[0].strip())\n with open(full_csv_list, 'r') as fIn:\n for line in fIn:\n if line:\n csvfils.append([line.split('/')[-1].split('_')[0].strip(), # Filename only\n line.strip()]) # File path only\n \n # replace it with the path list\n paths = []\n for f in fils:\n if f in [o[0] for o in csvfils]:\n idx = [o[0] for o in csvfils].index(f)\n paths.append(csvfils[idx][1])\n else:\n print('Could not find: %s' %f)\n \n print('Retrieved %i paths (of %i)' %(len(paths), len(fils)))\n if outfile is not None:\n with open(outfile, 'w') as fOut:\n for p in paths:\n fOut.write(p)\n fOut.write('\\n')\n \n return paths", "def get_test_files(dirname):\n if not os.path.isdir(dirname):\n return []\n path = dirname + \"/{}\"\n return list(map(path.format, sorted(os.listdir(dirname))))", "def _get_files(self, path):\n result = []\n for f in os.listdir(path):\n if os.path.isdir(os.path.join(path, f)):\n result += self._get_files(os.path.join(path, f))\n else:\n result.append(os.path.join(path, f))\n return result", "def list_files(directory):\r\n try:\r\n abpath = os.path.abspath(directory)\r\n filepaths = os.listdir(abpath)\r\n filepaths = [\r\n os.path.join(abpath, f)\r\n for f\r\n in filepaths\r\n if f.endswith('fa')\r\n ]\r\n except (OSError, IOError):\r\n print('The directory supplied is not readable, or does not exist!')\r\n exit(1)\r\n return filepaths", "def files_in_dir(path):\n return os.listdir(path)", "def list_csv_files():\n # See README.txt Ref#2.\n return [filename for filename in glob.glob(\"*.csv\")]", "def _complete_path(path=None):\r\n if not path:\r\n return _listdir('.')\r\n dirname, rest = os.path.split(path)\r\n tmp = dirname if dirname else '.'\r\n res = [p for p in _listdir(tmp) if p.startswith(rest)]\r\n # more than one match, or single match which does not exist (typo)\r\n if len(res) > 1 or not os.path.exists(path):\r\n return res\r\n # resolved to a single directory, so return list of files below it\r\n if os.path.isdir(path):\r\n return [p for p in _listdir(path)]\r\n # exact file match terminates this completion\r\n return [path + ' ']", "def _listdir(self, path):\n return self.__call_with_parser_retry(self._real_listdir, path)", "def _find_files(research_structure, raise_on_all_missing=True):\n found = []\n filenames = []\n paths_searched = []\n ## config file lookup resolution\n for enforce_file_existence, cascaded, fun in research_structure:\n candidate = fun()\n if candidate is None:\n continue\n paths_searched.append(candidate)\n filenames.append((cascaded, candidate))\n if os.path.exists(candidate):\n found.append(candidate)\n if cascaded is False:\n break\n else:\n if enforce_file_existence:\n raise ValueError(\"File %r does not exists.\" % candidate)\n if not found and raise_on_all_missing:\n raise ValueError(\"No config file was found in those paths: %s.\"\n % ', '.join(paths_searched))\n return filenames", "def update_csv():\n return os.listdir('./data')", "def _list_datasets_from_dir(path: github_api.GithubPath) -> List[str]:\n if not path.exists():\n # Should be fault-tolerant in the future\n raise FileNotFoundError(f'Could not find 
datasets at {path}')\n return sorted([ds for ds in path.iterdir() if _is_dataset_path(ds)])", "def getDataFiles(directoryName):\r\n \r\n return listdir(directoryName)", "def get_processed_csv_file_names(directory_path):\n\n\t__supported_extensions = ['.csv', ]\n\n\tprocessed_csv_file_names = list()\n\tlistdir = os.listdir(directory_path)\n\tfor file in listdir:\n\t\tif os.path.splitext(file)[1] in __supported_extensions:\n\t\t\tprocessed_csv_file_names.append(file)\n\n\t# sort so that we always read in a predefined order\n\t# key: smallest file first\n\tprocessed_csv_file_names.sort(key = lambda f: os.path.getsize(os.path.join(directory_path, f)))\n\treturn processed_csv_file_names", "def dir_to_paths(data_dir, data_type):\n file_paths = []\n\n if data_dir:\n tf.logging.info(\"=\" * 120)\n\n case_str = \"uncased.\" if uncased else \"\"\n glob_base = \"data.{}.{}.{}tfrecord*\".format(split, data_type, case_str)\n\n for idx, dir_path in enumerate(data_dir.split(\",\")):\n glob = os.path.join(dir_path, glob_base)\n cur_file_paths = sorted(tf.io.gfile.glob(glob))\n file_paths += cur_file_paths\n\n tf.logging.info(\"[%d] Data glob: %s\", idx, glob)\n tf.logging.info(\"[%d] Num of file path: %d\", idx, len(cur_file_paths))\n\n tf.logging.info(\"[%s] Total number of file path: %d\", data_type,\n len(file_paths))\n\n return file_paths", "def get_files_by_name(self, pilot_indir, file_name):\n\n directory_files = glob.glob(pilot_indir + '\\\\*\\\\')\n os.chdir(pilot_indir)\n files = []\n\n for directoryFile in directory_files:\n file = glob.glob(('{}\\\\{}*.csv'.format(directoryFile, file_name)))\n files.append(file[0])\n\n os.chdir('..\\\\..')\n return files", "def test_get_result_directories(self):\n pass", "def get_all_filenames_from_dir(directory,suffex, filename_allowed_list = None):\n\n files_list = list()\n if filename_allowed_list == None:\n for item in glob.glob(directory+'*'+suffex): # Example /datasets/Stock_dataset/Stocks/*.txt\n files_list.append(item) \n else:\n filename_allowed_list = [v.lower() for v in filename_allowed_list] # To avoid case sensitve\n for item in glob.glob(directory+'*'+suffex):\n if item.split(\"/\")[-1].split('.')[0].lower() in filename_allowed_list: # Since linux is case sensitive, then so is this function, make sure the names match correctly\n files_list.append(item)\n if not len(files_list) == len(filename_allowed_list):\n print 'Some Stocks files are missing'\n return files_list", "def read_data(path: str = None, files: List[str] = None) -> list:\n\n if path == None:\n path = \"../input\"\n\n try:\n if files == None:\n all_files = glob.glob(path + \"/*.csv\")\n else:\n all_files = [path + '/' + s for s in files]\n li = []\n for filename in all_files:\n df = pd.read_csv(filename, index_col=None, header=0)\n li.append(df)\n return li\n except FileNotFoundError:\n print(\"Files not found. 
Check the path variable\")", "def get_file_paths_from_directory(directory_path):\n file_paths = [join(directory_path, file) for file in listdir(directory_path)\n if isfile(join(directory_path, file)) and file[-2:] == 'h5']\n return file_paths", "def __getFilesAndExpectedValues(fileToExpected, dir):\n files, expected = [], []\n with open(fileToExpected, \"r\") as filestream:\n for line in filestream:\n row = line.split(\",\")\n files.append(os.path.join(dir, row[0]))\n expected.append(row[1].replace('\\\"', '').rstrip())\n return files, expected", "def get_parsed_files(output_path, directory):\n parsed_files = set(os.listdir(os.path.join(output_path, directory)))\n \n return parsed_files", "def getImmediateFiles(aDir):\n return [name for name in os.listdir(aDir)\n if os.path.isfile(os.path.join(aDir,name))]", "def get_file_names(self):\n return glob.glob(os.path.join(self.path, '*.csv'))", "def test_get_filenames_in_path():\n with tempfile.TemporaryDirectory() as tmpdir:\n tmpdir2 = os.path.join(tmpdir, \"tmp\")\n os.makedirs(tmpdir2, exist_ok=True)\n\n data = {\"A\": 1, \"B\": 2}\n json_file1 = os.path.join(tmpdir, \"a.json\")\n json_file2 = os.path.join(tmpdir2, \"a.json\")\n dump_data(data, json_file1)\n dump_data(data, json_file2)\n\n # These should not get included.\n toml_file1 = os.path.join(tmpdir, \"b.toml\")\n toml_file2 = os.path.join(tmpdir2, \"b.toml\")\n dump_data(data, toml_file1)\n dump_data(data, toml_file2)\n\n filenames = list(get_filenames_in_path(tmpdir, \"a.json\"))\n assert filenames == [json_file1, json_file2]", "def files_in_dir(dir_path):\r\n files = [f for f in os.listdir(\r\n dir_path) if os.path.isfile(os.path.join(dir_path, f))]\r\n return files", "def search_files(filename, search_path, pathsep=os.pathsep):\n clidFiles = []\n for path in search_path.split(pathsep):\n candidate = os.path.join(path, filename)\n if os.path.exists(candidate): clidFiles.append(os.path.abspath(candidate))\n return clidFiles", "def listdir_nohidden(path):\n\treturn glob.glob(os.path.join(path, '*'))", "def get_my_files():\n return [file for file in os.listdir(os.getcwd()) if os.path.isfile(file)]", "def get_data_from_files(path, filename):\n\n data_files = []\n\n if path:\n list_of_files = os.listdir(path)\n print(\"List of data files:\", list_of_files)\n\n for file in list_of_files:\n if filename in file:\n full_filepath = path + \"/\" + file\n data_files.append(full_filepath)\n #print(data_files)\n\n else:\n data_files = []\n #print(data_files)\n return data_files", "def get_filenames(self, bucket, directory, delimiter=''):\n b = self.conn.get_bucket(bucket)\n rs = b.list(directory, delimiter)\n return [key.name for key in rs if '$folder$' not in key.name]", "def checkforfile(filename):\r\n try:\r\n inputlist =[]\r\n with open(filename,'ra') as fin:\r\n reader = csv.reader(fin)\r\n for row in reader:\r\n if row[0]!= '':\r\n inputlist.append(row)\r\n print (\" File Exists. 
It has \", len(inputlist), \" number of entries\" ) \r\n \r\n except IOError as (errno, strerror):\r\n print (\"IOError : ({0}) : {1}\".format(errno, strerror))\r\n\r\n finally:\r\n fin.close()\r\n return inputlist", "def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files", "def parse_path(self) -> list:\n metadata = []\n for f in listdir(self.__path):\n inner_path = join(self.__path, f)\n if len(listdir(inner_path)) > 1:\n LOGGER.error(\"Unwanted files found at {}.\".format(inner_path))\n sys.exit(-1)\n try:\n inner_file = join(inner_path, listdir(inner_path)[0])\n except IndexError as ie:\n LOGGER.error(\"{} does not have any solution file.\".format(f))\n sys.exit(-1)\n if isdir(inner_path) and isfile(inner_file) and \"solution.\" in inner_file:\n metadata.append((f, inner_file))\n else:\n LOGGER.error(\"Unwanted files found at {} or {}.\".format(f, inner_path))\n sys.exit(-1)\n return metadata", "def list_dir(self, path):", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def test_invalid_csv(self):\n cwd=os.getcwd()\n url=\"http://stackoverflow.com/questions/17730173/python-cant-get-full-path-name-of-file\"\n with self.assertRaises(TypeError):\n requester.url_to_csv(url,\"{0}/{1}.csv\".format(cwd,'tester'))", "def csvp(startingPath, csv_ext='.csv'):\n print 'walking up path=', startingPath\n csvfn = [os.path.join(root, filename)\n for root, dirnames, filenames in os.walk(startingPath)\n for filename in filenames if filename.endswith(csv_ext)]\n print 'list is ', len(csvfn), ' images long'\n print 'starting with', csvfn[0]\n print 'ending with', csvfn[-1]\n return csvfn", "def get_result_filenames(self,directory):\n return [os.path.join(directory,name) for name in os.listdir(directory)\n if os.path.isfile(os.path.join(directory,name)) and\n os.path.splitext(name)[1].lower() == '.trf']", "def process_directory(dir_path, items):\n result = []\n for item in items:\n name = os.path.join(dir_path, item)\n if os.path.isfile(name) and not os.path.islink(name):\n for mask in masks:\n if fnmatch.fnmatch(name, mask):\n result.append(os.path.abspath(name))\n break\n return result", "def find_picard_files(file_and_dir_names):\n filenames = []\n for tgt in file_and_dir_names:\n if os.path.isdir(tgt):\n # Collect the target coverage files from this directory tree\n fnames = subprocess.check_output(['find', tgt,\n '-name', '*targetcoverage.csv']\n ).splitlines()\n if not fnames:\n raise RuntimeError(\"Given directory %s does not contain any \"\n \"'*targetcoverage.csv' files.\"\n % tgt)\n filenames.extend(fnames)\n elif os.path.isfile(tgt):\n filenames.append(tgt)\n else:\n raise ValueError(\"Given path is neither a file nor a directory: %s\"\n % tgt)\n filenames.sort()\n return filenames", "def fullpathlist(path):\n try:\n return [os.path.join(path, filename) for filename in os.listdir(path)]\n except OSError:\n return []", "def get_filepaths(extract_dir):\n\n index = []\n labels = []\n _extract_dir = os.path.join(extract_dir, 'UCF-101')\n for folder in os.listdir(_extract_dir):\n labels.append(folder)\n folderpath = os.path.join(_extract_dir, folder)\n\n if not os.path.isdir(folderpath):\n continue\n\n for filename in os.listdir(folderpath):\n if 'avi' not in filename:\n continue\n\n if filename[0] == '.':\n continue\n\n filepath = os.path.join(folderpath, 
filename)\n\n if os.path.exists(filepath):\n index.append(filepath)\n else:\n print(filepath)\n return index, labels", "def get_file_names():\n all_file_names = []\n cwd = os.getcwd()\n # Change to dir with result files to analyze\n os.chdir(args.dir)\n \n for file in glob.glob(\"*.csv\"):\n all_file_names.append(file)\n\n # Return to current working directory\n os.chdir(cwd)\n return all_file_names", "def input_files_from_path(path):\n import glob\n input_files = None\n if type(path) is list:\n input_files = []\n for p in path:\n if '*' in p:\n input_files.extend(glob.glob(p))\n else: # neither wildcard nor comma separated list\n input_files.append(p)\n else:\n if ',' in path:\n input_files = path.split(',')\n elif '*' in path:\n input_files = glob.glob(path)\n else: # neither wildcard nor comma separated list\n input_files = [path]\n input_files = [os.path.abspath(f) for f in input_files]\n return [f for f in input_files if os.path.exists(f) or f.startswith('/store')]", "def FindCheckerFiles(path):\n if not path:\n Logger.fail(\"No source path provided\")\n elif os.path.isfile(path):\n return [ path ]\n elif os.path.isdir(path):\n foundFiles = []\n for root, dirs, files in os.walk(path):\n for file in files:\n extension = os.path.splitext(file)[1]\n if extension in [\".java\", \".smali\"]:\n foundFiles.append(os.path.join(root, file))\n return foundFiles\n else:\n Logger.fail(\"Source path \\\"\" + path + \"\\\" not found\")", "def get_files_from_directory(path):\n files = [f for f in listdir(path) if isfile(join(path, f))]\n return files", "def parse_csv(csv_path):\n song_list = []\n\n try:\n with open(csv_path, encoding='utf-8') as playlist:\n print(\"Parsing \" + csv_path)\n reader = csv.reader(playlist, delimiter=',')\n next(reader) # skip csv header\n for row in reader:\n song_list.append(row[2] + \" - \" + row[1])\n # todo: parse CSV, then check to see which songs already exist in current dir\n # move non-existent results to new list and return that\n except IndexError as error:\n # consider validating playlists when parsing\n # from API on web server instead\n print(str(error))\n \n return song_list", "def file_list_emptydirs(load):\n # TODO - implement this\n _init()\n\n return []", "def read_dir(dir_name):\n\n only_files = []\n only_dirs = []\n\n for f in listdir(dir_name):\n if isfile(join(dir_name, f)):\n only_files.append(dir_name + \"/\" + f)\n else:\n only_dirs.append(dir_name + \"/\" + f)\n\n print(only_files)\n print(only_dirs)", "def get_files(path: str) -> List[str]:\n if not isdir(path):\n return [path] # its expected to return a list each time even if its a single element\n return [file for fileOrDir in listdir(path) for file in get_files(path + '/' + fileOrDir)]\n # return list of each file returned by the recursive call getFiles(fileOrDir) on\n # each fileOrDir in listdir(path)", "def filePaths(directory_with_files):\n\n # get a list of file names in directory\n list_of_files = os.listdir(directory_with_files) \n\n # join directory path and file name to get full paths to files\n filepaths = [os.path.join(directory_with_files, filename) for filename in list_of_files]\n\n return filepaths", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def test_only_files(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = ['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n 'meme monty python',\n ]\n need_result_new = [os.path.join(dummy_folder, 'memes', 
x) for x in need_result[:-1]]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=True)\n self.assertEqual(sorted(result), sorted(need_result_new))\n\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=False)\n self.assertEqual(sorted(result), sorted(need_result_new))", "def get_all_metrics(dir):\r\n file_lst = os.listdir(dir)\r\n file_lst = list(filter(lambda x: re.findall(r'\\.csv$',x), file_lst))\r\n return file_lst", "def pathfinder(Input):\n while True:\n if Input[-4::] == '.csv':\n return Input\n else:\n Input = input('Please enter a valid csv file: ')", "def list_sar_directory(directory_path: str) -> list:\n path_generator = Path(directory_path).rglob('*.tif')\n return sorted([path.name for path in path_generator if path.is_file()])", "def getFiles(directory):\n # os.listdir only for locally downloaded files\n _files=[]\n for item in os.listdir(directory):\n path = os.path.join(directory, item)\n if not os.path.isdir(path) and \".lhe.gz\" in path:\n _files.append(path)\n elif os.path.isdir(path):\n getFiles(path)\n return _files", "def get_files(path, formats=[]):\n\n # Uses abs path as the directory\n absolute = abspath(path)\n all_files = os.listdir(absolute)\n\n # Get the absolute path of each file\n absolute_files = [\"/\".join([absolute, i]) for i in all_files]\n\n # Filter out non-files and return\n filtered_files = [f for f in absolute_files if os.path.isfile(f)]\n\n # Filter out unwanted file types (if requested)\n if formats:\n filtered_files = [f for f in filtered_files if is_filetype(f, formats)]\n \n return filtered_files", "def _list_dir(self):\n return [os.path.join(self._path, fn) for fn in os.listdir(self._path)\n if not fn.endswith(self._fs_transaction_suffix)]", "def scan_directories(data_dir, file_filter):\n\n root = os.walk(data_dir)\n\n print('Scanning for files...')\n output = []\n\n for directory in root:\n\n files = directory[2]\n\n # Valid dataset contains video files of both halves and an accompanying label\n if file_filter(files):\n output.append(directory[0])\n\n print('Done')\n\n return output", "def list_of_files(path):\r\n files_list=[]\r\n path = os.path.abspath(path)\r\n\r\n #if the path is a file name, returns a list of a single file name\r\n if os.path.isfile(path):\r\n files_list.append(path)\r\n #if the path is a directory name, returns a list of all the file names anded with .asm\r\n else:\r\n for file in os.listdir(path):\r\n if file.endswith(\".asm\"):\r\n files_list.append(os.path.join(path, file))\r\n return files_list", "def filesInDir(self, path=None, pattern=None):\n if path is None:\n path = self.myDir\n if os.path.isfile(path):\n fileList = [path]\n else:\n fileList = os.listdir(path)\n if pattern is None:\n return fileList\n results = []\n for fileName in fileList:\n if pattern in fileName:\n results.append(fileName)\n return results", "def test_filter_files(self):\n expected = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1\", False),\n ]\n files = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir2/fichier2\", False),\n (\"/subdir2/fichier3\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1/fichier1\", False),\n (\"/subdir1/subsubdir1/\", False),\n ]\n self.assertEqual(\n list(self.path_translator.filter_files(files, \"/subdir1\")),\n expected)", "def list_of_files(self, dirname): \n\n list_of_files = os.listdir(dirname)\n all_files = []\n\n for entry in 
list_of_files:\n full_path = os.path.join(dirname, entry)\n\n if os.path.isdir(full_path):\n all_files = all_files + self.list_of_files(full_path)\n else:\n all_files.append(full_path)\n\n return all_files", "def read_filepaths(self, directory):\n folder_paths = [os.path.join(directory, folder) for folder in os.listdir(directory) if not folder.startswith('.')]\n filepaths = [[os.path.join(cur_folder, cur_file) for cur_file in os.listdir(cur_folder)] for cur_folder in folder_paths]\n return filepaths", "def list_dir_no_hidden(path):\n\n return glob(os.path.join(path, \"*\"))", "def empty_dir(value):\n return not os.listdir(value)", "def _filenames(self, dir_or_file):\n if os.path.isdir(dir_or_file):\n return glob(os.path.join(dir_or_file, \"*.txt\"))\n else:\n return [dir_or_file]", "def check_for_missing_files(self, path):\n return None", "def find_file(filename, std_dirs, paths):\n\n # Check the standard locations\n for dir in std_dirs:\n f = os.path.join(dir, filename)\n print('looking for', f)\n if os.path.exists(f):\n return []\n\n # Check the additional directories\n for dir in paths:\n f = os.path.join(dir, filename)\n print('looking for', f)\n if os.path.exists(f):\n return [dir]\n\n # Not found anywhere\n return None" ]
[ "0.70320946", "0.67621905", "0.6713793", "0.6638676", "0.66279495", "0.65001047", "0.6399279", "0.63733476", "0.63562477", "0.6341865", "0.63009876", "0.6289475", "0.6229904", "0.62287295", "0.6151689", "0.61245257", "0.61133426", "0.61005086", "0.60860026", "0.60531443", "0.604162", "0.6030636", "0.60159886", "0.6001738", "0.59905785", "0.59879297", "0.59459454", "0.59454405", "0.59399164", "0.59363717", "0.5930648", "0.59152555", "0.5914058", "0.5885302", "0.58749914", "0.58715594", "0.5862286", "0.5844435", "0.584397", "0.5818252", "0.5813417", "0.5812367", "0.5801704", "0.57921594", "0.5786918", "0.5786562", "0.5781429", "0.5778823", "0.57747555", "0.57724", "0.5768984", "0.57664436", "0.57584566", "0.57552266", "0.5752926", "0.574142", "0.57326704", "0.5724406", "0.572156", "0.5720801", "0.5714751", "0.57092744", "0.5708139", "0.57033455", "0.57008916", "0.56980234", "0.5689689", "0.5681382", "0.5677379", "0.5672884", "0.56707233", "0.56670034", "0.5666013", "0.56645966", "0.56535786", "0.56532645", "0.5640033", "0.56272906", "0.562575", "0.5619015", "0.5609778", "0.5603008", "0.56027937", "0.5602559", "0.55966365", "0.5585965", "0.55818695", "0.55810004", "0.55785674", "0.55726993", "0.5572462", "0.55692226", "0.55684155", "0.55679524", "0.55627507", "0.5561525", "0.55601674", "0.5558856", "0.5554204", "0.5540551" ]
0.6132733
15
Description When given a directory path that has observed as the parent folder and a csv file with the desired name Expected Result returns a dictionary with the right data
def test_observed_folder_path(self): #setup filepath = ".data/observed/Abadia-BA_-11.56_-37.52.csv" expected_result = { "type": "observed", "city": "Abadia", "state": "BA", "coordinates": ['-11.56', '-37.52'], "observed": {} } #result assert extractor.get_metadata_from_filepath(filepath) == expected_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_descendant_file_path(parent_path):\n csv_relative_path = []\n for root, dirs, files in os.walk(parent_path):\n for file in files:\n words = file.split(r'.')\n if words[-1] == 'csv':\n file_path = os.path.join(parent_path, file)\n csv_relative_path.append(file_path)\n return csv_relative_path", "def add_path_dict(input_dict: dict, start_path: str, file_path: str):\n # Determine relative path\n relpath = os.path.relpath(file_path, start=start_path)\n\n # If only file remaining, store in dict, otherwise go 1 level deeper\n if relpath == os.path.basename(file_path):\n input_dict[os.path.splitext(relpath)[0]] = pd.read_csv(file_path,\n sep='\\t')\n else:\n parent_dir = relpath.split(os.sep)[0]\n if parent_dir not in input_dict.keys():\n input_dict[parent_dir] = {}\n add_path_dict(input_dict=input_dict[parent_dir],\n start_path=os.path.join(start_path, parent_dir),\n file_path=file_path)", "def csv_path(name):\n return \"./data/%s\" % name", "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def _get_csv_path(name):\n return os.path.join(cwd, 'output/app_info', name)", "def create_files_dict(csv_file_name: str):\r\n\r\n SKUs = [] # list of SKU's in the csv file\r\n with open(csv_file_name, 'r') as csv_fd:\r\n csv_reader = csv.reader(csv_fd)\r\n for line in csv_reader:\r\n for SKU in line:\r\n SKUs.append(SKU)\r\n\r\n # creating a list of file extensions [.ext, ...]\r\n file_extensions = []\r\n for SKU in SKUs:\r\n for dir_file in os.listdir():\r\n if SKU in os.path.splitext(dir_file)[0]:\r\n dir_file_ext = os.path.splitext(dir_file)[1]\r\n if dir_file_ext not in file_extensions:\r\n file_extensions.append(dir_file_ext)\r\n file_extensions.sort() # sorting by ascii for constant format view\r\n # print(\"debug:::file_extensions\", file_extensions)\r\n\r\n ext_format_dict = {} # base format for creating extension dict (to be copied for each iteration)\r\n for ext in file_extensions:\r\n ext_format_dict[ext] = ''\r\n\r\n files = {}\r\n for filename_base in SKUs:\r\n for dir_file_0 in os.listdir():\r\n current_file_extensions = ext_format_dict.copy() # reset dict values for each file\r\n if filename_base in os.path.splitext(dir_file_0)[0]:\r\n # need to take the dir_file_base and re-iterate over listdir to find all exact name filenames\r\n for dir_file_1 in os.listdir():\r\n if os.path.splitext(dir_file_0)[0] == os.path.splitext(dir_file_1)[0]:\r\n dir_file_base = os.path.splitext(dir_file_1)[0]\r\n dir_file_ext = os.path.splitext(dir_file_1)[1]\r\n if dir_file_ext in list(current_file_extensions.keys()):\r\n current_file_extensions[dir_file_ext] = 'V'\r\n files[dir_file_base] = current_file_extensions\r\n\r\n return files", "def process_csv(csv_file=None):\n if csv_file:\n _process_csv_data(csv_file, USER_DATA_MAP)\n else:\n csv_files_list = [os.path.join(DATA_DIR, f) for f in os.listdir(DATA_DIR) if 
f.endswith('.csv')]\n for fl in csv_files_list:\n _process_csv_data(fl, USER_DATA_MAP)\n return USER_DATA_MAP", "def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names", "def __setupPaths(self):\n self.csv_file_names = filter(\n (lambda x: not re.match(\".*\\\\.csv$\", x) is None),\n os.listdir(self.path))", "def update_csv():\n return os.listdir('./data')", "def test_findCSV(self,\n filename=\"page-views.csv\",\n input_folder='../../input/raw-data/'):\n\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = input_folder + filename\n self.assertEqual(csv_file, expected_output)", "def csv_dir(self):\n return op.join(self.root_dir, 'csv')", "def walk_csv_data(**kwargs):\n for path, name in walk(**kwargs):\n if path.endswith('.csv'):\n with open(path, newline='') as f:\n text = f.read()\n reader = csv.DictReader(StringIO(text))\n try:\n fieldnames = reader.fieldnames\n rows = list(reader)\n yield (path, name, text, fieldnames, rows)\n except csv.Error:\n continue", "def read_files(path, file_name):\n\n if os.path.exists(\n r'{}\\{}_dynamic.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_static.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_ego.csv'.format(path, file_name)):\n with open(r'{}\\{}_dynamic.csv'.format(path, file_name)) as tmp_dynamic:\n dynamic_csv = pd.read_csv(tmp_dynamic)\n print('Dynamic csv file found')\n with open(r'{}\\{}_static.csv'.format(path, file_name)) as tmp_static:\n static_csv = pd.read_csv(tmp_static)\n print('Static csv file found')\n with open(r'{}\\{}_ego.csv'.format(path, file_name)) as tmp_ego:\n ego_csv = pd.read_csv(tmp_ego)\n print('Ego csv file found')\n return ego_csv, dynamic_csv, static_csv\n\n else:\n print('No available data')\n sys.exit(0)", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out", "def get_items_path() -> Path:\n return 
Path(os.path.join(Path(os.path.realpath(__file__)).parent, \"items.csv\"))", "def __init__(self):\r\n self.parent_directory = \"..\\csv\\\\\"\r\n self.file_parse_error_msg = \"An error occurred while paring the file\"", "def SearchObjects(directory, endwith='.csv'):\n directory = os.path.normpath(directory)\n if not os.path.isdir(directory):\n raise IOError(\"The directory \" + directory + \" is not exist\")\n objects = {}\n for curpath, subdirs, files in os.walk(directory):\n for fileType in (file for file in files if file.endswith(endwith)):\n path = os.path.join(curpath, fileType)\n label = path.split(os.path.sep)[-2]\n if label not in objects:\n objects[label] = []\n objects[label].append(path)\n \n return objects[label]", "def get_key_data_filepath():\n global key_filepath, directory\n filename = 'key.csv'\n key_filepath = os.path.join(directory, filename)", "def pathfinder(Input):\n while True:\n if Input[-4::] == '.csv':\n return Input\n else:\n Input = input('Please enter a valid csv file: ')", "def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)", "def get_csv_paths(top_path):\n # exclude is a set holding all dirnames to be excluded\n exclude = {\"fails\", \"archive\", \"exclude\", \"fail\", \"backup\"}\n # files is a dict that defaults to lists, so values can be appended to keys\n files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n dirnames[:] = [d for d in dirnames if d.lower() not in exclude]\n\n for filename in filenames:\n\n # gather .csv and .tsv files\n if \".csv\" in str(filename).lower() or \".tsv\" in str(filename).lower():\n # Add filename to the key of dirpath\n files[dirpath].append(filename)\n return files", "def managecsv(data):\n\n checkfolderdata()\n if not datafileexist(data[7]):\n createcsv(data[7])\n managecsv(data)\n else:\n addcsv(data, data[7])", "def task_lst_gen(dirr, csv_path):\n train_file_lst, val_file_lst, test_file_lst = files_from_csv(csv_path)\n\n task_dict = {}\n out_prefix = '/work/jfeins1/maestro/dataset-v3/'\n for subdirs, dirs, files in os.walk(dirr):\n for file in files:\n filepath = subdirs + os.sep + file\n\n if file in train_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'train/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n if file in test_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'test/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n if file in val_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'val/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n task_lst = open('/work/jfeins1/maestro/encoding_gen_task.lst', 'w')\n for uid, d in task_dict.items():\n print(d['in'], d['out'], file=task_lst)", "def get_data(self, csv_file):\n pass", "def incoming_paths(root_dir, parent_dir):\n return {\n 'F1' : os.path.join(root_dir, \"F1\"),\n 'F' : os.path.join(parent_dir, \"F\"),\n 'F2' : os.path.join(parent_dir, \"F2-in\"),\n 'D1' : os.path.join(root_dir, \"D1\"),\n 'D' : os.path.join(parent_dir, 
\"D\"),\n 'D2' : os.path.join(parent_dir, \"D2-in\"),\n }", "def sample_data_path(name):\n import os.path as op\n data_dir = op.join(op.dirname(__file__), \"data\")\n data_path = op.join(data_dir, name + \".csv\")\n return op.abspath(data_path)", "def get_csv_data(csv_path: str, img_dir: str) -> pd.DataFrame:\r\n data = pd.read_csv(csv_path)\r\n data['title'] = data['title'].apply(preprocess_titles)\r\n data['image'] = data['image'].apply(abs_path, args=(img_dir,))\r\n return data", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def check_for_csv(inp_dict):\n if inp_dict[\".csv\"]:\n for path in inp_dict[\".csv\"]:\n csv_path = convert_csv(path)\n inp_dict[\".xls\"].append(csv_path)\n inp_dict[\"del\"].append(csv_path)\n inp_dict[\"out\"].append(csv_path)\n inp_dict[\".csv\"] = []\n return inp_dict", "def get_data(self):\r\n\r\n # Find the absolute path for the root dir (04-Decision-Science)\r\n # Uses __file__ as absolute path anchor\r\n root_dir = os.path.abspath('')\r\n\r\n # Use os library for Unix vs. Widowns robustness\r\n xls_path = os.path.join(root_dir, 'data')\r\n\r\n file_names = [f for f in os.listdir(csv_path) if f.endswith('.xls')]\r\n\r\n def key_from_file_name(f):\r\n if f[-4:] == '.xls':\r\n return f[:-4]\r\n\r\n # Create the dictionary\r\n data = {}\r\n for f in file_names:\r\n data[key_from_file_name(f)] = pd.read_excel(os.path.join(xls_path, f))", "def _p_profile_dir(self):\n # Read in the absolute abund and meta file\n # write back out as csv\n df_path = False\n for file_ in os.listdir(self.sub_dir):\n if 'profiles.absolute.abund_and_meta.txt' in file_:\n df_path = os.path.join(self.sub_dir, file_)\n \n if df_path:\n df = pd.read_table(df_path)\n df.to_csv(df_path.replace('.txt', '.csv'), sep=',', index=False)\n\n # Now delete all other files except for the addtional info file\n for file_ in [_ for _ in os.listdir(self.sub_dir) if not any(substring in _ for substring in ['additional_info.txt', 'profiles.absolute.abund_and_meta.csv'])]:\n if not os.path.basename(self.base_dir) in file_:\n print(f'Deleting {file_}')\n os.remove(os.path.join(self.sub_dir, file_))\n\n # Now rename the files\n # Create a rename dict\n # Key is old full path, value is new full path\n rename_dict = {}\n parent_directories = self._get_parent_dir_list(self.sub_dir)\n\n # Rename the count table\n count_file = [_ for _ in os.listdir(self.sub_dir) if 'abund_and_meta' in _]\n assert (len(count_file) == 1)\n count_file_old = count_file[0]\n if os.path.basename(self.base_dir) not in count_file_old:\n match = re.search(\"profiles\", count_file_old)\n if match is None:\n raise RuntimeError\n # we need to discard from the match onwards\n new_count_file = count_file_old[match.span()[0]:]\n ext = new_count_file.split('.')[-1]\n new_count_file = new_count_file.replace('.', '_')\n new_count_file = new_count_file.replace('_profiles', '')\n # insert the version number\n new_count_file = new_count_file.replace(f'_{ext}', f'_{self.version_string}.{ext}')\n # add the directory structure info\n new_count_file = '_'.join(parent_directories) + f'_{new_count_file}'\n # add to rename dict\n rename_dict[os.path.join(self.sub_dir, count_file_old)] = os.path.join(self.sub_dir, new_count_file)\n\n # Rename the additional output 
file\n info_file = [_ for _ in os.listdir(self.sub_dir) if 'additional' in _]\n assert (len(info_file) == 1)\n info_file_old = info_file[0]\n if os.path.basename(self.base_dir) not in info_file_old:\n match = re.search(\"additional\", info_file_old)\n if match is None:\n raise RuntimeError\n # we need to discard from the match onwards\n new_info_file = info_file_old[match.span()[0]:]\n # add the directory structure info\n new_info_file = '_'.join(parent_directories) + '_symportal_output_' + new_info_file\n # insert the version number\n new_info_file = new_info_file.replace('.txt', f'_{self.version_string}.txt')\n # add to rename dict\n rename_dict[os.path.join(self.sub_dir, info_file_old)] = os.path.join(self.sub_dir, new_info_file)\n\n # At this point we can do the renaming\n self._rename_from_dict(rename_dict)", "def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths", "def __init__(self, dir_path= 'static/Irma data-20210525'):\n \n A=csv_to_dict(dir_path+\"\\A.csv\")\n B=csv_to_dict(dir_path+\"\\B.csv\")\n C=csv_to_dict(dir_path+\"\\C.csv\")\n D=csv_to_dict(dir_path+\"\\D.csv\")\n\n\n self.dicts_list=[A,B,C,D]\n self.image_codes=csv_to_dict(dir_path+\"\\image_codes.csv\")", "def import_directory_csv(d_in, d_out, target_column, merge_columns):\n\n INPUT_FILES = grab_files(\"*.csv\", d_in)\n\n if not INPUT_FILES:\n logger.warning(\"No matching CSV files found, exiting\")\n exit(2)\n\n for f_csv in INPUT_FILES:\n f_csv_out = os.path.join(d_out, os.path.basename(f_csv))\n vals = (f_csv, f_csv_out, target_column, merge_columns)\n import_csv(vals)", "def csv_files_in_folder_merger(file):\n stack = []\n for file_in_list in file:\n stack.append(file_to_generator(file_in_list))\n stacklijst = pd.concat(stack)\n\n return stacklijst", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def collect_csv(source_dir, dest_dir):\n source_dir = Path(source_dir)\n dest_dir = Path(dest_dir)\n for csvfile in source_dir.rglob(\"*.csv\"):\n species = normalized_species(csvfile)\n species_dir = dest_dir / species\n species_dir.mkdir(exist_ok=True, parents=True)\n date_time = normalized_datetime(csvfile)\n print(f\"Renaming {csvfile} to {species_dir / (date_time + '.csv')}\")\n csvfile.rename(species_dir / (date_time + \".csv\"))", "def read_csv_file(csv_file_path):\n file_names = []\n file_labels = []\n with open(csv_file_path, 'r') as files_path:\n path_list = csv.DictReader(files_path)\n fieldnames = path_list.fieldnames\n for path in path_list:\n file_names.append(path[fieldnames[0]])\n file_labels.append(path[fieldnames[1]])\n return file_names, file_labels", "def _fetch(self,name):\n #\n #Find name's parent and fill that first....\n if name != '.':\n parent = _parent(name)\n pdentry = self[parent]\n if pdentry.contents is not None 
or pdentry.dtype == 'f':\n #Parent dentry is a file or has been enumareted.\n #So we should not have got here. This almost certainly\n #means that name does nto exist.\n raise KeyError(name)\n else:\n parent = ''\n pdentry = None \n\n \n ##Get entries iterable from FS\n entries = []\n try:\n entries = os.listdir(os.path.join(self.home,parent))\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise\n else:\n modlogger.debug(\"Cant reach:Non existent directory found in it's own parent.\")\n return\n\n #Make a dict of files a dir entries., adnd put itoput global name list\n contents = {}\n for dentry in entries:\n #Ignore private file reserved for internal use.\n if dentry[:2] == \"..\": continue\n\n sr = os.stat(os.path.join(self.home,parent,dentry))\n if stat.S_ISDIR(sr.st_mode):\n contents[dentry] = Dentry(dentry , 'd')\n elif stat.S_ISREG(sr.st_mode):\n contents[dentry] = Dentry(dentry , 'f')\n\n self.mymap[os.path.join(parent,dentry)] = contents[dentry]\n\n #Put the parents contents in it's own dentry entry too.`\n if pdentry: pdentry.contents = contents\n #Initiallise our root directory\n else:\n self.mymap['.'] = Dir_Entry('.',contents)\n #print contents", "def filepath(filename, data, root='/home/cyneo/Work/Scans/Processed Data/',\r\n filetype='.csv'):\r\n path = os.path.abspath(root + data + '/' + filename +\r\n ' ' + data + filetype)\r\n return path", "def get_data(self):\n \n with os.scandir(self.file_path) as collection_of_files:\n files_found = [file.name.split('.')[0] for file in collection_of_files \n if (file.name.split('.')[0].lower().strip() in self._data_requirements.required_file_names \n and file.name.endswith('.csv'))]\n\n self.check_missing_files(files_found)\n \n self._data = DictObjectView(self.read_in_files(files_found))", "def generate_Struct(csv_file, pathToDir):\n\n df = extract_structure_from_csv(csv_file)\n\n df = df[ESSENTIAL_CSV_COLUMNS]\n\n for session_kwargs in df.to_dict('index').values():\n session = AnDOData(**session_kwargs)\n session.basedir = pathToDir\n session.generate_structure()", "def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag = True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == 
\"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print \"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = \"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n 
finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. 
Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, 
data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def path_to_dict(self, someDir, level=9001, relativeFolders=True, relativeFiles=False):\n someDir = someDir.rstrip(os.path.sep)\n assert os.path.isdir(someDir)\n numSep = someDir.count(os.path.sep)\n\n outputDict = {}\n for root, dirs, files in os.walk(someDir):\n for d in dirs + files:\n path = os.path.join(root, d)[(len(someDir)):]\n path = path.rstrip(os.sep).lstrip(os.sep)\n pathSplit = paths.os_path_split_asunder(path)\n if os.path.isfile(os.path.join(root, d)) and not relativeFiles:\n pathSplit[-1] = os.path.join(root, d)\n if len(pathSplit) == 1:\n outputDict[pathSplit[0]] = {}\n else:\n nestedDict = self.list_flattened_to_dict(pathSplit)\n mergedDict = dict(mergedicts(outputDict, nestedDict))\n for key in nestedDict.keys():\n outputDict = dict(outputDict, **nestedDict)\n outputDict = dict(outputDict, **mergedDict)\n\n numSepCurrent = root.count(os.path.sep)\n if numSep + level <= numSepCurrent:\n del dirs[:]\n return outputDict", "def read_dir(directory):\n results = AttrDict()\n results.iterations = pd.read_csv(os.path.join(directory, 'iterations.csv'),\n index_col=0)\n results.solutions = AttrDict()\n for i in results.iterations.index.tolist():\n iteration_dir = os.path.join(directory, '{:0>4d}'.format(i))\n fmt = _detect_format(iteration_dir)\n logging.debug('Iteration: {}, Format detected: {}'.format(i, fmt))\n try:\n if fmt == 'netcdf':\n sol_path = os.path.join(iteration_dir, 'solution.nc')\n results.solutions[i] = read_netcdf(sol_path)\n else:\n sol_path = iteration_dir\n results.solutions[i] = read_csv(sol_path)\n logging.debug('Read as {}: {}'.format(fmt, sol_path))\n except IOError as err:\n logging.warning('I/O error in `{}` at iteration `{}`'\n ': {}'.format(iteration_dir, i, err))\n # results.solutions[i] = AttrDict() # add an empty entry\n continue\n return results", "def csvp(startingPath, csv_ext='.csv'):\n print 'walking up path=', startingPath\n csvfn = [os.path.join(root, filename)\n for root, dirnames, filenames in os.walk(startingPath)\n for filename in filenames if filename.endswith(csv_ext)]\n print 'list is ', len(csvfn), ' images long'\n print 'starting with', csvfn[0]\n print 'ending with', csvfn[-1]\n return csvfn", "def samplesheet_path_fixture(fixtures_dir: Path) -> Path:\n _file_path = fixtures_dir / \"samplesheet.csv\"\n return _file_path", "def __init__(self, path=None):\n super().__init__(path=path)\n self.path += '{}.csv'", "def collect_data(folder):\n folder = pathlib.Path(folder)\n cases = []\n for case_folder in folder.iterdir():\n print(f'start collecting data for location {case_folder.name}')\n for tr_folder in case_folder.iterdir():\n case = calculate_values(tr_folder)\n cases.append(case)\n \n df = pd.DataFrame(cases)\n print(folder.parent.joinpath(f'{folder.stem}.csv'))\n df.to_csv(folder.parent.joinpath(f'{folder.stem}.csv'), index=False)", "def process_input_data(input_data_path):\n if os.path.isdir(input_data_path):\n input_data_glob = glob.glob(input_data_path + \"/*.csv\")\n else:\n if is_gcs_path(input_data_path):\n # Download the input to a local\n with tempfile.NamedTemporaryFile() as hf:\n input_data = hf.name\n\n logging.info(\"Copying %s to %s\", input_data_path, input_data)\n input_data_gcs_bucket, input_data_gcs_path = split_gcs_uri(\n input_data_path)\n\n logging.info(\"Download bucket %s object %s.\", input_data_gcs_bucket,\n input_data_gcs_path)\n bucket = 
storage.Bucket(storage.Client(), input_data_gcs_bucket)\n storage.Blob(input_data_gcs_path, bucket).download_to_filename(\n input_data)\n else:\n input_data = input_data_path\n\n ext = os.path.splitext(input_data)[-1]\n if ext.lower() == '.zip':\n zip_ref = zipfile.ZipFile(input_data, 'r')\n zip_ref.extractall('.')\n zip_ref.close()\n # TODO: Hardcoding the file in the Archive to use is brittle.\n # We should probably just require the input to be a CSV file.:\n csv_file = 'stackoverflow-questions.csv'\n else:\n csv_file = input_data\n\n input_data_glob = glob.glob(csv_file)\n\n return input_data_glob", "def import_to_df(\n list: str,\n path: str = \"competition_data\"\n ) -> dict:\n\n df_dict = {}\n for file in list:\n if 'csv' not in file:\n continue\n df = pd.read_csv(\"/\".join([path, file]))\n # remove extension\n name = file.split('.')[0]\n df_dict[name] = df\n \n return df_dict", "def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()", "def parse_csv_files(self, filter_fn=None):\n def filter_function(f):\n return f is not None and f.endswith(\".csv\")\n if not filter_fn:\n filter_fn = filter_function\n files = self.filter_files(None,filter_fn)\n dicts = {}\n for f in files:\n with open(f) as fh:\n dicts[f] = [r for r in csv.DictReader(fh)]\n return dicts", "def getPathfromCSV(flist, full_csv_list, outfile=None):\n # Get the file list\n fils, csvfils = [], []\n with open(flist, 'r') as fIn:\n for line in fIn:\n if line:\n fils.append(line.split('.')[0].strip())\n with open(full_csv_list, 'r') as fIn:\n for line in fIn:\n if line:\n csvfils.append([line.split('/')[-1].split('_')[0].strip(), # Filename only\n line.strip()]) # File path only\n \n # replace it with the path list\n paths = []\n for f in fils:\n if f in [o[0] for o in csvfils]:\n idx = [o[0] for o in csvfils].index(f)\n paths.append(csvfils[idx][1])\n else:\n print('Could not find: %s' %f)\n \n print('Retrieved %i paths (of %i)' %(len(paths), len(fils)))\n if outfile is not None:\n with open(outfile, 'w') as fOut:\n for p in paths:\n fOut.write(p)\n fOut.write('\\n')\n \n return paths", "def get_data_file():\n base_folder = os.path.dirname(__file__)\n # print(base_folder)\n return os.path.join(base_folder, 'data', 'Sacramentorealestatetransactions.csv')\n # print(filename)", "def open_some_data(the_file_name: str) -> dict:\n\n result: dict = open_csv(file_name=the_file_name)\n return result", "def gather_data(instance):\n paths = [instance.get('file1'), instance.get('file2')]\n if Path(instance.get('file1')).parents[0].is_dir() is True and Path(\n instance.get('file2')).parents[0].is_dir() is True:\n files = [f for f in paths if os.path.isfile(f)]\n if len(files) == 0:\n raise Exception('The files you passed do not exist!')\n dfs = []\n for file in files:\n try:\n if file.endswith('.csv'):\n dfs.append(pd.read_csv(file))\n\n else:\n raise Exception('Please pass a file ending in .csv')\n\n except Exception as exc:\n formatted = \"Unable to locate files! Please ensure you have provided accurate file paths. 
{}\".format(\n repr(exc))\n raise Exception(formatted)\n\n return dfs, instance\n\n else:\n raise Exception('Please pass a valid file path.')", "def build():\n for root, dirs, files in os.walk(IN_PATH):\n for filename in files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))", "def pop_list():\n # Create dict_list\n dict_list = {}\n for csvFilename in os.listdir('.'):\n if not csvFilename.endswith('.csv'):\n continue # skip non-csv files\n name_var = os.path.splitext(csvFilename)[0]\n print('Getting list of assets from [{0}]'.format(name_var))\n # print(name_var)\n var_list = []\n with open(csvFilename,'r') as csvFileObj:\n readerObj = csv.reader(csvFileObj)\n hrow = next(readerObj)\n asset_id = hrow.index('D_CODE')\n type_id = hrow.index('D_TYPE')\n for row in readerObj:\n if row[type_id] == 'PC' or row[type_id] == 'LT':\n var_list.append(row[asset_id])\n dict_list[str(name_var)] = var_list\n csvFileObj.close()\n return dict_list", "def test_search_file(self):\n base_dir = join(get_current_path(), 'samples', 'base_dir1')\n output_dir = join(get_current_path(), 'samples', 'base_dir1', 'result')\n files = search_files(base_dir, output_dir)\n self.assertTrue(self.verify_sub_folders(list(files.keys())))\n\n # sub folders under Concord is not counted, only files\n self.assertEqual(len(files['Concord']), 5)\n self.assertEqual(len(files['ListCo Equity']), 1)\n self.assertEqual(len(files['CLO Equity']), 2)\n self.assertEqual(files['ListCo Equity'][0], join(base_dir, 'ListCo Equity', 'Positions1219.xlsx'))", "def _read_source_data(self) -> pd.DataFrame:\n df = None\n try:\n logger.info(\"reading csv base file under simulation folder\", class_name=self.__class__.__name__)\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/simulation/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.warning(\"base file not processed, trying under unprocessed folder\",\n class_name=self.__class__.__name__)\n try:\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/unprocessed/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.error(\"base 
file not found... exiting\", class_name=self.__class__.__name__)\n exit(1)\n return df", "def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)", "def _get_parent_path(self):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"genes\")", "def walk_csv(self, filepath: str):\n with open(filepath, encoding='ISO-8859-1') as f:\n reader = csv.DictReader(f)\n for row in reader:\n logger.debug('Loading map {}'.format(row.get('id', None)))\n yield row", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def create_path_dict(save_path):\n act_fn = [sorted(['relu', 'antirelu', 'identity', 'tanh', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'tanh']),\n sorted(['relu', 'antirelu', 'sigmoid', 'tanh']),\n sorted(['relu', 'identity', 'sigmoid', 'tanh']),\n sorted(['antirelu', 'identity', 'sigmoid', 'tanh']),\n ['relu'],\n ['sigmoid'],\n ['tanh'],\n ['antirelu'],\n ['None']]\n # ['identity']]\n\n act_fn = ['_'.join(act) for act in act_fn]\n path_dict = defaultdict(list)\n for (filepath, dirname, filename) in os.walk(save_path):\n if 'results.json' in filename:\n for act in act_fn:\n temp = filepath.split('/')\n if act == temp[-1] or act == temp[-2]:\n path_dict[act].append(filepath)\n print(path_dict)\n return path_dict", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\lesson5\\\\data\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = ImportUnitTestData()\n result = mongo_insert.import_data(key, tmp_file)\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def main():\r\n parent_dir = 'D:\\\\Profession\\\\Intern\\\\Assignments\\\\Codes\\\\Assignement Codes\\\\Part 2\\\\data_dumps'\r\n\r\n if not (os.path.isdir(parent_dir)):\r\n raise Exception(\"The directory doesn't exist\")\r\n\r\n directories = []\r\n\r\n for directory in os.listdir(parent_dir):\r\n directories.append(os.path.join(parent_dir, directory))\r\n\r\n # The group_dic represents the dictionary with keys equal to the unique dates in the directories\r\n # And the values represent a list of all files that have the same date prefix across the data_dumps\r\n group_dic = grouping(directories)\r\n\r\n # Moving Files into New Directory\r\n move(group_dic, parent_dir)\r\n print(\"Files Moved Successfully\")", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in 
filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()", "def get_first_of_day(self, folder_before=None, day=datetime.today(), filename='Epikurve.csv'):\n folders = os.listdir(self.data_root_path)\n folders.sort(reverse=True)\n reached = folder_before is not None\n __folder_before = str(folder_before).split('/')[-1]\n for folder in folders:\n if reached:\n path_csv = self.data_root_path / folder / filename\n with open(path_csv) as f:\n first = True\n for x in csv.reader(f, delimiter=';'):\n if first:\n first = False\n continue\n ts = datetime.strptime(x[2], '%Y-%m-%dT%H:%M:%S')\n break\n if ts.date() <= day.date():\n return folder\n else:\n if folder == __folder_before:\n reached = True", "def data_abex_input_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_abex_input_dir(experiment_name) / iteration_csv", "def dirGenerator(datadirectory):\n\n subdirectories = [row for row in os.listdir(datadirectory) if '$' not in row]\n\n #iterate through subdirectories\n for day in subdirectories:\n\n #collect raw data set file names in sub directories\n fileNames = [row for row in os.listdir(datadirectory + day + '\\\\RawDataFiles\\\\')]\n\n #iterate over the raw datasets\n print 'There are ' + str(len(fileNames)) + ' datasets in ' + day\n for index, datafile in enumerate(fileNames):\n yield datadirectory + day + '\\\\RawDataFiles\\\\' + datafile, day, datafile, index", "def read_csv(folder):\n csv_paths = [(f, os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith('.csv') and '刑事' in f and '司法院-刑事補償_刑事' not in f and '最高' not in f]\n return csv_paths", "def handle(self, *args, **options):\n csv_filename = options['file_name']\n parent_name = options['parent']\n levels = 0\n\n with open(csv_filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n headings = next(csv_reader)\n\n # Determine the topic hierarchy levels\n for heading in headings:\n if heading.lower() in [\"topic\", \"topics\"]:\n levels += 1\n\n parent_topic = Topic.objects.get_or_create(name=parent_name)\n parent_topic_structure = TopicStructure.objects.get_or_create(topic=parent_topic[0],\n parent=None)\n generate_tree(levels, csv_reader, parent_topic_structure[0])", "def extract_csv_for_date(config, data_date): \n \n ### TODO: test config separately \n \n # print(config.DATA_ROOT)\n # print(data_date)\n \n # Raise an exception if attribute DATA_ROOT does not exist\n if not 'DATA_ROOT' in vars(config):\n raise AttributeError(\"Attribute DATA_ROOT does not exist\")\n \n # Raise an exception if DATA_ROOT does not exist\n if not os.path.exists(config.DATA_ROOT):\n raise NotADirectoryError(\"The path \" + config.DATA_ROOT + \" not found\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'METER_CHANNEL_DICT' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'SAMPLE_TIME' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n data_date_dt = parse(data_date)\n \n if data_date_dt > 
config.DATA_END_DATE:\n raise ValueError(\"data_date entered is greater than the DATA_END_DATE: \" + \n str(config.DATA_END_DATE))\n \n if data_date_dt < config.DATA_START_DATE:\n raise ValueError(\"data_date entered is less than the DATA_START_DATE: \" + \n str(config.DATA_START_DATE))\n \n # Get the year, month and and day from date entered\n data_year = data_date_dt.year\n data_month = data_date_dt.month\n data_day = data_date_dt.day\n \n # Get the corresponding path in the directory to look for the data for the day\n data_path = os.path.join(config.DATA_ROOT, str(data_year), \"{:02}\".format(data_month), \"{:02}\".format(data_day))\n # print(data_path)\n # Find the count of meters\n meter_count = len(config.METER_CHANNEL_DICT)\n\n # Dictionary to store the names of the resulting csv files\n meter_csv_names = {}\n \n # Get the down-sampling time\n sample_time = config.SAMPLE_TIME\n \n # Create a dictionary with keys are meter names and values as dataframes \n # containing the data for the day\n meter_collection = {}\n \n # for meter_name in config.METER_CHANNEL_DICT:\n # # Create an empty dataframe, the columns will be created later\n # meter_collection[meter_name] = pd.DataFrame()\n\n #print(meter_collection)\n if os.path.exists(data_path):\n # Walk through all the files in the directory for the day's data\n for dirpath, dirnames, files in os.walk(data_path, topdown=True):\n # `files` contains the names of all the files at the location\n if len(files) == 0:\n print(\"No files found for day: \" + data_path)\n continue\n for filename in files:\n # Get the netcdf files, these are files with `.nc` extension\n if filename.lower().endswith('.nc'):\n # For the particular file, find out the corresponding meter and channel \n [meter, channel] = extract_ppty(filename, config.METER_CHANNEL_DICT.keys())\n # Create an entry in the `meter_collection` dict if it does not exist yet\n if meter not in meter_collection:\n meter_collection[meter] = pd.DataFrame()\n # Form the resulting csv name from the meter name if it doesnt exist yet\n # They are of the type - meter_name@Timestamp@Duration@Frequency\n # For e.g.: PQube3@2017-11-01T080002Z@[email protected]\n #print(meter, channel)\n if meter not in meter_csv_names:\n meter_csv_names[meter] = '@'.join([meter, '@'.join(filename.split('@')[1:4])])[:-3] + '.csv'\n #print(meter_csv_names)\n # Get the full path of the csv\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # Only extract if not already extracted to csv\n if (not os.path.isfile(csv_name)):\n # Get the dataframe containing time and channel values\n channel_df = extract_data(dirpath, filename)\n # Give the dataframe column a name\n channel_df.columns = [channel]\n # Down-sample the data to the sampling time intended\n channel_resampled = data_resample(channel_df, sample_time)\n # If our meter dataframe is empty so far, i.e. if this is the \n # first channel being entered, then create a copy of the \n # resampled dataframe\n if meter_collection[meter].empty:\n meter_collection[meter] = channel_resampled.copy()\n ####################### \n # This `else` clause handles two cases:\n # 1. If the dataframe is not empty, then add other columns to\n # the dataframe. (the else case)\n # 2. Some days have data downloaded more than once, this means \n # that channels can occur more than once. (like 05/21/2018)\n #######################\n else:\n # If the channel already exists in the dataframe\n # then either the other file has updated data or \n # subsequent data. 
\n if channel in meter_collection[meter].columns:\n # Get index from total dataframe \n idx_1 = meter_collection[meter].index\n # Get index from file dataframe\n idx_2 = channel_resampled.index\n # Compare the two, if the index is contained within,\n # then **update** the channel's value for file's indices. \n if np.all(np.isin(idx_2, idx_1)):\n meter_collection[meter][channel].loc[idx_2] = channel_resampled.values.tolist()\n # If the index is not contained, append the file df to\n # the total dataframe\n else:\n meter_collection[meter] = meter_collection[meter].append(channel_resampled, sort=False)\n meter_collection[meter].sort_index(inplace=True)\n #######################\n # This data is resampled a second time to handle two cases:\n # 1. When appending a resampled dataframe to an already resampled dataframe, the last\n # index of the original dataframe and the first index of the new dataframe can have\n # the same time. Resampling the appended dataframe will eliminate the repetitions.\n # 2. If the new dataframe to be appended starts at a much later time, resampling the\n # appended dataframe will create rows of missing data (NaN) at the times with no\n # measurement values. This makes it easier to detect missing measurement values and\n # perform data imputation at a later phase.\n #######################\n meter_collection[meter] = data_resample(meter_collection[meter], sample_time)\n # If the channel does not already exist, then add the\n # file dataframe to the total df. \n else:\n meter_collection[meter] = meter_collection[meter].join(channel_resampled, how='outer')\n else:\n print(\"Path not found: \" + data_path)\n \n # Perform data imputation wherrever needed\n # print(meter_collection)\n meter_collection = data_impute(meter_collection)\n \n # Write the total dataframes to csv file\n for meter in meter_collection:\n # Reorganize the order of columns to match the database tables \n meter_channels = config.METER_CHANNEL_DICT[meter]\n # meter_collection[meter].reset_index(inplace=True)\n meter_collection[meter] = meter_collection[meter].reindex(columns=meter_channels[1:])\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # print(csv_name)\n # Only write csv if it does not exist yet\n if(not os.path.isfile(csv_name)):\n meter_collection[meter].to_csv(csv_name, header=False)\n\n return meter_csv_names", "def parse_directory(input_dir, start_date = datetime(1970,1,1), end_date = datetime(9999,12,31,23,59,59)):\n logging.debug(\"Beginning parse_directory {}\".format(input_dir))\n\n def parse_gzipped_directory(input_dir, start_date, end_date, infos, backup_stamps):\n \"\"\" Scans a gzipped directory. 
This one in different situation \"\"\"\n try:\n file_names = sorted(os.listdir(input_dir)) \n for file_name in file_names:\n if file_name.endswith(\".manifest.gz\") or file_name.endswith(\".manifest\"):\n tmp = parse_file(input_dir + \"/\" + file_name, start_date, end_date, backup_stamps)\n backup_stamps[tmp[\"backup_label\"]] = tmp[\"backup_timestamp_stop_ts\"]\n if tmp:\n infos.append(tmp)\n except OSError as e:\n logging.error(\"Failed to open directory\", exc_info=True)\n exit(1)\n except:\n raise\n \n if not path.isdir(input_dir):\n logging.error(\"The specified path is not a directory\")\n exit(1)\n\n infos = []\n backup_stamps = {}\n if path.exists(input_dir + \"/backup.info\"):\n\n dir_names = sorted(os.listdir(input_dir + \"/backup.history\"))\n for dir_name in dir_names:\n parse_gzipped_directory(input_dir + \"/backup.history/\" + dir_name, start_date, end_date, infos, backup_stamps)\n\n else:\n parse_gzipped_directory(input_dir, start_date, end_date, infos, backup_stamps)\n \n logging.debug(\"End parse_directory\")\n return infos", "def etl_csv_file(input_file_location):\n\n all_employee_dict = {}\n supervisor_employee_dict = {}\n header_row = 'employee_id,first_name,last_name,hire_date,supervisor_id'\n\n with open(input_file_location, mode='r') as employee_csv_file:\n\n # verify the header exists. If the header is not correct error out and return\n first_row = next(employee_csv_file, None)\n if first_row.rstrip() != header_row:\n return False, \"The header row in the %s CSV file must be %s\" % (input_file_location, header_row)\n\n employee_csv_reader = csv.reader(employee_csv_file)\n for count, row in enumerate(employee_csv_reader):\n\n # validate each date in the input file can be casted to datetime object\n try:\n hire_date = datetime.strptime(row[3], '%Y-%m-%d')\n except ValueError as e:\n print (e)\n message = \"There has been an error parsing a date in the input file. 
Please correct '{0}' at \" \\\n \"line '{1}' so that it follows follows the '2011-03-24' date format.\".format(row[3], count)\n return False, message\n\n employee_id = row[0]\n employee = {\n 'employee_id': employee_id,\n 'first_name': row[1],\n 'last_name': row[2],\n 'hire_date': hire_date,\n }\n\n supervisor_id = row[4]\n\n # This is used later to print out ALL employees according to requirements\n all_employee_dict[employee_id] = 'Sorry, this person is not a supervisor'\n\n # Append to list if key already exists\n group = supervisor_employee_dict.setdefault(supervisor_id, [])\n group.append(employee)\n\n return all_employee_dict, supervisor_employee_dict", "def traverse(self, path):\n\n path_list = [s for s in path.split('/') if len(s) > 0 ]\n # print(path)\n # print('files:', self.files)\n directory = self.files\n index = 0\n while index < len(path_list) and path_list[index] in directory:\n if type(directory[path_list[index]]) is str: # directory is a file\n break\n directory = directory[path_list[index]]\n index += 1\n print('info', directory, path_list[index:])\n return directory, path_list[index:]", "def init_map(project_name, destination_directory):\n project_path = os.path.join(destination_directory, project_name)\n map_filename = os.path.join(project_path, project_name + \"map.csv\")\n if not os.path.exists(project_path):\n os.makedirs(os.path.join(destination_directory, project_name))\n file_id = 0\n mapdf = pd.DataFrame(columns=METADATA_COLUMN_NAMES)\n open(map_filename, 'a').close()\n elif len(os.listdir(project_path)) == 1:\n file_id = 0\n mapdf = pd.DataFrame(columns=METADATA_COLUMN_NAMES)\n else:\n mapdf = pd.read_csv(map_filename)\n mapdf.columns = METADATA_COLUMN_NAMES\n file_id = mapdf['fid'].max() + 1\n return file_id, mapdf", "def data_characterization_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_characterization_dir(experiment_name) / iteration_csv", "def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files", "def Get_Player_Historic_Data(data_path, player_history_path): \n players = os.listdir(player_history_path) # Lists All The Player Folders in the Dir\n players_data = pd.read_csv(data_path + 'players_raw.csv')\n for ind in pbar(players_data.index): # ind in [0:693:1]\n # Get the Seasonal History\n player_path = players_data['first_name'][ind] + '_' + players_data['second_name'][ind] + '_' + str(players_data['id'][ind]) # Create player_history_path\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n # print(json.keys())\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n os.makedirs(player_history_path + player_path, exist_ok = True) # Create a new path for the player \n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his syeasonal history\n else: # However, if the player is within the existing directory\n if not os.path.isfile(player_history_path + player_path + \"/history.csv\"): # And a history file does not exist\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n history_df = 
pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his seasonal history\n # Get the Gameweek History\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID \n history_df_gw = pd.DataFrame(json['history']) # Extract Gameweek History\n if not history_df_gw.empty: # If history returned\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n os.makedirs(player_history_path + player_path, exist_ok = True) # Create the directory, exit\n history_df_gw.to_csv(player_history_path + player_path + '/gw.csv', encoding='utf-8', index = False) # Write the CSV", "def determine_subdirectories(file_path):\n\tsource_dir = file_path.replace('/data/','/data-cg1d/')\n\tlead_dir_1, subdir_1 = split_leading_directory(source_dir)\n\tlead_dir_2, subdir_2 = split_leading_directory(subdir_1)\n\tipts_dir, new_subdir = split_leading_directory(subdir_2)\n\tprint('\\n\\nsource_dir: {}\\nlead_dir_2: {}\\nsubdir_2: {}\\nipts_dir: {}\\n new_subdir: {}\\n\\n'.format(\n\t\tsource_dir, lead_dir_2, subdir_2, ipts_dir, new_subdir))\n\treturn source_dir, ipts_dir, new_subdir", "def batchProcessDirectory(self,baseDir,startTeam=1):\n\n import fnmatch\n\n # find all directories containing the target pattern\n resultDirs = {}\n patientNumbers = {}\n for root, dirnames, filenames in os.walk(baseDir):\n resultDirs[root] = []\n for filename in filenames:\n if fnmatch.fnmatch(filename, 'patient*tract_team*.vtk'):\n resultDirs[root].append(os.path.join(root, filename))\n patientNumbers[root] = filename[len('patient'):filename.index('_')]\n\n distanceMatrix = {}\n # calculate results for each pair of files in each directory\n for dir,files in resultDirs.items():\n if len(files) > 0:\n teamCount = len(files) / 2 # left and right per team\n teamRange = range(startTeam,startTeam+teamCount)\n for side in ('left','right'):\n for teamA in teamRange:\n for teamB in teamRange:\n fmt = 'patient%(patient)s_%(side)s_tract_team%(team)d.vtk'\n fileA = fmt % {'patient': patientNumbers[dir], 'side': side, 'team': teamA}\n fileB = fmt % {'patient': patientNumbers[dir], 'side': side, 'team': teamB}\n print (\"Compare %s with %s\" % (fileA, fileB))\n print((os.path.join(dir,fileA),os.path.join(dir,fileB)))\n\n # close the scene and calculate the distance\n slicer.mrmlScene.Clear(0) \n pathA, pathB = os.path.join(dir,fileA),os.path.join(dir,fileB)\n distanceMatrix[dir,side,teamA,teamB] = self.loadAndCalculate(pathA,pathB)\n print('\\n\\n' + str(distanceMatrix.keys()) + '\\n\\n')\n print(distanceMatrix)\n\n # write csv files\n import csv\n header = ['team',]\n for team in teamRange:\n header.append('team_%d' % team)\n for dir in resultDirs.keys():\n print ('checking %s' % dir)\n print (len(resultDirs[dir]))\n if len(resultDirs[dir]) > 0:\n for side in ('left','right'):\n fp = open(os.path.join(dir,\"../distanceMatrix-%s.csv\"%side),'w')\n csvWriter = csv.writer(fp, dialect='excel', quotechar='\"', quoting=csv.QUOTE_ALL)\n csvWriter.writerow(header)\n for teamA in teamRange:\n teamARow = ['team_%d' % teamA,]\n for teamB in teamRange:\n teamARow.append(distanceMatrix[dir,side,teamA,teamB])\n csvWriter.writerow(teamARow)\n fp.close()\n\n return(distanceMatrix)", "def zpDir(dir, nivelRecursao = 50, ddir = None, debug = 0):\n\n 
idArquivoCSV = 'csv'\n\n qArquivos = 0\n\n arquivos = []\n\n if debug:\n #print 'atual', dir, '...'\n pass\n try:\n nomes = os.listdir(dir)\n except os.error:\n print 'erro: nao foi possivel listar', dir\n nomes = []\n\n nomes.sort()\n sucesso = 1\n for nome in nomes:\n nomeCompleto = os.path.join(dir, nome)\n if ddir is not None:\n dfile = os.path.join(ddir, nome)\n else:\n dfile = None\n if not os.path.isdir(nomeCompleto):\n\n if nomeCompleto[-3:].lower() == idArquivoCSV:\n arquivos.append(nomeCompleto)\n qArquivos += 1\n\n if qArquivos == 2:\n #for i in arquivos:\n #int(i[-5:-4])\n #with open(i, 'rb') as f:\n # cf = csv.reader(f)\n\n if debug:\n print '*'*80\n print 'arquivos <csv> no diretorio atual'\n print arquivos\n print\n\n zpArquivo(arquivos, plot = 0)\n\n elif qArquivos > 2 and debug:\n print 'erro: mais de um arquivo <csv> no diretorio'\n print arquivos\n\n sucesso = 0\n\n elif nivelRecursao > 0 and \\\n nome != os.curdir and nome != os.pardir and \\\n os.path.isdir(nomeCompleto) and \\\n not os.path.islink(nomeCompleto):\n\n if not zpDir(nomeCompleto, nivelRecursao-1, dfile, debug):\n sucesso = 0\n\n return sucesso", "def test_get_filepaths(self):\n\n #setup\n get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn)\n \n #when\n test1 = get_filepaths(\"./dir1\", \".csv\")\n\n #result\n assert len(test1) == 2", "def read_directory(self, dirpath):\n raise NotImplementedError", "def scan_folder(folder):\n LOGGER.debug(\"Scanning folder: %s\", folder)\n for file in os.listdir(folder):\n if file.endswith(\".csv\"):\n yield os.path.join(folder, file)", "def convert_dataset(filename, subdirectory='data'):\n \"\"\" Single path case. \"\"\"\n if 'TRIP_ID' not in next(read_csv(filename, subdirectory)):\n return [DataPoint(timestamp=line['SAMPLE_DATE'],\n speed=line['SPEED'],\n lon=line['LON'],\n lat=line['LAT'],\n bearing=line['HEADING']) for line in read_csv(filename, subdirectory)]\n\n \"\"\" Multiple path case. 
\"\"\"\n paths = {}\n for line in read_csv(filename, subdirectory):\n next_point = DataPoint(timestamp=line['SAMPLE_DATE'],\n speed=line['SPEED'],\n lon=line['LON'],\n lat=line['LAT'],\n bearing=line['HEADING'])\n try:\n paths[line['TRIP_ID']].append(next_point)\n except KeyError:\n paths[line['TRIP_ID']] = [next_point]\n\n return list(paths.values())", "def _process_dir( d, files, columns, _file, mapping ):\n\t\tfhandles = {}\n\t\t\n\t\tif( d == \"template\"):\n\t\t\treturn\n\n\t\tfor f in files:\n\t\t\ttry:\n\t\t\t\tfhandles[f] = open(d + \"/\" + mapping[\"alias\"][f], 'r')\n\t\t\texcept IOError:\n\t\t\t\tprint (\"Error on opening file: \" + d + \"/\" + mapping[\"alias\"][f])\n\t\t\t\tsys.exit(1)\n\n\t\tflag = True\t\t\n\n\t\twhile( flag ):\n\n\t\t\tfor h in fhandles.values():\n\t\t\t\tline = h.readline()\n\n\t\t\t\tif( not line ):\n\t\t\t\t\tflag = False\n\t\t\t\t\tbreak\n\n\t\t\t\tline = line.rstrip()\n\n\t\t\t\trlist = re.findall(r\"-?\\d*\\.{0,1}\\d+\", line)\n\n\t\t\t\tfileName = os.path.split(h.name)[1]\n\n\t\t\t\trkeys = mapping[\"columns\"][fileName]\n\n\t\t\t\trobj = row()\n\t\t\t\tlocals()[mapping[\"reversealias\"][fileName]] = robj\t\t\t\n\n\t\t\t\ti = 0\t\t\t\n\t\t\t\tfor k in rkeys:\t\t\t\t\t\n\t\t\t\t\tif(rkeys[i] == \"timestamp\"):\n\t\t\t\t\t\tsetattr(robj, rkeys[i], int(rlist[i]))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif( rkeys[i].endswith(\"*\")):\n\t\t\t\t\t\t\tcol = rkeys[i].replace(\"*\",\"\")\n\n\t\t\t\t\t\t\tif(not hasattr(robj, col)):\n\t\t\t\t\t\t\t\tl = []\n\t\t\t\t\t\t\t\tsetattr(robj, col, l)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tl = getattr(robj, col)\n\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tl.append(float(rlist[i]))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tsetattr(robj, rkeys[i], float(rlist[i]))\t\n\t\t\t\t\ti = i + 1\n\n\t\t\tif( not flag ):\n\t\t\t\tbreak\n\n\t\t\tfor c in columns:\n\t\t\t\tval = eval(c.strip())\n\t\t\t\tprint( float(val) , end = ' ', file = _file)\n\t\t\t\t\n\t\t\tprint('', file = _file)\n\t\t\t\n\t\t\t_file.flush()\n\n\t\tfor h in fhandles.values():\n\t\t\th.close()", "def read_dir(self, path, process_zone, prefix=\"\"):\n\n zones = {}\n for entry in os.listdir(path):\n fullpath = os.path.join(path, entry)\n if os.path.isdir(fullpath):\n zones.update(self.read_dir(fullpath, process_zone, os.path.join(prefix, entry)))\n elif prefix != \"\":\n filename = os.path.join(prefix, entry)\n zones[filename[:-4]] = process_zone(filename)\n return zones", "def stack_walks(direc):\n files = os.listdir(direc)\n csvs = []\n for x in files:\n if '.csv' in x:\n csvs.append(x)\n complete = np.vstack([get_nx10(direc+'/'+x) for x in csvs])\n return complete", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def open_local_file(file_path):\n directory_name = Path(\"../data/\" + file_path)\n return directory_name", "def directory_to_df(path,filetype = '.csv',ignore_index = True):\n filenames = []\n file_column = []\n frames = []\n test_index = 1\n for filename in os.listdir(path):\n if filetype in filename:\n curr_df = pd.read_csv(path+filename)\n frames.append(curr_df)\n filenames.append(filename.replace(filetype,''))\n for i in range(curr_df.shape[0]):\n file_column.append(test_index)\n test_index+=1\n \n df = pd.concat(frames,ignore_index = ignore_index)\n df['files'] = file_column\n return df, filenames", "def load_cards(dir: str) -> Dict[str, CardsTuples]:\n res = {}\n for fname in glob.glob(f'{dir}/*.csv'):\n card = 
load_card(fname)\n title = os.path.basename(fname)\n title = title[:-len('.csv')]\n res[title] = card\n return res" ]
[ "0.64571184", "0.64260465", "0.62206316", "0.5949646", "0.5880635", "0.5827373", "0.5811057", "0.57590806", "0.57238656", "0.5693332", "0.56806904", "0.56777036", "0.5620818", "0.55884403", "0.55691016", "0.55604094", "0.5549872", "0.55385315", "0.55111617", "0.5508465", "0.5507536", "0.55027485", "0.55022866", "0.5476992", "0.54584354", "0.5453344", "0.5452682", "0.54398054", "0.54391444", "0.54234076", "0.54100436", "0.54036665", "0.54014033", "0.53923017", "0.53873503", "0.5375127", "0.5366709", "0.536365", "0.5356414", "0.53350806", "0.53214616", "0.5320601", "0.53153753", "0.5303483", "0.53004444", "0.52988654", "0.5287311", "0.5283783", "0.52699214", "0.5268716", "0.5252475", "0.52523094", "0.5243171", "0.52411234", "0.52397406", "0.5237309", "0.5229537", "0.5228142", "0.5222757", "0.5221566", "0.521762", "0.5215029", "0.5201677", "0.5193539", "0.51809174", "0.5174303", "0.51712835", "0.51676184", "0.51649296", "0.51581484", "0.5150146", "0.5148121", "0.5123797", "0.51233", "0.5122529", "0.51224256", "0.5116351", "0.51162755", "0.5113924", "0.5112492", "0.5112065", "0.5101175", "0.5101172", "0.5096789", "0.5095663", "0.509566", "0.5093225", "0.5083791", "0.5079988", "0.5075843", "0.50636286", "0.506309", "0.5062842", "0.5062574", "0.50572866", "0.50530636", "0.5052711", "0.5050968", "0.5047536", "0.5044363" ]
0.5314282
43
Description When is given a directory path that has forecast as parent folder and csv file with desired name Expected Result returns dictionary with right data
def test_forecast_folder_path(self): #setup filepath = ".data/forecast/Kano-KN_-9.09_7.39.json" expected_result = { "type": "forecast", "city": "Kano", "state": "KN", "coordinates": ['-9.09', '7.39'], "forecast": {} } #result assert extractor.get_metadata_from_filepath(filepath) == expected_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def test_observed_folder_path(self):\n\n #setup\n filepath = \".data/observed/Abadia-BA_-11.56_-37.52.csv\"\n expected_result = {\n \"type\": \"observed\",\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": ['-11.56', '-37.52'],\n \"observed\": {}\n }\n \n #result\n assert extractor.get_metadata_from_filepath(filepath) == expected_result", "def csv_path(name):\n return \"./data/%s\" % name", "def add_path_dict(input_dict: dict, start_path: str, file_path: str):\n # Determine relative path\n relpath = os.path.relpath(file_path, start=start_path)\n\n # If only file remaining, store in dict, otherwise go 1 level deeper\n if relpath == os.path.basename(file_path):\n input_dict[os.path.splitext(relpath)[0]] = pd.read_csv(file_path,\n sep='\\t')\n else:\n parent_dir = relpath.split(os.sep)[0]\n if parent_dir not in input_dict.keys():\n input_dict[parent_dir] = {}\n add_path_dict(input_dict=input_dict[parent_dir],\n start_path=os.path.join(start_path, parent_dir),\n file_path=file_path)", "def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.", "def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert 
os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict", "def import_to_df(\n list: str,\n path: str = \"competition_data\"\n ) -> dict:\n\n df_dict = {}\n for file in list:\n if 'csv' not in file:\n continue\n df = pd.read_csv(\"/\".join([path, file]))\n # remove extension\n name = file.split('.')[0]\n df_dict[name] = df\n \n return df_dict", "def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def _get_csv_path(name):\n return os.path.join(cwd, 'output/app_info', name)", "def loadPredictions(self):\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n message = 'Select folder'\n folderDialog = QtWidgets.QFileDialog(self, message, dir_path)\n folderDialog.setFileMode(QtWidgets.QFileDialog.Directory)\n folderDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, True)\n fileName = [] # Returns a list of the directory\n\n # Plot the window to select the csv file\n if folderDialog.exec_():\n fileName = folderDialog.selectedFiles()\n # Debug\n #fileName = 
['/media/dimitris/TOSHIBA EXT/Image_Document_Classification/PMC-Dataset']\n print(fileName)\n if os.path.isdir(str(fileName[0])):\n self.loadFolder(str(fileName[0]))\n else:\n message = 'Only csv files'\n self.messageBox(message)\n return\n\n self.selectFigures()", "def extract_csv_for_date(config, data_date): \n \n ### TODO: test config separately \n \n # print(config.DATA_ROOT)\n # print(data_date)\n \n # Raise an exception if attribute DATA_ROOT does not exist\n if not 'DATA_ROOT' in vars(config):\n raise AttributeError(\"Attribute DATA_ROOT does not exist\")\n \n # Raise an exception if DATA_ROOT does not exist\n if not os.path.exists(config.DATA_ROOT):\n raise NotADirectoryError(\"The path \" + config.DATA_ROOT + \" not found\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'METER_CHANNEL_DICT' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'SAMPLE_TIME' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n data_date_dt = parse(data_date)\n \n if data_date_dt > config.DATA_END_DATE:\n raise ValueError(\"data_date entered is greater than the DATA_END_DATE: \" + \n str(config.DATA_END_DATE))\n \n if data_date_dt < config.DATA_START_DATE:\n raise ValueError(\"data_date entered is less than the DATA_START_DATE: \" + \n str(config.DATA_START_DATE))\n \n # Get the year, month and and day from date entered\n data_year = data_date_dt.year\n data_month = data_date_dt.month\n data_day = data_date_dt.day\n \n # Get the corresponding path in the directory to look for the data for the day\n data_path = os.path.join(config.DATA_ROOT, str(data_year), \"{:02}\".format(data_month), \"{:02}\".format(data_day))\n # print(data_path)\n # Find the count of meters\n meter_count = len(config.METER_CHANNEL_DICT)\n\n # Dictionary to store the names of the resulting csv files\n meter_csv_names = {}\n \n # Get the down-sampling time\n sample_time = config.SAMPLE_TIME\n \n # Create a dictionary with keys are meter names and values as dataframes \n # containing the data for the day\n meter_collection = {}\n \n # for meter_name in config.METER_CHANNEL_DICT:\n # # Create an empty dataframe, the columns will be created later\n # meter_collection[meter_name] = pd.DataFrame()\n\n #print(meter_collection)\n if os.path.exists(data_path):\n # Walk through all the files in the directory for the day's data\n for dirpath, dirnames, files in os.walk(data_path, topdown=True):\n # `files` contains the names of all the files at the location\n if len(files) == 0:\n print(\"No files found for day: \" + data_path)\n continue\n for filename in files:\n # Get the netcdf files, these are files with `.nc` extension\n if filename.lower().endswith('.nc'):\n # For the particular file, find out the corresponding meter and channel \n [meter, channel] = extract_ppty(filename, config.METER_CHANNEL_DICT.keys())\n # Create an entry in the `meter_collection` dict if it does not exist yet\n if meter not in meter_collection:\n meter_collection[meter] = pd.DataFrame()\n # Form the resulting csv name from the meter name if it doesnt exist yet\n # They are of the type - meter_name@Timestamp@Duration@Frequency\n # For e.g.: PQube3@2017-11-01T080002Z@[email protected]\n #print(meter, channel)\n if meter not in meter_csv_names:\n meter_csv_names[meter] = '@'.join([meter, '@'.join(filename.split('@')[1:4])])[:-3] + '.csv'\n #print(meter_csv_names)\n # Get 
the full path of the csv\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # Only extract if not already extracted to csv\n if (not os.path.isfile(csv_name)):\n # Get the dataframe containing time and channel values\n channel_df = extract_data(dirpath, filename)\n # Give the dataframe column a name\n channel_df.columns = [channel]\n # Down-sample the data to the sampling time intended\n channel_resampled = data_resample(channel_df, sample_time)\n # If our meter dataframe is empty so far, i.e. if this is the \n # first channel being entered, then create a copy of the \n # resampled dataframe\n if meter_collection[meter].empty:\n meter_collection[meter] = channel_resampled.copy()\n ####################### \n # This `else` clause handles two cases:\n # 1. If the dataframe is not empty, then add other columns to\n # the dataframe. (the else case)\n # 2. Some days have data downloaded more than once, this means \n # that channels can occur more than once. (like 05/21/2018)\n #######################\n else:\n # If the channel already exists in the dataframe\n # then either the other file has updated data or \n # subsequent data. \n if channel in meter_collection[meter].columns:\n # Get index from total dataframe \n idx_1 = meter_collection[meter].index\n # Get index from file dataframe\n idx_2 = channel_resampled.index\n # Compare the two, if the index is contained within,\n # then **update** the channel's value for file's indices. \n if np.all(np.isin(idx_2, idx_1)):\n meter_collection[meter][channel].loc[idx_2] = channel_resampled.values.tolist()\n # If the index is not contained, append the file df to\n # the total dataframe\n else:\n meter_collection[meter] = meter_collection[meter].append(channel_resampled, sort=False)\n meter_collection[meter].sort_index(inplace=True)\n #######################\n # This data is resampled a second time to handle two cases:\n # 1. When appending a resampled dataframe to an already resampled dataframe, the last\n # index of the original dataframe and the first index of the new dataframe can have\n # the same time. Resampling the appended dataframe will eliminate the repetitions.\n # 2. If the new dataframe to be appended starts at a much later time, resampling the\n # appended dataframe will create rows of missing data (NaN) at the times with no\n # measurement values. This makes it easier to detect missing measurement values and\n # perform data imputation at a later phase.\n #######################\n meter_collection[meter] = data_resample(meter_collection[meter], sample_time)\n # If the channel does not already exist, then add the\n # file dataframe to the total df. 
\n else:\n meter_collection[meter] = meter_collection[meter].join(channel_resampled, how='outer')\n else:\n print(\"Path not found: \" + data_path)\n \n # Perform data imputation wherrever needed\n # print(meter_collection)\n meter_collection = data_impute(meter_collection)\n \n # Write the total dataframes to csv file\n for meter in meter_collection:\n # Reorganize the order of columns to match the database tables \n meter_channels = config.METER_CHANNEL_DICT[meter]\n # meter_collection[meter].reset_index(inplace=True)\n meter_collection[meter] = meter_collection[meter].reindex(columns=meter_channels[1:])\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # print(csv_name)\n # Only write csv if it does not exist yet\n if(not os.path.isfile(csv_name)):\n meter_collection[meter].to_csv(csv_name, header=False)\n\n return meter_csv_names", "def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)", "def update_csv():\n return os.listdir('./data')", "def collect_data(folder):\n folder = pathlib.Path(folder)\n cases = []\n for case_folder in folder.iterdir():\n print(f'start collecting data for location {case_folder.name}')\n for tr_folder in case_folder.iterdir():\n case = calculate_values(tr_folder)\n cases.append(case)\n \n df = pd.DataFrame(cases)\n print(folder.parent.joinpath(f'{folder.stem}.csv'))\n df.to_csv(folder.parent.joinpath(f'{folder.stem}.csv'), index=False)", "def read_weatherstations(path_to_data):\n namedict = read_weatherstationnames(path_to_data)\n stations = {}\n for i in namedict:\n filename = namedict[i].replace(' ', '_') + '.csv'\n print(\"Reading\", filename)\n ws = read_station_csv(os.path.join(path_to_data, filename))\n stations[i] = ws\n return stations", "def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names", "def convert_dataset(filename, subdirectory='data'):\n \"\"\" Single path case. \"\"\"\n if 'TRIP_ID' not in next(read_csv(filename, subdirectory)):\n return [DataPoint(timestamp=line['SAMPLE_DATE'],\n speed=line['SPEED'],\n lon=line['LON'],\n lat=line['LAT'],\n bearing=line['HEADING']) for line in read_csv(filename, subdirectory)]\n\n \"\"\" Multiple path case. \"\"\"\n paths = {}\n for line in read_csv(filename, subdirectory):\n next_point = DataPoint(timestamp=line['SAMPLE_DATE'],\n speed=line['SPEED'],\n lon=line['LON'],\n lat=line['LAT'],\n bearing=line['HEADING'])\n try:\n paths[line['TRIP_ID']].append(next_point)\n except KeyError:\n paths[line['TRIP_ID']] = [next_point]\n\n return list(paths.values())", "def get_data(self):\r\n\r\n # Find the absolute path for the root dir (04-Decision-Science)\r\n # Uses __file__ as absolute path anchor\r\n root_dir = os.path.abspath('')\r\n\r\n # Use os library for Unix vs. 
Widowns robustness\r\n xls_path = os.path.join(root_dir, 'data')\r\n\r\n file_names = [f for f in os.listdir(csv_path) if f.endswith('.xls')]\r\n\r\n def key_from_file_name(f):\r\n if f[-4:] == '.xls':\r\n return f[:-4]\r\n\r\n # Create the dictionary\r\n data = {}\r\n for f in file_names:\r\n data[key_from_file_name(f)] = pd.read_excel(os.path.join(xls_path, f))", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def get_data_file():\n base_folder = os.path.dirname(__file__)\n # print(base_folder)\n return os.path.join(base_folder, 'data', 'Sacramentorealestatetransactions.csv')\n # print(filename)", "def Get_Player_Historic_Data(data_path, player_history_path): \n players = os.listdir(player_history_path) # Lists All The Player Folders in the Dir\n players_data = pd.read_csv(data_path + 'players_raw.csv')\n for ind in pbar(players_data.index): # ind in [0:693:1]\n # Get the Seasonal History\n player_path = players_data['first_name'][ind] + '_' + players_data['second_name'][ind] + '_' + str(players_data['id'][ind]) # Create player_history_path\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n # print(json.keys())\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n os.makedirs(player_history_path + player_path, exist_ok = True) # Create a new path for the player \n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his syeasonal history\n else: # However, if the player is within the existing directory\n if not os.path.isfile(player_history_path + player_path + \"/history.csv\"): # And a history file does not exist\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If 
history returned\n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his seasonal history\n # Get the Gameweek History\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID \n history_df_gw = pd.DataFrame(json['history']) # Extract Gameweek History\n if not history_df_gw.empty: # If history returned\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n os.makedirs(player_history_path + player_path, exist_ok = True) # Create the directory, exit\n history_df_gw.to_csv(player_history_path + player_path + '/gw.csv', encoding='utf-8', index = False) # Write the CSV", "def task_lst_gen(dirr, csv_path):\n train_file_lst, val_file_lst, test_file_lst = files_from_csv(csv_path)\n\n task_dict = {}\n out_prefix = '/work/jfeins1/maestro/dataset-v3/'\n for subdirs, dirs, files in os.walk(dirr):\n for file in files:\n filepath = subdirs + os.sep + file\n\n if file in train_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'train/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n if file in test_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'test/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n if file in val_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'val/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n task_lst = open('/work/jfeins1/maestro/encoding_gen_task.lst', 'w')\n for uid, d in task_dict.items():\n print(d['in'], d['out'], file=task_lst)", "def read_files(path, file_name):\n\n if os.path.exists(\n r'{}\\{}_dynamic.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_static.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_ego.csv'.format(path, file_name)):\n with open(r'{}\\{}_dynamic.csv'.format(path, file_name)) as tmp_dynamic:\n dynamic_csv = pd.read_csv(tmp_dynamic)\n print('Dynamic csv file found')\n with open(r'{}\\{}_static.csv'.format(path, file_name)) as tmp_static:\n static_csv = pd.read_csv(tmp_static)\n print('Static csv file found')\n with open(r'{}\\{}_ego.csv'.format(path, file_name)) as tmp_ego:\n ego_csv = pd.read_csv(tmp_ego)\n print('Ego csv file found')\n return ego_csv, dynamic_csv, static_csv\n\n else:\n print('No available data')\n sys.exit(0)", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()", "def get_data( name=None, force_download=False, version=19, target_extension='.csv' ):\n os.makedirs(DATA_PATH, exist_ok=True)\n\n def download_data( version ):\n url = \"https://ndownloader.figshare.com/articles/14766102/versions/\" + str(version)\n target_file_name = \"14766102.zip\"\n target_file_name_path = tf.keras.utils.get_file(target_file_name, url,\n 
cache_subdir=DATA_PATH, extract = True )\n os.remove( DATA_PATH + target_file_name )\n\n if force_download:\n download_data( version = version )\n\n\n files = []\n for fname in os.listdir(DATA_PATH):\n if ( fname.endswith(target_extension) ) :\n fname = os.path.join(DATA_PATH, fname)\n files.append(fname)\n\n if len( files ) == 0 :\n download_data( version = version )\n for fname in os.listdir(DATA_PATH):\n if ( fname.endswith(target_extension) ) :\n fname = os.path.join(DATA_PATH, fname)\n files.append(fname)\n\n if name == 'all':\n return files\n\n datapath = None\n\n for fname in os.listdir(DATA_PATH):\n mystem = (Path(fname).resolve().stem)\n mystem = (Path(mystem).resolve().stem)\n mystem = (Path(mystem).resolve().stem)\n if ( name == mystem and fname.endswith(target_extension) ) :\n datapath = os.path.join(DATA_PATH, fname)\n\n if datapath is None:\n raise ValueError('File doesnt exist. Options: ' , os.listdir(DATA_PATH))\n return datapath", "def get_files(input_dir):\n file_rep = { \"tars\" : [] }\n \n files = os.listdir(input_dir)\n \n the_file, the_date = find_bootstrap(files)\n \n #add index file in file_rep\n file_rep['index'] = the_file\n file_rep['date'] = the_date\n \n pattern = \"ncep_forecast_%s_(?P<name>\\S+).tar\" % (the_date)\n \n the_re = re.compile(pattern)\n\n for the_file in files:\n matched = the_re.match(the_file)\n if matched:\n print(\"matched %s\" % (matched.group(\"name\")))\n file_rep['tars'].append(the_file)\n \n return file_rep", "def create_files_dict(csv_file_name: str):\r\n\r\n SKUs = [] # list of SKU's in the csv file\r\n with open(csv_file_name, 'r') as csv_fd:\r\n csv_reader = csv.reader(csv_fd)\r\n for line in csv_reader:\r\n for SKU in line:\r\n SKUs.append(SKU)\r\n\r\n # creating a list of file extensions [.ext, ...]\r\n file_extensions = []\r\n for SKU in SKUs:\r\n for dir_file in os.listdir():\r\n if SKU in os.path.splitext(dir_file)[0]:\r\n dir_file_ext = os.path.splitext(dir_file)[1]\r\n if dir_file_ext not in file_extensions:\r\n file_extensions.append(dir_file_ext)\r\n file_extensions.sort() # sorting by ascii for constant format view\r\n # print(\"debug:::file_extensions\", file_extensions)\r\n\r\n ext_format_dict = {} # base format for creating extension dict (to be copied for each iteration)\r\n for ext in file_extensions:\r\n ext_format_dict[ext] = ''\r\n\r\n files = {}\r\n for filename_base in SKUs:\r\n for dir_file_0 in os.listdir():\r\n current_file_extensions = ext_format_dict.copy() # reset dict values for each file\r\n if filename_base in os.path.splitext(dir_file_0)[0]:\r\n # need to take the dir_file_base and re-iterate over listdir to find all exact name filenames\r\n for dir_file_1 in os.listdir():\r\n if os.path.splitext(dir_file_0)[0] == os.path.splitext(dir_file_1)[0]:\r\n dir_file_base = os.path.splitext(dir_file_1)[0]\r\n dir_file_ext = os.path.splitext(dir_file_1)[1]\r\n if dir_file_ext in list(current_file_extensions.keys()):\r\n current_file_extensions[dir_file_ext] = 'V'\r\n files[dir_file_base] = current_file_extensions\r\n\r\n return files", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out", "def get_csv_data(csv_path: str, img_dir: str) -> pd.DataFrame:\r\n data = pd.read_csv(csv_path)\r\n data['title'] = data['title'].apply(preprocess_titles)\r\n data['image'] = data['image'].apply(abs_path, args=(img_dir,))\r\n return data", "def sample_data_path(name):\n import os.path as op\n data_dir = 
op.join(op.dirname(__file__), \"data\")\n data_path = op.join(data_dir, name + \".csv\")\n return op.abspath(data_path)", "def import_func(path_):\n\n datasets_dic = {}\n\n for dataset_path in path_:\n # Parse labels from filenames\n dataset_label = os.path.split(dataset_path)[1].split('.')[0]\n\n # Read from csv to Pandas\n dataset = pd.read_csv(dataset_path)\n\n # insert dataset label to the dataframes\n dataset.insert(0, 'trial', dataset_label)\n dataset.insert(0, 'maneuver', dataset_label.split('_')[0])\n\n # Datasets are stored in a dictionary\n datasets_dic.update({dataset_label: dataset})\n\n # list of imported maneuvers\n dataset_names = list(datasets_dic.keys())\n\n return datasets_dic, dataset_names", "def get_data(self, csv_file):\n pass", "def get_data(self):\n \n with os.scandir(self.file_path) as collection_of_files:\n files_found = [file.name.split('.')[0] for file in collection_of_files \n if (file.name.split('.')[0].lower().strip() in self._data_requirements.required_file_names \n and file.name.endswith('.csv'))]\n\n self.check_missing_files(files_found)\n \n self._data = DictObjectView(self.read_in_files(files_found))", "def test_findCSV(self,\n filename=\"page-views.csv\",\n input_folder='../../input/raw-data/'):\n\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = input_folder + filename\n self.assertEqual(csv_file, expected_output)", "def process_csv(csv_file=None):\n if csv_file:\n _process_csv_data(csv_file, USER_DATA_MAP)\n else:\n csv_files_list = [os.path.join(DATA_DIR, f) for f in os.listdir(DATA_DIR) if f.endswith('.csv')]\n for fl in csv_files_list:\n _process_csv_data(fl, USER_DATA_MAP)\n return USER_DATA_MAP", "def _get_model_val(models_name, data_dir, val_source='test'):\n model_val = {}\n for model in models_name:\n mypath = data_dir + '/' + model\n only_files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n select_files = [val_source in x for x in only_files]\n only_files = list(compress(only_files, select_files))\n if type(only_files) == list:\n for name_file in only_files:\n df_name = name_file.replace('.csv', '')\n model_val[model + '_' + df_name] = pd.read_csv(mypath + '/' + name_file)\n else:\n df_name = only_files.replace('.csv', '')\n model_val[model + '_' + df_name] = pd.read_csv(mypath + '/' + only_files)\n return model_val", "def _read_source_data(self) -> pd.DataFrame:\n df = None\n try:\n logger.info(\"reading csv base file under simulation folder\", class_name=self.__class__.__name__)\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/simulation/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.warning(\"base file not processed, trying under unprocessed folder\",\n class_name=self.__class__.__name__)\n try:\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/unprocessed/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.error(\"base file not found... 
exiting\", class_name=self.__class__.__name__)\n exit(1)\n return df", "def read_timestep(root_path: str, time: str):\n path = os.path.join(root_path, time)\n\n agent_file = glob.glob(os.path.join(path, \"*_agents.csv\"))[0]\n rel_file = glob.glob(os.path.join(path, \"*_relationships.csv\"))[0]\n feat_files = glob.glob(os.path.join(path, \"*_feat_*.csv\"))\n exposure_files = glob.glob(os.path.join(path, \"*_exposure_*.csv\"))\n assert os.path.isfile(agent_file), f\"can't find agents.csv in {dir}\"\n assert os.path.isfile(rel_file), f\"can't find relationships.csv in {dir}\"\n\n _, agent_filename = os.path.split(agent_file)\n\n # create agents dict\n agents = {}\n\n # re-create all agents and add to population\n with open(agent_file, newline=\"\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n agents[row[\"id\"]] = row\n agents[row[\"id\"]][\"time\"] = time\n\n def update_agent_extras(files, extra_type):\n pattern = re.compile(f\"^.*_{extra_type}_(.*)\\.csv$\")\n for file in files:\n m = pattern.match(file)\n if m is not None:\n extra = m.group(1)\n with open(file, newline=\"\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n for k, v in row.items():\n if k != \"agent\":\n agents[row[\"agent\"]][f\"{extra}_{k}\"] = v.lower() if v in ('True', 'False') else v\n\n update_agent_extras(feat_files, \"feat\")\n update_agent_extras(exposure_files, \"exposure\")\n\n # re-create all relationships and write to file\n rels = []\n with open(rel_file, newline=\"\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n row[\"time\"] = time\n rels.append(row)\n\n return rels, list(agents.values())", "def load_data_files() -> Dict[str, Path]:\n default_path = paths.MISCELLANEOUS_DIRECTORY / \"portfolio\"\n custom_exports = (\n get_current_user().preferences.USER_PORTFOLIO_DATA_DIRECTORY / \"optimization\"\n )\n data_files = {}\n for directory in [default_path, custom_exports]:\n for file_type in [\"xlsx\", \"ini\"]:\n for filepath in Path(directory).rglob(f\"*.{file_type}\"):\n if filepath.is_file() and \"example\" not in filepath.name:\n data_files[filepath.name] = filepath\n\n return data_files", "def create_path_dict(save_path):\n act_fn = [sorted(['relu', 'antirelu', 'identity', 'tanh', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'tanh']),\n sorted(['relu', 'antirelu', 'sigmoid', 'tanh']),\n sorted(['relu', 'identity', 'sigmoid', 'tanh']),\n sorted(['antirelu', 'identity', 'sigmoid', 'tanh']),\n ['relu'],\n ['sigmoid'],\n ['tanh'],\n ['antirelu'],\n ['None']]\n # ['identity']]\n\n act_fn = ['_'.join(act) for act in act_fn]\n path_dict = defaultdict(list)\n for (filepath, dirname, filename) in os.walk(save_path):\n if 'results.json' in filename:\n for act in act_fn:\n temp = filepath.split('/')\n if act == temp[-1] or act == temp[-2]:\n path_dict[act].append(filepath)\n print(path_dict)\n return path_dict", "def load_data(input_dir, file_name, forecast_col='Close'):\n # read in csv\n df = pd.read_csv('{}/{}'.format(input_dir, file_name), parse_dates=['Date'], index_col=0)\n # select & add feature columns\n df.fillna(0, inplace=True)\n df = df[['Open', 'High', 'Low', 'Close']]\n df['HL_PCT'] = (df['High'] - df['Low']) / df['Close'] * 100.\n df['PCT_Change'] = (df['Close'] - df['Open']) / df['Open'] * 100.\n df = df.iloc[::-1]\n df.fillna(value=-9999, inplace=True)\n # set # of days to forecast out and shift column to be used as labels\n days_forecast = 15\n df['label'] = df[forecast_col].shift(-days_forecast)\n # 
set up feature & label matrices\n X = np.array(df.drop(['label'], 1))\n X = preprocessing.scale(X)\n x_recent = X[-days_forecast:]\n X = X[:-days_forecast]\n df.dropna(inplace=True)\n y = np.array(df['label'])\n # split data 80/20 for train & test respectively\n x_train, x_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)\n return x_train, x_test, x_recent, y_train, y_test, df", "def _filepath(self, which_one: str):\n dataset = self.mode.name\n with open('data/dstc2_{}/scripts/config/dstc2_{}.flist'.format(\n 'test' if self.mode is DSTC2.Mode.test else 'traindev', dataset\n )) as flist:\n paths = flist.read().splitlines()\n for path in paths:\n path = 'data/dstc2_{}/data/'.format('test' if self.mode is DSTC2.Mode.test else 'traindev') + path + '/'\n with open(path + which_one + '.json') as f:\n yield json.load(f)", "def data_abex_input_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_abex_input_dir(experiment_name) / iteration_csv", "def process_input_data(input_data_path):\n if os.path.isdir(input_data_path):\n input_data_glob = glob.glob(input_data_path + \"/*.csv\")\n else:\n if is_gcs_path(input_data_path):\n # Download the input to a local\n with tempfile.NamedTemporaryFile() as hf:\n input_data = hf.name\n\n logging.info(\"Copying %s to %s\", input_data_path, input_data)\n input_data_gcs_bucket, input_data_gcs_path = split_gcs_uri(\n input_data_path)\n\n logging.info(\"Download bucket %s object %s.\", input_data_gcs_bucket,\n input_data_gcs_path)\n bucket = storage.Bucket(storage.Client(), input_data_gcs_bucket)\n storage.Blob(input_data_gcs_path, bucket).download_to_filename(\n input_data)\n else:\n input_data = input_data_path\n\n ext = os.path.splitext(input_data)[-1]\n if ext.lower() == '.zip':\n zip_ref = zipfile.ZipFile(input_data, 'r')\n zip_ref.extractall('.')\n zip_ref.close()\n # TODO: Hardcoding the file in the Archive to use is brittle.\n # We should probably just require the input to be a CSV file.:\n csv_file = 'stackoverflow-questions.csv'\n else:\n csv_file = input_data\n\n input_data_glob = glob.glob(csv_file)\n\n return input_data_glob", "def get_key_data_filepath():\n global key_filepath, directory\n filename = 'key.csv'\n key_filepath = os.path.join(directory, filename)", "def init_map(project_name, destination_directory):\n project_path = os.path.join(destination_directory, project_name)\n map_filename = os.path.join(project_path, project_name + \"map.csv\")\n if not os.path.exists(project_path):\n os.makedirs(os.path.join(destination_directory, project_name))\n file_id = 0\n mapdf = pd.DataFrame(columns=METADATA_COLUMN_NAMES)\n open(map_filename, 'a').close()\n elif len(os.listdir(project_path)) == 1:\n file_id = 0\n mapdf = pd.DataFrame(columns=METADATA_COLUMN_NAMES)\n else:\n mapdf = pd.read_csv(map_filename)\n mapdf.columns = METADATA_COLUMN_NAMES\n file_id = mapdf['fid'].max() + 1\n return file_id, mapdf", "def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif folders.find(\"pass\") != -1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t 
RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def __init__(self, dir_path= 'static/Irma data-20210525'):\n \n A=csv_to_dict(dir_path+\"\\A.csv\")\n B=csv_to_dict(dir_path+\"\\B.csv\")\n C=csv_to_dict(dir_path+\"\\C.csv\")\n D=csv_to_dict(dir_path+\"\\D.csv\")\n\n\n self.dicts_list=[A,B,C,D]\n self.image_codes=csv_to_dict(dir_path+\"\\image_codes.csv\")", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def existing_data(self):\n # Set the directory and file name\n data_summary_dir = op.join('../logs', self.name, 'data_summary')\n file_name = 'Train_Test_Summary_generative.csv'\n\n # Read the csv and obtain the train data list\n df = pd.read_csv(op.join(data_summary_dir, file_name))\n train_data = df['Train Data'].dropna().values.tolist()\n test_data = df['Test Data'].dropna().values.tolist()\n\n train_data_list, test_data_list = [], []\n for single_train in train_data:\n data_name = single_train.split('_')[0]\n if data_name == 'LTRC':\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4]\n else:\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4] + '_' + single_train.split('_')[5]\n full_data_name = single_train.split('_')[0] + '_' + single_train.split('_')[1] + '_' + single_train.split('_')[2] + '_' + series\n train_data_list.append(full_data_name)\n\n for single_test in test_data:\n data_name = single_test.split('_')[0]\n if data_name == 'LTRC':\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4]\n else:\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4] + '_' + single_test.split('_')[5]\n full_data_name = single_test.split('_')[0] + '_' + single_test.split('_')[1] + '_' + single_test.split('_')[2] + '_' + series\n test_data_list.append(full_data_name)\n\n # Obtain the label map and CT list and file 
names\n label_map_list = glob(op.join(self.save_root_dir, 'source_data_2', '*'))\n ct_list = glob(op.join(self.save_root_dir, 'target_data_2', '*'))\n\n label_map_files = [single_file.split('/')[-1] for single_file in label_map_list]\n ct_files = [single_file.split('/')[-1] for single_file in ct_list]\n label_map_files.sort(), ct_files.sort()\n\n # Initialize empty list\n existing_train_lm, existing_train_ct = [], []\n existing_test_lm, existing_test_ct = [], []\n\n for single_lm, single_ct in zip(label_map_files, ct_files):\n\n ct_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\n lm_data_name = single_lm.split('_')[0] + '_' + single_lm.split('_')[1] + '_' + single_lm.split('_')[2]\n\n assert ct_data_name == lm_data_name, 'Data is not the same.'\n\n data_name = single_ct.split('_')[0]\n if data_name == 'LTRC':\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4]\n else:\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4] + '_' + single_ct.split('_')[5]\n full_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\\\n + '_' + series\n\n if full_data_name in train_data_list:\n existing_train_lm.append(single_lm)\n existing_train_ct.append(single_ct)\n if full_data_name in test_data_list:\n existing_test_lm.append(single_lm)\n existing_test_ct.append(single_ct)\n existing_train_data = [existing_train_lm, existing_train_ct]\n existing_test_data = [existing_test_lm, existing_test_ct]\n return existing_train_data, existing_test_data", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def predict_directory(self, directory_name, result_file_name):\n logging.info('Starting prediction.')\n with open(result_file_name, 'ab') as f:\n writer = csv.writer(f)\n writer.writerow(('id', 'category'))\n for song_id in os.listdir(directory_name):\n song = pd.read_csv('{}{}'.format(directory_name, song_id)).values\n predicted_genre = self.classify(song)\n logging.info('Predicted genre: {}'.format(predicted_genre))\n writer.writerow((song_id, predicted_genre))", "def __setupPaths(self):\n self.csv_file_names = filter(\n (lambda x: not re.match(\".*\\\\.csv$\", x) is None),\n os.listdir(self.path))", "def _read_trajectory_files(self):\n dflist = []\n self.Ntimes = {}\n for downD in self.case.downstreamD:\n outputs = self.case.get_outputs(self.method,downD)\n print(outputs['trajectory_file'])\n df = pd.read_csv(outputs['trajectory_file'],\n header=None,\n usecols=[0,1,2])\n df.columns = ['t','y','z']\n df['x'] = downD * self.case.turbine.D\n df['z'] -= self.case.turbine.zhub\n df = df.set_index(['t','x'])[['y','z']]\n self.Ntimes[downD] = len(df.index.levels[0])\n dflist.append(df)\n self.df = pd.concat(dflist).sort_index()", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def run_load(rootpath):\n global CSV_PATH\n CSV_PATH = rootpath+'/csv_files/'\n load_movies_details()\n load_movies_cast()\n load_movies_reviews()", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\lesson5\\\\data\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = ImportUnitTestData()\n result = 
mongo_insert.import_data(key, tmp_file)\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def get_items_path() -> Path:\n return Path(os.path.join(Path(os.path.realpath(__file__)).parent, \"items.csv\"))", "def parse(cls, raw_folder: str) -> Dict[str, Any]:\n folder_path = os.path.abspath(raw_folder)\n data = dict()\n files = os.listdir(folder_path)\n for file in files:\n if is_ignored(file):\n continue\n try:\n file = os.path.join(raw_folder, file)\n datum = cls.process_file(file)\n except FileNotCompatible:\n continue\n\n _, kwrd = os.path.split(file)\n kwrd = os.path.splitext(kwrd)[0]\n data[kwrd] 
= datum\n\n return data", "def generate_Struct(csv_file, pathToDir):\n\n df = extract_structure_from_csv(csv_file)\n\n df = df[ESSENTIAL_CSV_COLUMNS]\n\n for session_kwargs in df.to_dict('index').values():\n session = AnDOData(**session_kwargs)\n session.basedir = pathToDir\n session.generate_structure()", "def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()", "def get_descendant_file_path(parent_path):\n csv_relative_path = []\n for root, dirs, files in os.walk(parent_path):\n for file in files:\n words = file.split(r'.')\n if words[-1] == 'csv':\n file_path = os.path.join(parent_path, file)\n csv_relative_path.append(file_path)\n return csv_relative_path", "def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)", "def read_dir(directory):\n results = AttrDict()\n results.iterations = pd.read_csv(os.path.join(directory, 'iterations.csv'),\n index_col=0)\n results.solutions = AttrDict()\n for i in results.iterations.index.tolist():\n iteration_dir = os.path.join(directory, '{:0>4d}'.format(i))\n fmt = _detect_format(iteration_dir)\n logging.debug('Iteration: {}, Format detected: {}'.format(i, fmt))\n try:\n if fmt == 'netcdf':\n sol_path = os.path.join(iteration_dir, 'solution.nc')\n results.solutions[i] = read_netcdf(sol_path)\n else:\n sol_path = iteration_dir\n results.solutions[i] = read_csv(sol_path)\n logging.debug('Read as {}: {}'.format(fmt, sol_path))\n except IOError as err:\n logging.warning('I/O error in `{}` at iteration `{}`'\n ': {}'.format(iteration_dir, i, err))\n # results.solutions[i] = AttrDict() # add an empty entry\n continue\n return results", "def to_csv_files(self, path):\n self._to_dict_tree().to_csv_files(path)", "def loadFileNameByModel(self, inputDir):\n fileNames = walktree(inputDir)\n fileByModel = {}\n for file in fileNames:\n modelName = file.split('/')[-1]\n modelName = modelName.replace('.txt', '')\n fileByModel[modelName] = file\n return fileByModel", "def import_sitefinder_data(path):\n asset_data = []\n\n site_id = 0\n\n with open(os.path.join(path), 'r') as system_file:\n reader = csv.DictReader(system_file)\n next(reader, None)\n for line in reader:\n if line['Operator'] != 'Airwave' and line['Operator'] != 'Network Rail':\n # if line['Operator'] == 'O2' or line['Operator'] == 'Vodafone':\n # if line['Anttype'] == 'MACRO' or \\\n # line['Anttype'] == 'SECTOR' or \\\n # line['Anttype'] == 'Sectored' or \\\n # line['Anttype'] == 'Directional':\n asset_data.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [float(line['X']), float(line['Y'])]\n },\n 'properties':{\n 'name': 'site_' + str(site_id),\n 'Operator': line['Operator'],\n 'Opref': line['Opref'],\n 'Sitengr': line['Sitengr'],\n 'Antennaht': line['Antennaht'],\n 'Transtype': line['Transtype'],\n 'Freqband': line['Freqband'],\n 
'Anttype': line['Anttype'],\n 'Powerdbw': line['Powerdbw'],\n 'Maxpwrdbw': line['Maxpwrdbw'],\n 'Maxpwrdbm': line['Maxpwrdbm'],\n 'Sitelat': float(line['Sitelat']),\n 'Sitelng': float(line['Sitelng']),\n }\n })\n\n site_id += 1\n\n else:\n pass\n\n return asset_data", "def loadData(directoryName, fileName):\r\n\r\n dataset = pd.read_csv(directoryName + \"/\" + fileName, header = None)\r\n with open(directoryName + \"/\" + \"metadata.json\") as json_file:\r\n metadata = json.load(json_file)\r\n for uuid in metadata:\r\n for column in metadata[uuid]:\r\n index = int(metadata[uuid][column])\r\n dataset.iloc[0, index] = uuid\r\n \r\n return DataManipulationService.createDictionary(dataset)", "def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)", "def getFake(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/Fake.csv\")", "def get_csv_paths(top_path):\n # exclude is a set holding all dirnames to be excluded\n exclude = {\"fails\", \"archive\", \"exclude\", \"fail\", \"backup\"}\n # files is a dict that defaults to lists, so values can be appended to keys\n files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n dirnames[:] = [d for d in dirnames if d.lower() not in exclude]\n\n for filename in filenames:\n\n # gather .csv and .tsv files\n if \".csv\" in str(filename).lower() or \".tsv\" in str(filename).lower():\n # Add filename to the key of dirpath\n files[dirpath].append(filename)\n return files", "def load_data():\n directories=[\"./track1/\",\n \"./track1_recovery/\",\n \"./track2/\",\n \"./track1_reverse/\",\n \"./track2_reverse/\",#Additional data for model built on top of lenet.h5\n \"./track2_recovery/\",#Additions data for model built on top of lenet.h5\n ]\n lines=[]\n for directory in directories:\n with open(directory+\"driving_log.csv\") as csvfile:\n reader=csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n train_samples, validation_samples = train_test_split(lines, test_size=0.2)\n return train_samples, validation_samples", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n 
if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def get_data_paths(directory: Optional[str] = None) -> DataPaths:\n if directory is None:\n directory = DATA_DIRECTORY\n\n os.makedirs(directory, exist_ok=True)\n\n node_data_path = os.path.join(directory, 'nodes.tsv')\n if not os.path.exists(node_data_path):\n logger.info(f'downloading {NODE_DATA_URL}')\n urlretrieve(NODE_DATA_URL, node_data_path)\n\n edge_data_path = os.path.join(directory, 'edges.sif.gz')\n if not os.path.exists(edge_data_path):\n logger.info(f'downloading {EDGE_DATA_URL}')\n urlretrieve(EDGE_DATA_URL, edge_data_path)\n\n transformed_features_path = os.path.join(directory, 'transformed-features.tsv.bz2')\n if not os.path.exists(transformed_features_path):\n logger.info(f'downloading {TRANSFORMED_FEATURES_URL}')\n urlretrieve(TRANSFORMED_FEATURES_URL, transformed_features_path)\n\n validate_data_path = os.path.join(directory, 'validation-statuses.tsv')\n if not os.path.exists(validate_data_path):\n logger.info(f'downloading {VALIDATE_DATA_URL}')\n urlretrieve(VALIDATE_DATA_URL, validate_data_path)\n\n symptomatic_data_path = os.path.join(directory, 'probabilities.tsv')\n if not os.path.exists(symptomatic_data_path):\n logger.info(f'downloading {SYMPTOMATIC_DATA_URL}')\n urlretrieve(SYMPTOMATIC_DATA_URL, symptomatic_data_path)\n\n repurpose_data_path = os.path.join(directory,'repurpose_overlap.json')\n if not os.path.exists(repurpose_data_path):\n logger.info(f'downloading {REPURPOSE_DATA_URL}')\n urlretrieve(REPURPOSE_DATA_URL, repurpose_data_path)\n\n repo_data_path = os.path.join(directory, 'repo_data.csv')\n if not os.path.exists(repo_data_path):\n logger.info(f'downloading {REPO_DATA_URL}')\n urlretrieve(REPO_DATA_URL, repo_data_path)\n\n permutation_directory = os.path.join(directory, \"permutations\")\n os.makedirs(permutation_directory, exist_ok=True)\n\n permutation_paths = []\n for i in range(5):\n permutation_data_path = os.path.join(permutation_directory, PERMUTATION_DATA_FILE_FMT.format(i + 1))\n if not os.path.exists(permutation_data_path):\n url = PERMUTATION_DATA_URL_FMT.format(i + 1)\n logger.info(f'downloading {url}')\n urlretrieve(url, permutation_data_path)\n permutation_paths.append(permutation_data_path)\n data_edge2vec_path = os.path.join(directory, 'data_edge2vec')\n\n return DataPaths(\n node_data_path=node_data_path,\n edge_data_path=edge_data_path,\n transformed_features_path=transformed_features_path,\n validate_data_path=validate_data_path,\n 
symptomatic_data_path=symptomatic_data_path,\n permutation_paths=permutation_paths,\n data_edge2vec_path=data_edge2vec_path,\n repurpose_data_path = repurpose_data_path,\n repo_data_path = repo_data_path\n )", "def test_process(self, tmp_path):\n export_dir = tmp_path / 'export'\n export_dir.mkdir()\n\n process(['raw_data/small_raw_data_0.csv',\n 'raw_data/small_raw_data_1.csv',\n # File 2 does not exist.\n 'raw_data/small_raw_data_2.csv',\n 'raw_data/small_raw_data_3.csv'],\n SIGNALS,\n ['median_home_dwell_time',\n 'completely_home_prop_7dav'],\n ['state'],\n export_dir)\n\n expected = {\n 'wip_median_home_dwell_time': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [6, 3.5],\n 'se': [None, 0.5],\n 'sample_size': [1, 2]\n }),\n 'completely_home_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.15, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'part_time_work_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.35, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'full_time_work_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.45, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'median_home_dwell_time_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [4.5, 3.5, 7.5],\n 'se': [1.5, 0.5, 0.5],\n 'sample_size': [2, 2, 2]\n }),\n 'wip_completely_home_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.1, 0.055, 0.15],\n 'se': [0.05, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n }),\n 'part_time_work_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.25, 0.055, 0.25],\n 'se': [0.1, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n }),\n 'full_time_work_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.35, 0.055, 0.35],\n 'se': [0.1, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n })\n }\n actual = {signal: pd.read_csv(\n export_dir / f'20200612_state_{signal}.csv')\n for signal in expected}\n for signal in expected:\n pd.testing.assert_frame_equal(expected[signal], actual[signal])", "def walk_csv_data(**kwargs):\n for path, name in walk(**kwargs):\n if path.endswith('.csv'):\n with open(path, newline='') as f:\n text = f.read()\n reader = csv.DictReader(StringIO(text))\n try:\n fieldnames = reader.fieldnames\n rows = list(reader)\n yield (path, name, text, fieldnames, rows)\n except csv.Error:\n continue", "def GetRateData(directory):\n\n rt_data = pd.read_csv(directory)\n return rt_data", "def read_data(name: str) -> pd.DataFrame:\n import_dir = Path.cwd().joinpath('eikon_data_files')\n\n path = Path.joinpath(import_dir, Path(name))\n if path.exists():\n return pd.read_csv(path, sep=',')\n else:\n print('File type \"' + name + '.csv' + ' does not exist. 
Aborted.')\n quit()", "def compute_aggregate_weather_data():\n\n # get a list of all the csv files names in the 'weather_data' directory\n files = get_all_csv_files_in_directory('weather_data')\n\n # Todo: if the number of csv files doesn't match the expected value, unzip remaining using the 'os' module\n\n if len(files) == 0:\n\n # Unzip all files in current directory and subdirectories\n print \"unzipping weather files...\"\n os.system(\"unzip 'weather_data/*.zip' -d weather_data\")\n\n\n # Try again to get files\n files = get_all_csv_files_in_directory('weather_data')\n\n # Throw exception if still missing csv files\n if len(files) == 0:\n raise ValueError(\"Missing weather data in csv format in the 'weather_data' directory\")\n\n # convert the list of csv file names to a list of corresponding DataFrames\n dallas_files = filter(lambda file_name : \"KDAL\" in file_name, files)\n houston_files = filter(lambda file_name : \"KHOU\" in file_name, files)\n san_antonio_files = filter(lambda file_name : \"KSAT\" in file_name, files)\n\n print \"Retrieved weather data files...\"\n print \"\\t# of Dallas weather files found: \", len(dallas_files)\n print \"\\t# of Houston weather files found: \", len(houston_files)\n print \"\\t# of San Antonio weather files found: \", len(san_antonio_files)\n\n dallas_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), dallas_files)\n houston_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), houston_files)\n san_antonio_dfs = map(lambda file_name: read_weather_data_from_csv(\"./weather_data/\" + file_name), san_antonio_files)\n\n dallas_df = pd.concat(dallas_dfs)\n houston_df = pd.concat(houston_dfs)\n san_antonio_df = pd.concat(san_antonio_dfs)\n\n print \"Aggregating all of the weather data...\"\n # fold the list of data frames into a single data frame\n aggregate_df = reduce(lambda df1, df2: pd.merge(df1, df2, on=\"Date\", how=\"outer\"), [dallas_df, houston_df, san_antonio_df]).sort_values(\"Date\")\n\n return aggregate_df", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def data_characterization_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_characterization_dir(experiment_name) / iteration_csv", "def build():\n for root, dirs, files in 
os.walk(IN_PATH):\n for filename in files:\n if filename.endswith('.csv'):\n with open(os.path.join(IN_PATH, filename), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n path = row[0].split('.')\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = row[1]\n else:\n d = d[path[i]]\n with open (os.path.join(OUT_PATH, filename.replace('.csv', '.json')), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open (os.path.join(WEB_PATH, filename.replace('.csv', '.js')), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))\n\n with open(os.path.join(IN_PATH, 'en_US.csv'), encoding='utf-8') as f:\n reader = csv.reader(f)\n next(reader)\n data = nested_dict()\n web_data = nested_dict()\n for row in reader:\n path = row[0].split('.')\n if row[0].startswith('report.') or row[0].startswith('cardset.'):\n d = data\n elif row[0].startswith('web.'):\n d = web_data\n\n for i in range(len(path)):\n if i == len(path) - 1:\n d[path[i]] = zz_string(row[1], row[0])\n else:\n d = d[path[i]]\n with open(os.path.join(OUT_PATH, 'zz_ZZ.json'), 'w', encoding='utf-8') as fout:\n json.dump({**data, **web_data}, fout)\n with open(os.path.join(WEB_PATH, 'zz_ZZ.js'), 'w', encoding='utf-8') as fout:\n fout.write('var STRINGS = {};'.format(json.dumps(web_data)))", "def filepath(filename, data, root='/home/cyneo/Work/Scans/Processed Data/',\r\n filetype='.csv'):\r\n path = os.path.abspath(root + data + '/' + filename +\r\n ' ' + data + filetype)\r\n return path", "def _unit_test_only_get_category_map_files(self):\n result = {}\n for key in self.__all_model_categories:\n category_file_name = '%s.json' % self._get_category_file_prefix(key)\n category_file_path = '%s%s' % (self.__category_modules_dir_name, category_file_name)\n result[key] = category_file_path\n return result", "def extract_data(input_file):\n input_zip = client.file(input_file).getFile().name\n # Create directory to unzip model files into\n if os.path.exists(\"/tmp/unzipped_file/\"):\n rmtree('unzipped_file',ignore_errors=True)\n else:\n os.mkdir(\"/tmp/unzipped_file/\")\n zipped_file = zipfile.ZipFile(input_zip)\n # Extract unzipped files into directory created earlier returns none\n file_path = \"/tmp/unzipped_file/\"\n return zipped_file.extract(\"test_keras_data.csv\", file_path)", "def read_fx_data(self):\n\n dirStr = self.dir\n\n formatSpec1 = '%Y-%m-%d %H:%M:%S'\n formatSpec2 = '%m/%d/%Y %H:%M'\n\n dirN = os.fsencode(dirStr)\n data = []\n labels = {}\n fileIdx = 0\n\n for file in os.listdir(dirN):\n filename = os.fsdecode(file)\n if filename.endswith('.csv'):\n try:\n fileData, label = self.read_fx_data_from_file(os.path.join(dirStr, filename), formatSpec=formatSpec1)\n except:\n fileData, label = self.read_fx_data_from_file(os.path.join(dirStr, filename), formatSpec=formatSpec2)\n\n labels[fileIdx] = label\n fileIdx += 1\n data.append(fileData)\n\n # Drop columns where not all data are present\n scatData = pd.concat([df['Close'] for df in data], axis=1)\n for df in data:\n df.drop(scatData.index[scatData.isnull().any(1).nonzero()[0]], errors='ignore', inplace=True)\n\n return data, labels", "def _read_data_file(self, path_model_id):\n\n path_dataset_file = path_model_id.joinpath('training_set.csv')\n\n with path_dataset_file.open(mode='r', newline='') as f:\n 
csv_reader = reader(f, delimiter=',')\n rows = list(csv_reader)\n\n self.example_count = len(rows)\n\n img_files = [path.join(f'label_{row[1]}', row[0]) for row in rows]\n enc_labels = self.class_le.fit_transform([row[1] for row in rows])\n \n self.files_labels = [[img_files[i], enc_labels[i]]\n for i in range(self.example_count)]", "def get_first_of_day(self, folder_before=None, day=datetime.today(), filename='Epikurve.csv'):\n folders = os.listdir(self.data_root_path)\n folders.sort(reverse=True)\n reached = folder_before is not None\n __folder_before = str(folder_before).split('/')[-1]\n for folder in folders:\n if reached:\n path_csv = self.data_root_path / folder / filename\n with open(path_csv) as f:\n first = True\n for x in csv.reader(f, delimiter=';'):\n if first:\n first = False\n continue\n ts = datetime.strptime(x[2], '%Y-%m-%dT%H:%M:%S')\n break\n if ts.date() <= day.date():\n return folder\n else:\n if folder == __folder_before:\n reached = True", "def createFolderStructure(self):\n\n\t\twith open(self.data_path + 'categories.csv', 'rb') as csvfile:\n\t\t\treader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\t\t\tnext(reader, None) # skip the headers\n\t\t\tfor row in reader:\n\t\t\t\tdirectory = self.data_path + 'categories/' + str(row[1])\n\t\t\t\tif not os.path.exists(directory):\n\t\t\t\t\tos.makedirs(directory)", "def csv_dir(self):\n return op.join(self.root_dir, 'csv')", "def check_intermediate_file(cache_dir, pull_start_dates):\n previous_dfs = {}\n for test_type in TEST_TYPES:\n previous_dfs[test_type] = None\n if pull_start_dates[test_type] is not None:\n pull_start_dates[test_type] = datetime.strptime(\n pull_start_dates[test_type], '%Y-%m-%d')\n\n for filename in os.listdir(cache_dir):\n if \".csv\" in filename:\n test_type = \"_\".join(filename.split(\"_\")[:2])\n date_string = filename.split(\"_\")[4].split(\".\")[0]\n pull_start_dates[test_type] = datetime.strptime(date_string,\n '%Y%m%d') + timedelta(days=1)\n previous_dfs[test_type] = pd.read_csv(join(cache_dir, filename),\n sep=\",\", parse_dates=[\"timestamp\"])\n return previous_dfs, pull_start_dates", "def Get_FPL_Data(path): \n json = Access_URL(url = 'https://fantasy.premierleague.com/api/bootstrap-static/')\n elements_df = pd.DataFrame(json['elements']) # Player statistics\n elements_types_df = pd.DataFrame(json['element_types']) # Rules/positions\n teams_df = pd.DataFrame(json['teams']) # Statistics/team\n elements_df.to_csv(path + 'players_raw.csv', index = False) \n elements_types_df.to_csv(path + 'players_type.csv', index = False)\n teams_df.to_csv(path + 'teams.csv', index = False)\n json = Access_URL(url = 'https://fantasy.premierleague.com/api/fixtures/')\n fixtures_df = pd.DataFrame(json) \n fixtures_df.to_csv(path + 'fixtures.csv', index = False)", "def analyzeViSDEMData(dict):\n \n if 'path_in' in dict:\n path_in = dict['path_in']\n else:\n print(\"Caution: No path for input folder containing the data has been defined. Please define path to folder by dict['path_in']=path_in\") \n return\n \n path_out_default = '../colordeficiency-data/' \n if 'path_out' in dict:\n path_out = dict['path_out']\n else:\n print(\"Caution: No path for output folder where the data should be stored has been defined. 
Using default output path instead: \"+str(path_out_default))\n path_out = path_out_default\n \n if 'round' in dict:\n round = dict['round']\n else:\n print(\"Error: You have to chose a round first.\")\n \n path = os.path.join(os.path.dirname(os.path.abspath(os.path.join(__file__,os.pardir))),'colordeficiency-data')\n \n # 0. Step: Get all the relevant information, i.e. motive_ids, obs_col_defs etc.\n if round == 1:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids.csv\")\n elif round == 2:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids_2.csv\")\n \n vs_ids_sheet = pandas.read_csv(visualsearch_ids,sep=\";\")\n \n # Get all the relevant information about the observers, i.e. obs_col_defs etc.\n observer_ids = os.path.join(path,\"observer_ids.csv\")\n obs_ids_sheet = pandas.read_csv(observer_ids,sep=\";\")\n \n # 1. Step: Read all the XLSX data in the path\n ext = 'xlsx'; xlsx_files = getAllXXXinPath(path_in,ext)\n dataArray = pandas.DataFrame()\n i=1\n for xlsx_file in xlsx_files:\n sys.stdout.write(xlsx_file)\n dataArray_tmp, testArray, extraDataDict = extractExperimentData(os.path.join(path_in,xlsx_file))\n \n newDataArray = dataArray_tmp[['dalt_id','coldef_type','resp.corr_raw','resp.rt_raw','stimFile']]\n \n if \"2. Session\" in extraDataDict:\n sessionID = int(extraDataDict['2. Session'])\n newDataArray['session_id'] = sessionID\n \n if 'group' in extraDataDict:\n obsGroup = str(extraDataDict['group'])\n newDataArray['obsGroup'] = obsGroup\n \n if '0. Participant ID' in extraDataDict:\n obsID = int(extraDataDict['0. Participant ID'])\n \n newDataArray['observer_id'] = obsID\n obs_coldef_type = obs_ids_sheet.loc[obs_ids_sheet['observer_id']==obsID,['observer_coldef_type']]\n newDataArray['observer_coldef_type'] = int(obs_coldef_type['observer_coldef_type'])\n \n dataArray = pandas.concat([dataArray, newDataArray])\n sys.stdout.write(' . ')\n if (i%5)==0: sys.stdout.write('\\n')\n i+=1\n sys.stdout.write('\\n')\n \n # 2. 
Step: Adapt values to programstandards\n for item in settings.colDefLong2ID:\n dataArray.loc[dataArray['coldef_type'] == item, ['coldef_type']] = settings.colDefLong2ID[item]\n \n for item in settings.dalt2ID:\n dataArray.loc[dataArray['dalt_id'] == item, ['dalt_id']] = settings.dalt2ID[item]\n \n dataArray.loc[dataArray['dalt_id'] == 'none', ['dalt_id']] = 0\n \n \n dataArray = dataArray.rename(columns={'dalt_id': 'dalt_id',\n 'coldef_type': 'coldef_type',\n 'resp.corr_raw': 'is_correct',\n 'resp.rt_raw': 'resp_time',\n 'stimFile': 'filepath'})\n dataArray = dataArray.reset_index()\n \n # Adding set_id, motive_id and variant_id to each file\n for index, row in dataArray.iterrows():\n path_tmp = row['filepath']\n filename = os.path.basename(path_tmp).split('.')[0]\n dict_tmp = getStatsFromFilename(filename)\n imgID_tmp = int(dict_tmp['img_id'])\n \n tempVSDataArray = vs_ids_sheet.loc[vs_ids_sheet['image_id']==imgID_tmp,['set_id','motive_id','variant_id']]\n \n dataArray.at[index,'image_id'] = imgID_tmp\n dataArray.ix[index,'set_id'] = int(tempVSDataArray['set_id'])\n dataArray.ix[index,'motive_id'] = int(tempVSDataArray['motive_id'])\n dataArray.ix[index,'variant_id'] = int(tempVSDataArray['variant_id'])\n\n dataArray.image_id = dataArray.image_id.astype(int)\n dataArray.set_id = dataArray.set_id.astype(int)\n dataArray.motive_id = dataArray.motive_id.astype(int)\n dataArray.variant_id = dataArray.variant_id.astype(int)\n dataArray.is_correct = dataArray.is_correct.astype(bool)\n \n dataArray = dataArray[['image_id','set_id','motive_id','variant_id','dalt_id','coldef_type','is_correct','resp_time','observer_id','observer_coldef_type','session_id','filepath','obsGroup']]\n \n # 3. Saving data to file\n try:\n dataArray.to_csv(os.path.join(path_out, 'visdem-data.csv'),sep=\";\")\n sys.stdout.write(\"Success: ViSDEM data successfully saved in '\"+str(path_out)+\"'.\\n\")\n except Exception as e:\n print(e)", "def import_data(ashrae_dir, filenames=const.NAMES):\n print('Importing data from csv')\n ashrae_dir = pathlib.Path(ashrae_dir)\n data = {name: pd.read_csv((ashrae_dir / name).with_suffix('.csv')) for name in filenames}\n\n return data", "def _parse_cvcfolder(self, cvcfolderpath):\n cvcfoldername = os.path.basename(os.path.abspath(cvcfolderpath))\n obsfolderinfo = {}\n cvcextstr = cvcfoldername.split('_')[-1]\n if cvcextstr == 'xst' or cvcextstr == 'xst-SEPTON':\n cvcfoldername_split = cvcfoldername.split('_')\n try:\n (stnid, Ymd, HMS, rcustr, sbstr, intstr, durstr, dirstr, cvcextstr\n ) = cvcfoldername_split\n obsfolderinfo['stnid'] = stnid\n obsfolderinfo['datetime'] = datetime.datetime.strptime(\n Ymd + 'T' + HMS, '%Y%m%dT%H%M%S')\n obsfolderinfo['rcumode'] = rcustr[3:]\n obsfolderinfo['subband'] = sbstr[2:]\n obsfolderinfo['integration'] = float(intstr[3:])\n obsfolderinfo['duration_tot'] = float(durstr[3:])\n obsfolderinfo['pointing'] = dirstr[3:].split(',')\n except:\n raise ValueError(\"Foldername not in xst_ext format.\")\n elif cvcextstr == 'acc':\n dirpat = re.compile(regex_ACCfolder)\n obsdirinfo_m = dirpat.match(cvcfoldername)\n if obsdirinfo_m is None:\n print(\"Cal error\")\n raise ValueError(\n \"Calibration directory does not have correct syntax.\")\n obsdirinfo = obsdirinfo_m.groupdict()\n obsfolderinfo['stnid'] = obsdirinfo['stnid']\n d0 = datetime.datetime(int(obsdirinfo['year']),\n int(obsdirinfo['month']),\n int(obsdirinfo['day']),\n int(obsdirinfo['hour']),\n int(obsdirinfo['minute']),\n int(obsdirinfo['second']))\n obsfolderinfo['sessiontimeid'] = d0\n 
obsfolderinfo['rcumode'] = obsdirinfo['rcumode']\n obsfolderinfo['subband'] = '0:511'\n obsfolderinfo['integration'] = 1.0\n obsfolderinfo['duration_tot'] = int(obsdirinfo['duration_tot'])\n obsfolderinfo['source'] = obsdirinfo['calsrc']\n obsfolderinfo['pointing'] = \\\n ilisa.monitorcontrol.directions.std_pointings(\n obsfolderinfo['source'])\n else:\n raise ValueError(\"Folder not expected xst or acc format.\")\n obsfolderinfo['datatype'] = cvcextstr\n return obsfolderinfo", "def _download_training_data_from_file_system(self) -> 'DataFrame':\n\n try:\n url = f\"{self._wml_client.wml_credentials['url']}/v2/asset_files/{self.location.path.split('/assets/')[-1]}\"\n # note: stream the whole CSV file\n csv_response = requests.get(url,\n params=self._wml_client._params(),\n headers=self._wml_client._get_headers(),\n stream=True,\n verify=False)\n\n if csv_response.status_code != 200:\n raise ApiRequestFailure(u'Failure during {}.'.format(\"downloading model\"), csv_response)\n\n downloaded_asset = csv_response.content\n # note: read the csv/xlsx file from the memory directly into the pandas DataFrame\n buffer = io.BytesIO(downloaded_asset)\n data = try_load_dataset(buffer=buffer,\n sheet_name=self.auto_pipeline_params.get('excel_sheet', 0),\n separator=self.auto_pipeline_params.get('csv_separator', ','),\n encoding=self.auto_pipeline_params.get('encoding', 'utf-8')\n )\n except (ApiRequestFailure, AttributeError):\n with open(self.location.path, 'rb') as data_buffer:\n data = try_load_dataset(buffer=data_buffer,\n sheet_name=self.auto_pipeline_params.get('excel_sheet', 0),\n separator=self.auto_pipeline_params.get('csv_separator', ','),\n encoding=self.auto_pipeline_params.get('encoding', 'utf-8')\n )\n\n return data", "def samplesheet_path_fixture(fixtures_dir: Path) -> Path:\n _file_path = fixtures_dir / \"samplesheet.csv\"\n return _file_path" ]
[ "0.60329616", "0.5951354", "0.5933117", "0.59185493", "0.5806007", "0.57351774", "0.5663706", "0.5627654", "0.562072", "0.55951595", "0.55628073", "0.55555654", "0.5554841", "0.5539798", "0.5537601", "0.5528273", "0.5500308", "0.5499843", "0.54943573", "0.54920584", "0.5481313", "0.5477954", "0.5464948", "0.5448699", "0.5443295", "0.543615", "0.5419603", "0.5418583", "0.53786147", "0.5378308", "0.5374725", "0.5370821", "0.53588766", "0.53225124", "0.5322486", "0.5316816", "0.53120655", "0.5309933", "0.5300171", "0.5296938", "0.5259294", "0.5255726", "0.5243017", "0.5242342", "0.52363104", "0.5220255", "0.5214589", "0.5208546", "0.52029103", "0.5191672", "0.51885176", "0.51865554", "0.51826715", "0.51756173", "0.51736134", "0.5168162", "0.5154398", "0.515217", "0.51487744", "0.5148555", "0.51423895", "0.513938", "0.51242256", "0.511772", "0.51157045", "0.5115495", "0.51116896", "0.51083535", "0.510579", "0.51015896", "0.50975156", "0.5091819", "0.50912344", "0.5078556", "0.5077795", "0.50720483", "0.50703526", "0.5069562", "0.50686264", "0.5061239", "0.50538933", "0.5051557", "0.50485814", "0.50443673", "0.5039453", "0.50369984", "0.503626", "0.50361586", "0.50312346", "0.502913", "0.5028441", "0.5024879", "0.50231564", "0.5022445", "0.5021005", "0.50186163", "0.5016982", "0.5011931", "0.50088364", "0.5005458" ]
0.6474041
0
Description: when given a directory path that has no forecast or observed parent folder and a csv file without the desired name. Expected result: returns a dictionary with empty values
def test_invalid_file_path(self): # Test with an invalid file path #setup filepath = ".data/kano/test.txt" expected_result = { "type": "", "city": "", "state": "", "coordinates": ["", ""], '': {} } #result assert extractor.get_metadata_from_filepath(filepath) == expected_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_observed_folder_path(self):\n\n #setup\n filepath = \".data/observed/Abadia-BA_-11.56_-37.52.csv\"\n expected_result = {\n \"type\": \"observed\",\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": ['-11.56', '-37.52'],\n \"observed\": {}\n }\n \n #result\n assert extractor.get_metadata_from_filepath(filepath) == expected_result", "def test_forecast_folder_path(self):\n\n #setup\n filepath = \".data/forecast/Kano-KN_-9.09_7.39.json\"\n expected_result = {\n \"type\": \"forecast\",\n \"city\": \"Kano\",\n \"state\": \"KN\",\n \"coordinates\": ['-9.09', '7.39'],\n \"forecast\": {}\n }\n #result\n assert extractor.get_metadata_from_filepath(filepath) == expected_result", "def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)", "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n 
split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def loadPredictions(self):\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n message = 'Select folder'\n folderDialog = QtWidgets.QFileDialog(self, message, dir_path)\n folderDialog.setFileMode(QtWidgets.QFileDialog.Directory)\n folderDialog.setOption(QtWidgets.QFileDialog.DontUseNativeDialog, True)\n fileName = [] # Returns a list of the directory\n\n # Plot the window to select the csv file\n if folderDialog.exec_():\n fileName = folderDialog.selectedFiles()\n # Debug\n #fileName = ['/media/dimitris/TOSHIBA EXT/Image_Document_Classification/PMC-Dataset']\n print(fileName)\n if os.path.isdir(str(fileName[0])):\n self.loadFolder(str(fileName[0]))\n else:\n message = 'Only csv files'\n self.messageBox(message)\n return\n\n self.selectFigures()", "def add_path_dict(input_dict: dict, start_path: str, file_path: str):\n # Determine relative path\n relpath = os.path.relpath(file_path, start=start_path)\n\n # If only file remaining, store in dict, otherwise go 1 level deeper\n if relpath == os.path.basename(file_path):\n input_dict[os.path.splitext(relpath)[0]] = pd.read_csv(file_path,\n sep='\\t')\n else:\n parent_dir = relpath.split(os.sep)[0]\n if parent_dir not in input_dict.keys():\n input_dict[parent_dir] = {}\n add_path_dict(input_dict=input_dict[parent_dir],\n start_path=os.path.join(start_path, parent_dir),\n file_path=file_path)", "def read_files(path, file_name):\n\n if os.path.exists(\n r'{}\\{}_dynamic.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_static.csv'.format(path, file_name)) and os.path.exists(\n r'{}\\{}_ego.csv'.format(path, file_name)):\n with open(r'{}\\{}_dynamic.csv'.format(path, file_name)) as tmp_dynamic:\n dynamic_csv = 
pd.read_csv(tmp_dynamic)\n print('Dynamic csv file found')\n with open(r'{}\\{}_static.csv'.format(path, file_name)) as tmp_static:\n static_csv = pd.read_csv(tmp_static)\n print('Static csv file found')\n with open(r'{}\\{}_ego.csv'.format(path, file_name)) as tmp_ego:\n ego_csv = pd.read_csv(tmp_ego)\n print('Ego csv file found')\n return ego_csv, dynamic_csv, static_csv\n\n else:\n print('No available data')\n sys.exit(0)", "def extract_csv_for_date(config, data_date): \n \n ### TODO: test config separately \n \n # print(config.DATA_ROOT)\n # print(data_date)\n \n # Raise an exception if attribute DATA_ROOT does not exist\n if not 'DATA_ROOT' in vars(config):\n raise AttributeError(\"Attribute DATA_ROOT does not exist\")\n \n # Raise an exception if DATA_ROOT does not exist\n if not os.path.exists(config.DATA_ROOT):\n raise NotADirectoryError(\"The path \" + config.DATA_ROOT + \" not found\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'METER_CHANNEL_DICT' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n # Raise an exception if attribute METER_CHANNEL_DICT does not exist\n if not 'SAMPLE_TIME' in vars(config):\n raise AttributeError(\"Attribute METER_CHANNEL_DICT does not exist\")\n \n data_date_dt = parse(data_date)\n \n if data_date_dt > config.DATA_END_DATE:\n raise ValueError(\"data_date entered is greater than the DATA_END_DATE: \" + \n str(config.DATA_END_DATE))\n \n if data_date_dt < config.DATA_START_DATE:\n raise ValueError(\"data_date entered is less than the DATA_START_DATE: \" + \n str(config.DATA_START_DATE))\n \n # Get the year, month and and day from date entered\n data_year = data_date_dt.year\n data_month = data_date_dt.month\n data_day = data_date_dt.day\n \n # Get the corresponding path in the directory to look for the data for the day\n data_path = os.path.join(config.DATA_ROOT, str(data_year), \"{:02}\".format(data_month), \"{:02}\".format(data_day))\n # print(data_path)\n # Find the count of meters\n meter_count = len(config.METER_CHANNEL_DICT)\n\n # Dictionary to store the names of the resulting csv files\n meter_csv_names = {}\n \n # Get the down-sampling time\n sample_time = config.SAMPLE_TIME\n \n # Create a dictionary with keys are meter names and values as dataframes \n # containing the data for the day\n meter_collection = {}\n \n # for meter_name in config.METER_CHANNEL_DICT:\n # # Create an empty dataframe, the columns will be created later\n # meter_collection[meter_name] = pd.DataFrame()\n\n #print(meter_collection)\n if os.path.exists(data_path):\n # Walk through all the files in the directory for the day's data\n for dirpath, dirnames, files in os.walk(data_path, topdown=True):\n # `files` contains the names of all the files at the location\n if len(files) == 0:\n print(\"No files found for day: \" + data_path)\n continue\n for filename in files:\n # Get the netcdf files, these are files with `.nc` extension\n if filename.lower().endswith('.nc'):\n # For the particular file, find out the corresponding meter and channel \n [meter, channel] = extract_ppty(filename, config.METER_CHANNEL_DICT.keys())\n # Create an entry in the `meter_collection` dict if it does not exist yet\n if meter not in meter_collection:\n meter_collection[meter] = pd.DataFrame()\n # Form the resulting csv name from the meter name if it doesnt exist yet\n # They are of the type - meter_name@Timestamp@Duration@Frequency\n # For e.g.: PQube3@2017-11-01T080002Z@[email protected]\n #print(meter, channel)\n 
if meter not in meter_csv_names:\n meter_csv_names[meter] = '@'.join([meter, '@'.join(filename.split('@')[1:4])])[:-3] + '.csv'\n #print(meter_csv_names)\n # Get the full path of the csv\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # Only extract if not already extracted to csv\n if (not os.path.isfile(csv_name)):\n # Get the dataframe containing time and channel values\n channel_df = extract_data(dirpath, filename)\n # Give the dataframe column a name\n channel_df.columns = [channel]\n # Down-sample the data to the sampling time intended\n channel_resampled = data_resample(channel_df, sample_time)\n # If our meter dataframe is empty so far, i.e. if this is the \n # first channel being entered, then create a copy of the \n # resampled dataframe\n if meter_collection[meter].empty:\n meter_collection[meter] = channel_resampled.copy()\n ####################### \n # This `else` clause handles two cases:\n # 1. If the dataframe is not empty, then add other columns to\n # the dataframe. (the else case)\n # 2. Some days have data downloaded more than once, this means \n # that channels can occur more than once. (like 05/21/2018)\n #######################\n else:\n # If the channel already exists in the dataframe\n # then either the other file has updated data or \n # subsequent data. \n if channel in meter_collection[meter].columns:\n # Get index from total dataframe \n idx_1 = meter_collection[meter].index\n # Get index from file dataframe\n idx_2 = channel_resampled.index\n # Compare the two, if the index is contained within,\n # then **update** the channel's value for file's indices. \n if np.all(np.isin(idx_2, idx_1)):\n meter_collection[meter][channel].loc[idx_2] = channel_resampled.values.tolist()\n # If the index is not contained, append the file df to\n # the total dataframe\n else:\n meter_collection[meter] = meter_collection[meter].append(channel_resampled, sort=False)\n meter_collection[meter].sort_index(inplace=True)\n #######################\n # This data is resampled a second time to handle two cases:\n # 1. When appending a resampled dataframe to an already resampled dataframe, the last\n # index of the original dataframe and the first index of the new dataframe can have\n # the same time. Resampling the appended dataframe will eliminate the repetitions.\n # 2. If the new dataframe to be appended starts at a much later time, resampling the\n # appended dataframe will create rows of missing data (NaN) at the times with no\n # measurement values. This makes it easier to detect missing measurement values and\n # perform data imputation at a later phase.\n #######################\n meter_collection[meter] = data_resample(meter_collection[meter], sample_time)\n # If the channel does not already exist, then add the\n # file dataframe to the total df. 
\n else:\n meter_collection[meter] = meter_collection[meter].join(channel_resampled, how='outer')\n else:\n print(\"Path not found: \" + data_path)\n \n # Perform data imputation wherrever needed\n # print(meter_collection)\n meter_collection = data_impute(meter_collection)\n \n # Write the total dataframes to csv file\n for meter in meter_collection:\n # Reorganize the order of columns to match the database tables \n meter_channels = config.METER_CHANNEL_DICT[meter]\n # meter_collection[meter].reset_index(inplace=True)\n meter_collection[meter] = meter_collection[meter].reindex(columns=meter_channels[1:])\n csv_name = os.path.join(data_path, meter_csv_names[meter])\n # print(csv_name)\n # Only write csv if it does not exist yet\n if(not os.path.isfile(csv_name)):\n meter_collection[meter].to_csv(csv_name, header=False)\n\n return meter_csv_names", "def test_dfi_raises_if_folder_missing(self):\n with TemporaryDirectory() as tmpdirname:\n # start with empty project (no data/coefficients subdirectory)\n with raises(SmifDataNotFoundError):\n CSVDataStore(tmpdirname)", "def load_data_files() -> Dict[str, Path]:\n default_path = paths.MISCELLANEOUS_DIRECTORY / \"portfolio\"\n custom_exports = (\n get_current_user().preferences.USER_PORTFOLIO_DATA_DIRECTORY / \"optimization\"\n )\n data_files = {}\n for directory in [default_path, custom_exports]:\n for file_type in [\"xlsx\", \"ini\"]:\n for filepath in Path(directory).rglob(f\"*.{file_type}\"):\n if filepath.is_file() and \"example\" not in filepath.name:\n data_files[filepath.name] = filepath\n\n return data_files", "def get_data(self):\n \n with os.scandir(self.file_path) as collection_of_files:\n files_found = [file.name.split('.')[0] for file in collection_of_files \n if (file.name.split('.')[0].lower().strip() in self._data_requirements.required_file_names \n and file.name.endswith('.csv'))]\n\n self.check_missing_files(files_found)\n \n self._data = DictObjectView(self.read_in_files(files_found))", "def __init__(self, root_dir):\n self.paths = glob.glob(root_dir + \"/*.csv\")\n self.target = 'Default'\n # Grouping variable names", "def __setupPaths(self):\n self.csv_file_names = filter(\n (lambda x: not re.match(\".*\\\\.csv$\", x) is None),\n os.listdir(self.path))", "def getAllDataFromDirectory(prediction_directory, actual_directory, write_directory, cities_file, utc_offset = False):\n city_dictionary = getCities(cities_file)\n actualGetter = getActualWeather(actual_directory, city_dictionary, get_API_keys())\n #For each day and for each city, get all the data and put it into a spreadsheet.", "def update_csv():\n return os.listdir('./data')", "def _read_source_data(self) -> pd.DataFrame:\n df = None\n try:\n logger.info(\"reading csv base file under simulation folder\", class_name=self.__class__.__name__)\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/simulation/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.warning(\"base file not processed, trying under unprocessed folder\",\n class_name=self.__class__.__name__)\n try:\n df = pd.read_csv(\n f\"{Path(__file__).parents[1]}/data/unprocessed/{self.base_data_filename}\"\n )\n except FileNotFoundError:\n logger.error(\"base file not found... 
exiting\", class_name=self.__class__.__name__)\n exit(1)\n return df", "def test_process(self, tmp_path):\n export_dir = tmp_path / 'export'\n export_dir.mkdir()\n\n process(['raw_data/small_raw_data_0.csv',\n 'raw_data/small_raw_data_1.csv',\n # File 2 does not exist.\n 'raw_data/small_raw_data_2.csv',\n 'raw_data/small_raw_data_3.csv'],\n SIGNALS,\n ['median_home_dwell_time',\n 'completely_home_prop_7dav'],\n ['state'],\n export_dir)\n\n expected = {\n 'wip_median_home_dwell_time': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [6, 3.5],\n 'se': [None, 0.5],\n 'sample_size': [1, 2]\n }),\n 'completely_home_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.15, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'part_time_work_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.35, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'full_time_work_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.45, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'median_home_dwell_time_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [4.5, 3.5, 7.5],\n 'se': [1.5, 0.5, 0.5],\n 'sample_size': [2, 2, 2]\n }),\n 'wip_completely_home_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.1, 0.055, 0.15],\n 'se': [0.05, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n }),\n 'part_time_work_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.25, 0.055, 0.25],\n 'se': [0.1, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n }),\n 'full_time_work_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.35, 0.055, 0.35],\n 'se': [0.1, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n })\n }\n actual = {signal: pd.read_csv(\n export_dir / f'20200612_state_{signal}.csv')\n for signal in expected}\n for signal in expected:\n pd.testing.assert_frame_equal(expected[signal], actual[signal])", "def check_for_csv(inp_dict):\n if inp_dict[\".csv\"]:\n for path in inp_dict[\".csv\"]:\n csv_path = convert_csv(path)\n inp_dict[\".xls\"].append(csv_path)\n inp_dict[\"del\"].append(csv_path)\n inp_dict[\"out\"].append(csv_path)\n inp_dict[\".csv\"] = []\n return inp_dict", "def readCsv(variables, path, pathCsv, estacion):\n # os.makedirs('../data/totalData/')\n dataVa = df.DataFrame()\n variables = variables\n mypath = path\n patron = re.compile(variables + '_'+estacion+'_\\d\\d\\d\\d-\\d\\d-\\d\\d' + '.*')\n for base, dirs, filess in os.walk(mypath, topdown=False):\n filess = sorted(filess)\n for value in filess:\n if patron.match(value) != None:\n tempData = df.read_csv(mypath + value)\n #tempData = completeMet(tempData)\n tempData = tempData.iloc[0:24, :]\n dataVa = concat([tempData, dataVa], axis=0)\n dataVa = dataVa.reset_index()\n dataVa = dataVa.drop(labels='index', axis=1)\n dataVa.to_csv(pathCsv + variables + '_'+ estacion +'_total.csv', encoding='utf-8', index=False)\n dataVa = df.DataFrame()", "def test_findCSV(self,\n filename=\"page-views.csv\",\n input_folder='../../input/raw-data/'):\n\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = input_folder + filename\n self.assertEqual(csv_file, expected_output)", "def collect_data(folder):\n folder = pathlib.Path(folder)\n cases = []\n for case_folder in folder.iterdir():\n print(f'start collecting data for location {case_folder.name}')\n for tr_folder in case_folder.iterdir():\n case = calculate_values(tr_folder)\n cases.append(case)\n \n df = pd.DataFrame(cases)\n 
print(folder.parent.joinpath(f'{folder.stem}.csv'))\n df.to_csv(folder.parent.joinpath(f'{folder.stem}.csv'), index=False)", "def read_test_rf_csv():\n if os.path.exists(\"test_rf.csv\"):\n #print (\"--testing CSV imported\\n\")\n results = pd.read_csv(\"test_rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def Get_Player_Historic_Data(data_path, player_history_path): \n players = os.listdir(player_history_path) # Lists All The Player Folders in the Dir\n players_data = pd.read_csv(data_path + 'players_raw.csv')\n for ind in pbar(players_data.index): # ind in [0:693:1]\n # Get the Seasonal History\n player_path = players_data['first_name'][ind] + '_' + players_data['second_name'][ind] + '_' + str(players_data['id'][ind]) # Create player_history_path\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n # print(json.keys())\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n os.makedirs(player_history_path + player_path, exist_ok = True) # Create a new path for the player \n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his syeasonal history\n else: # However, if the player is within the existing directory\n if not os.path.isfile(player_history_path + player_path + \"/history.csv\"): # And a history file does not exist\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID\n history_df = pd.DataFrame(json['history_past']) # Extract history\n if not history_df.empty: # If history returned\n history_df.to_csv(player_history_path + player_path + '/history.csv', encoding='utf-8', index = False) # And write his seasonal history\n # Get the Gameweek History\n json = Access_URL(url = \"https://fantasy.premierleague.com/api/element-summary/{}/\".format(str(players_data['id'][ind]))) # Feed in Player ID \n history_df_gw = pd.DataFrame(json['history']) # Extract Gameweek History\n if not history_df_gw.empty: # If history returned\n if player_path not in players: # If the player (read from players_raw.csv) is not within the existing directory, continue: \n os.makedirs(player_history_path + player_path, exist_ok = True) # Create the directory, exit\n history_df_gw.to_csv(player_history_path + player_path + '/gw.csv', encoding='utf-8', index = False) # Write the CSV", "def create_path_dict(save_path):\n act_fn = [sorted(['relu', 'antirelu', 'identity', 'tanh', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'sigmoid']),\n sorted(['relu', 'antirelu', 'identity', 'tanh']),\n sorted(['relu', 'antirelu', 'sigmoid', 'tanh']),\n sorted(['relu', 'identity', 'sigmoid', 'tanh']),\n sorted(['antirelu', 'identity', 'sigmoid', 'tanh']),\n ['relu'],\n ['sigmoid'],\n ['tanh'],\n ['antirelu'],\n ['None']]\n # ['identity']]\n\n act_fn = ['_'.join(act) for act in act_fn]\n path_dict = defaultdict(list)\n for (filepath, dirname, filename) in os.walk(save_path):\n if 'results.json' in filename:\n for act in act_fn:\n temp = filepath.split('/')\n if act == temp[-1] or act == temp[-2]:\n path_dict[act].append(filepath)\n print(path_dict)\n return path_dict", "def existing_data(self):\n # Set the directory and file name\n data_summary_dir = 
op.join('../logs', self.name, 'data_summary')\n file_name = 'Train_Test_Summary_generative.csv'\n\n # Read the csv and obtain the train data list\n df = pd.read_csv(op.join(data_summary_dir, file_name))\n train_data = df['Train Data'].dropna().values.tolist()\n test_data = df['Test Data'].dropna().values.tolist()\n\n train_data_list, test_data_list = [], []\n for single_train in train_data:\n data_name = single_train.split('_')[0]\n if data_name == 'LTRC':\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4]\n else:\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4] + '_' + single_train.split('_')[5]\n full_data_name = single_train.split('_')[0] + '_' + single_train.split('_')[1] + '_' + single_train.split('_')[2] + '_' + series\n train_data_list.append(full_data_name)\n\n for single_test in test_data:\n data_name = single_test.split('_')[0]\n if data_name == 'LTRC':\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4]\n else:\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4] + '_' + single_test.split('_')[5]\n full_data_name = single_test.split('_')[0] + '_' + single_test.split('_')[1] + '_' + single_test.split('_')[2] + '_' + series\n test_data_list.append(full_data_name)\n\n # Obtain the label map and CT list and file names\n label_map_list = glob(op.join(self.save_root_dir, 'source_data_2', '*'))\n ct_list = glob(op.join(self.save_root_dir, 'target_data_2', '*'))\n\n label_map_files = [single_file.split('/')[-1] for single_file in label_map_list]\n ct_files = [single_file.split('/')[-1] for single_file in ct_list]\n label_map_files.sort(), ct_files.sort()\n\n # Initialize empty list\n existing_train_lm, existing_train_ct = [], []\n existing_test_lm, existing_test_ct = [], []\n\n for single_lm, single_ct in zip(label_map_files, ct_files):\n\n ct_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\n lm_data_name = single_lm.split('_')[0] + '_' + single_lm.split('_')[1] + '_' + single_lm.split('_')[2]\n\n assert ct_data_name == lm_data_name, 'Data is not the same.'\n\n data_name = single_ct.split('_')[0]\n if data_name == 'LTRC':\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4]\n else:\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4] + '_' + single_ct.split('_')[5]\n full_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\\\n + '_' + series\n\n if full_data_name in train_data_list:\n existing_train_lm.append(single_lm)\n existing_train_ct.append(single_ct)\n if full_data_name in test_data_list:\n existing_test_lm.append(single_lm)\n existing_test_ct.append(single_ct)\n existing_train_data = [existing_train_lm, existing_train_ct]\n existing_test_data = [existing_test_lm, existing_test_ct]\n return existing_train_data, existing_test_data", "def parse_folder(self, path):\n\n for filename in os.listdir(path):\n self.parse_file(os.path.join(path, filename), filename)\n return self.country_dict, self.hre_dict, self.name_dict", "def check_intermediate_file(cache_dir, pull_start_dates):\n previous_dfs = {}\n for test_type in TEST_TYPES:\n previous_dfs[test_type] = None\n if pull_start_dates[test_type] is not None:\n pull_start_dates[test_type] = datetime.strptime(\n pull_start_dates[test_type], '%Y-%m-%d')\n\n for filename in os.listdir(cache_dir):\n if \".csv\" in filename:\n test_type = \"_\".join(filename.split(\"_\")[:2])\n date_string = filename.split(\"_\")[4].split(\".\")[0]\n 
pull_start_dates[test_type] = datetime.strptime(date_string,\n '%Y%m%d') + timedelta(days=1)\n previous_dfs[test_type] = pd.read_csv(join(cache_dir, filename),\n sep=\",\", parse_dates=[\"timestamp\"])\n return previous_dfs, pull_start_dates", "def test_search_file(self):\n base_dir = join(get_current_path(), 'samples', 'base_dir1')\n output_dir = join(get_current_path(), 'samples', 'base_dir1', 'result')\n files = search_files(base_dir, output_dir)\n self.assertTrue(self.verify_sub_folders(list(files.keys())))\n\n # sub folders under Concord is not counted, only files\n self.assertEqual(len(files['Concord']), 5)\n self.assertEqual(len(files['ListCo Equity']), 1)\n self.assertEqual(len(files['CLO Equity']), 2)\n self.assertEqual(files['ListCo Equity'][0], join(base_dir, 'ListCo Equity', 'Positions1219.xlsx'))", "def import_to_df(\n list: str,\n path: str = \"competition_data\"\n ) -> dict:\n\n df_dict = {}\n for file in list:\n if 'csv' not in file:\n continue\n df = pd.read_csv(\"/\".join([path, file]))\n # remove extension\n name = file.split('.')[0]\n df_dict[name] = df\n \n return df_dict", "def csv_path(name):\n return \"./data/%s\" % name", "def _get_model_val(models_name, data_dir, val_source='test'):\n model_val = {}\n for model in models_name:\n mypath = data_dir + '/' + model\n only_files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n select_files = [val_source in x for x in only_files]\n only_files = list(compress(only_files, select_files))\n if type(only_files) == list:\n for name_file in only_files:\n df_name = name_file.replace('.csv', '')\n model_val[model + '_' + df_name] = pd.read_csv(mypath + '/' + name_file)\n else:\n df_name = only_files.replace('.csv', '')\n model_val[model + '_' + df_name] = pd.read_csv(mypath + '/' + only_files)\n return model_val", "def get_csv_paths(top_path):\n # exclude is a set holding all dirnames to be excluded\n exclude = {\"fails\", \"archive\", \"exclude\", \"fail\", \"backup\"}\n # files is a dict that defaults to lists, so values can be appended to keys\n files = defaultdict(list)\n for dirpath, dirnames, filenames in os.walk(top_path, topdown=True):\n dirnames[:] = [d for d in dirnames if d.lower() not in exclude]\n\n for filename in filenames:\n\n # gather .csv and .tsv files\n if \".csv\" in str(filename).lower() or \".tsv\" in str(filename).lower():\n # Add filename to the key of dirpath\n files[dirpath].append(filename)\n return files", "def get_files(input_dir):\n file_rep = { \"tars\" : [] }\n \n files = os.listdir(input_dir)\n \n the_file, the_date = find_bootstrap(files)\n \n #add index file in file_rep\n file_rep['index'] = the_file\n file_rep['date'] = the_date\n \n pattern = \"ncep_forecast_%s_(?P<name>\\S+).tar\" % (the_date)\n \n the_re = re.compile(pattern)\n\n for the_file in files:\n matched = the_re.match(the_file)\n if matched:\n print(\"matched %s\" % (matched.group(\"name\")))\n file_rep['tars'].append(the_file)\n \n return file_rep", "def test_invalid_csv(self):\n cwd=os.getcwd()\n url=\"http://stackoverflow.com/questions/17730173/python-cant-get-full-path-name-of-file\"\n with self.assertRaises(TypeError):\n requester.url_to_csv(url,\"{0}/{1}.csv\".format(cwd,'tester'))", "def analyzeViSDEMData(dict):\n \n if 'path_in' in dict:\n path_in = dict['path_in']\n else:\n print(\"Caution: No path for input folder containing the data has been defined. 
Please define path to folder by dict['path_in']=path_in\") \n return\n \n path_out_default = '../colordeficiency-data/' \n if 'path_out' in dict:\n path_out = dict['path_out']\n else:\n print(\"Caution: No path for output folder where the data should be stored has been defined. Using default output path instead: \"+str(path_out_default))\n path_out = path_out_default\n \n if 'round' in dict:\n round = dict['round']\n else:\n print(\"Error: You have to chose a round first.\")\n \n path = os.path.join(os.path.dirname(os.path.abspath(os.path.join(__file__,os.pardir))),'colordeficiency-data')\n \n # 0. Step: Get all the relevant information, i.e. motive_ids, obs_col_defs etc.\n if round == 1:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids.csv\")\n elif round == 2:\n visualsearch_ids = os.path.join(path,\"visualsearch_ids_2.csv\")\n \n vs_ids_sheet = pandas.read_csv(visualsearch_ids,sep=\";\")\n \n # Get all the relevant information about the observers, i.e. obs_col_defs etc.\n observer_ids = os.path.join(path,\"observer_ids.csv\")\n obs_ids_sheet = pandas.read_csv(observer_ids,sep=\";\")\n \n # 1. Step: Read all the XLSX data in the path\n ext = 'xlsx'; xlsx_files = getAllXXXinPath(path_in,ext)\n dataArray = pandas.DataFrame()\n i=1\n for xlsx_file in xlsx_files:\n sys.stdout.write(xlsx_file)\n dataArray_tmp, testArray, extraDataDict = extractExperimentData(os.path.join(path_in,xlsx_file))\n \n newDataArray = dataArray_tmp[['dalt_id','coldef_type','resp.corr_raw','resp.rt_raw','stimFile']]\n \n if \"2. Session\" in extraDataDict:\n sessionID = int(extraDataDict['2. Session'])\n newDataArray['session_id'] = sessionID\n \n if 'group' in extraDataDict:\n obsGroup = str(extraDataDict['group'])\n newDataArray['obsGroup'] = obsGroup\n \n if '0. Participant ID' in extraDataDict:\n obsID = int(extraDataDict['0. Participant ID'])\n \n newDataArray['observer_id'] = obsID\n obs_coldef_type = obs_ids_sheet.loc[obs_ids_sheet['observer_id']==obsID,['observer_coldef_type']]\n newDataArray['observer_coldef_type'] = int(obs_coldef_type['observer_coldef_type'])\n \n dataArray = pandas.concat([dataArray, newDataArray])\n sys.stdout.write(' . ')\n if (i%5)==0: sys.stdout.write('\\n')\n i+=1\n sys.stdout.write('\\n')\n \n # 2. 
Step: Adapt values to programstandards\n for item in settings.colDefLong2ID:\n dataArray.loc[dataArray['coldef_type'] == item, ['coldef_type']] = settings.colDefLong2ID[item]\n \n for item in settings.dalt2ID:\n dataArray.loc[dataArray['dalt_id'] == item, ['dalt_id']] = settings.dalt2ID[item]\n \n dataArray.loc[dataArray['dalt_id'] == 'none', ['dalt_id']] = 0\n \n \n dataArray = dataArray.rename(columns={'dalt_id': 'dalt_id',\n 'coldef_type': 'coldef_type',\n 'resp.corr_raw': 'is_correct',\n 'resp.rt_raw': 'resp_time',\n 'stimFile': 'filepath'})\n dataArray = dataArray.reset_index()\n \n # Adding set_id, motive_id and variant_id to each file\n for index, row in dataArray.iterrows():\n path_tmp = row['filepath']\n filename = os.path.basename(path_tmp).split('.')[0]\n dict_tmp = getStatsFromFilename(filename)\n imgID_tmp = int(dict_tmp['img_id'])\n \n tempVSDataArray = vs_ids_sheet.loc[vs_ids_sheet['image_id']==imgID_tmp,['set_id','motive_id','variant_id']]\n \n dataArray.at[index,'image_id'] = imgID_tmp\n dataArray.ix[index,'set_id'] = int(tempVSDataArray['set_id'])\n dataArray.ix[index,'motive_id'] = int(tempVSDataArray['motive_id'])\n dataArray.ix[index,'variant_id'] = int(tempVSDataArray['variant_id'])\n\n dataArray.image_id = dataArray.image_id.astype(int)\n dataArray.set_id = dataArray.set_id.astype(int)\n dataArray.motive_id = dataArray.motive_id.astype(int)\n dataArray.variant_id = dataArray.variant_id.astype(int)\n dataArray.is_correct = dataArray.is_correct.astype(bool)\n \n dataArray = dataArray[['image_id','set_id','motive_id','variant_id','dalt_id','coldef_type','is_correct','resp_time','observer_id','observer_coldef_type','session_id','filepath','obsGroup']]\n \n # 3. Saving data to file\n try:\n dataArray.to_csv(os.path.join(path_out, 'visdem-data.csv'),sep=\";\")\n sys.stdout.write(\"Success: ViSDEM data successfully saved in '\"+str(path_out)+\"'.\\n\")\n except Exception as e:\n print(e)", "def read_weatherstations(path_to_data):\n namedict = read_weatherstationnames(path_to_data)\n stations = {}\n for i in namedict:\n filename = namedict[i].replace(' ', '_') + '.csv'\n print(\"Reading\", filename)\n ws = read_station_csv(os.path.join(path_to_data, filename))\n stations[i] = ws\n return stations", "def _get_csv_path(name):\n return os.path.join(cwd, 'output/app_info', name)", "def read_in_files(self, files_found):\n \n file_data_dict = {file.lower().strip():self.utilities.catch_exception(pd.read_csv, self.file_path + file + '.csv')\n for file in files_found}\n \n load_fail_files = [k for k,v in file_data_dict.items() if not isinstance(v, pd.DataFrame)]\n \n if load_fail_files:\n load_fail_files_string = ',\\n'.join(load_fail_files)\n raise ValueError(f\"Unable to load the following files: \\n\\n{load_fail_files_string}\\n\\nPlease check the data format in these files\")\n \n return file_data_dict", "def get_data(self):\r\n\r\n # Find the absolute path for the root dir (04-Decision-Science)\r\n # Uses __file__ as absolute path anchor\r\n root_dir = os.path.abspath('')\r\n\r\n # Use os library for Unix vs. 
Widowns robustness\r\n xls_path = os.path.join(root_dir, 'data')\r\n\r\n file_names = [f for f in os.listdir(csv_path) if f.endswith('.xls')]\r\n\r\n def key_from_file_name(f):\r\n if f[-4:] == '.xls':\r\n return f[:-4]\r\n\r\n # Create the dictionary\r\n data = {}\r\n for f in file_names:\r\n data[key_from_file_name(f)] = pd.read_excel(os.path.join(xls_path, f))", "def read_rf_csv():\n if os.path.exists(\"rf.csv\"):\n #print (\"--decision trees CSV imported\\n\")\n results = pd.read_csv(\"rf.csv\", index_col=0)\n else:\n print(\"log not found\")\n\n return results", "def get_data( name=None, force_download=False, version=19, target_extension='.csv' ):\n os.makedirs(DATA_PATH, exist_ok=True)\n\n def download_data( version ):\n url = \"https://ndownloader.figshare.com/articles/14766102/versions/\" + str(version)\n target_file_name = \"14766102.zip\"\n target_file_name_path = tf.keras.utils.get_file(target_file_name, url,\n cache_subdir=DATA_PATH, extract = True )\n os.remove( DATA_PATH + target_file_name )\n\n if force_download:\n download_data( version = version )\n\n\n files = []\n for fname in os.listdir(DATA_PATH):\n if ( fname.endswith(target_extension) ) :\n fname = os.path.join(DATA_PATH, fname)\n files.append(fname)\n\n if len( files ) == 0 :\n download_data( version = version )\n for fname in os.listdir(DATA_PATH):\n if ( fname.endswith(target_extension) ) :\n fname = os.path.join(DATA_PATH, fname)\n files.append(fname)\n\n if name == 'all':\n return files\n\n datapath = None\n\n for fname in os.listdir(DATA_PATH):\n mystem = (Path(fname).resolve().stem)\n mystem = (Path(mystem).resolve().stem)\n mystem = (Path(mystem).resolve().stem)\n if ( name == mystem and fname.endswith(target_extension) ) :\n datapath = os.path.join(DATA_PATH, fname)\n\n if datapath is None:\n raise ValueError('File doesnt exist. Options: ' , os.listdir(DATA_PATH))\n return datapath", "def read_data(name: str) -> pd.DataFrame:\n import_dir = Path.cwd().joinpath('eikon_data_files')\n\n path = Path.joinpath(import_dir, Path(name))\n if path.exists():\n return pd.read_csv(path, sep=',')\n else:\n print('File type \"' + name + '.csv' + ' does not exist. 
Aborted.')\n quit()", "def test_divide_csv_daily(self):\n\n with tempfile.TemporaryDirectory() as td:\n filename = \"storage_data.csv\"\n file_path = f\"{td}/{filename}\"\n with patch(\"masu.external.downloader.ocp.ocp_report_downloader.pd\") as mock_pd:\n with patch(\n \"masu.external.downloader.ocp.ocp_report_downloader.utils.detect_type\",\n return_value=(\"storage_usage\", None),\n ):\n dates = [\"2020-01-01 00:00:00 +UTC\", \"2020-01-02 00:00:00 +UTC\"]\n mock_report = {\n \"interval_start\": dates,\n \"persistentvolumeclaim_labels\": [\"label1\", \"label2\"],\n }\n df = pd.DataFrame(data=mock_report)\n mock_pd.read_csv.return_value = df\n daily_files = divide_csv_daily(file_path, self.ocp_manifest_id)\n self.assertNotEqual([], daily_files)\n self.assertEqual(len(daily_files), 2)\n gen_files = [\n f\"storage_usage.2020-01-01.{self.ocp_manifest_id}.0.csv\",\n f\"storage_usage.2020-01-02.{self.ocp_manifest_id}.0.csv\",\n ]\n expected_dates = [datetime.strptime(date[:10], \"%Y-%m-%d\") for date in dates]\n expected = [\n {\"filename\": gen_file, \"filepath\": f\"{td}/{gen_file}\", \"date\": expected_dates[i]}\n for i, gen_file in enumerate(gen_files)\n ]\n for expected_item in expected:\n self.assertIn(expected_item, daily_files)", "def route_data(route):\n os.chdir(\"../Data/test\") #change to whatever directory your data files are stored in\n with open(\"../Sorted Data/\"+str(route)+\"_data.csv\",\"w\",newline=\"\") as result_file: #storing resulting data in csv file in different directory\n wr=csv.writer(result_file, dialect='excel') #setting up csv writer\n for file in glob.glob(\"*.csv\"): #looping through raw data files\n reader=csv.reader(open(file))\n for line in reader:\n if extract_bus_route(line[3])==route: #extract_bus_route returns the bus route from journey pattern id (col D)\n wr.writerow(line)", "def _unit_test_only_get_category_map_files(self):\n result = {}\n for key in self.__all_model_categories:\n category_file_name = '%s.json' % self._get_category_file_prefix(key)\n category_file_path = '%s%s' % (self.__category_modules_dir_name, category_file_name)\n result[key] = category_file_path\n return result", "def getFake(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/Fake.csv\")", "def read_outputs(self):\n out_dir = 'example/output/pm_abcd_mrtm_watch_1971_2001/'\n out_file_names = glob.glob('{}*.csv'.format(out_dir))\n\n out_files = {}\n for f in out_file_names:\n df = pd.read_csv(f)\n out_files[f] = df\n\n return(out_files)", "def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)", "def preprocess_files(file_path):\n # checking your current working directory\n cur_dir = os.getcwd()\n\n # Get your current folder and sub folder event data\n data_dir = os.path.join(cur_dir, 'event_data')\n\n # Create a for loop to create a list of files and collect each\n # file_path\n file_path_list = []\n for root, dirs, files in os.walk(data_dir):\n # join the file path and roots with the subdirectories using\n # glob\n file_path_list = 
glob.glob(os.path.join(root, '*'))\n\n full_data_rows_list = []\n\n # for every file_path in the file path list collect records\n for f in file_path_list:\n\n # reading csv file\n with open(f, 'r', encoding='utf8', newline='') as csvfile:\n\n # creating a csv reader object\n csvreader = csv.reader(csvfile)\n next(csvreader)\n\n # extracting each data row one by one and append it\n for line in csvreader:\n full_data_rows_list.append(line)\n\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL,\n skipinitialspace=True)\n\n # create one file with all the records\n with open(file_path, 'w', encoding='utf8',\n newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(\n ['artist', 'firstName', 'gender', 'itemInSession',\n 'lastName', 'length', 'level', 'location', 'sessionId',\n 'song', 'userId'])\n for row in full_data_rows_list:\n if row[0] == '':\n continue\n writer.writerow((row[0], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[12], row[13],\n row[16]))", "def load_data():\n directories=[\"./track1/\",\n \"./track1_recovery/\",\n \"./track2/\",\n \"./track1_reverse/\",\n \"./track2_reverse/\",#Additional data for model built on top of lenet.h5\n \"./track2_recovery/\",#Additions data for model built on top of lenet.h5\n ]\n lines=[]\n for directory in directories:\n with open(directory+\"driving_log.csv\") as csvfile:\n reader=csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n train_samples, validation_samples = train_test_split(lines, test_size=0.2)\n return train_samples, validation_samples", "def Filter(PATH,Output_folder='.',outcome=0.):\n folders = glob.glob(PATH+'/*')\n folders.sort()\n i=-1\n \n # Create target directories\n if not os.path.exists(Output_folder+'/short'):\n os.makedirs(Output_folder+'/short')\n if not os.path.exists(Output_folder+'/50ohm'):\n os.makedirs(Output_folder+'/50ohm') \n if not os.path.exists(Output_folder+'/antenna'):\n os.makedirs(Output_folder+'/antenna')\n if not os.path.exists(Output_folder+'/Tmeas'):\n os.makedirs(Output_folder+'/Tmeas') \n if not os.path.exists(Output_folder+'/K_jnc'): \n os.makedirs(Output_folder+'/K_jnc')\n \n for subdirs, dirs, files in os.walk(PATH):\n dirs[:] = [d for d in dirs if not d.startswith('.')] # Inore hidden folders (ipynb checkpoints for example)\n dirs.sort()\n files.sort()\n short,antenna,_50ohm,measure,K_jnc = [],[],[],[],[]\n short_date,_50ohm_date,measure_date =[],[],[]\n\n # Walk through directories\n for file in files:\n path = os.path.join(subdirs,file)\n date = file.split(\"_\")[0]\n if os.path.getsize(path)==0: # Filtering empty data\n print 'EMPTY FILE:',path\n continue\n \n data = np.loadtxt(path,unpack=True)\n if data.size == 0:\n print 'NO DATA IN FILE:',path\n continue\n \n elif file.endswith('short.dat'):\n T_short = Res2Temp(data,bwidth)\n short.append(T_short),short_date.append(date)\n elif file.endswith('50ohm.dat'):\n T_50ohm = Res2Temp(data,bwidth)\n _50ohm.append(T_50ohm),_50ohm_date.append(date)\n elif file.endswith('noise.dat'):\n dB_noise = data\n elif file.endswith('antenna.dat'):\n dB_antenna = data\n dB_clean = dB_antenna - dB_noise - outcome\n T_antenna = Radio_source_trans(dB_clean, Freqs, bwidth)\n T_measure = T_antenna/eta_nu - T_short # Uncalibrated measure\n Tamb = round(np.genfromtxt(path,comments='!',skip_header= 18,max_rows=1)[1]+273.15,2)\n Kjnc = Tamb/(T_50ohm-T_short) # Johnson-noise calibration coefficient\n antenna.append(T_antenna),measure.append(T_measure),K_jnc.append(Kjnc)\n measure_date.append(date)\n \n # HDF5 
Table Generation \n if i>=0 and i<len(folders) and short and antenna and _50ohm and measure and K_jnc:\n name = os.path.normpath(folders[i])\n name = name.split(\"/\")[1]\n short = np.transpose(short)\n antenna = np.transpose(antenna)\n _50ohm = np.transpose(_50ohm)\n measure = np.transpose(measure)\n K_jnc = np.transpose(K_jnc)\n\n short_table = pd.DataFrame(short[mask], index = Freqs[mask], columns = short_date)\n short_table.to_hdf(Output_folder+'/short/'+name+'.hdf5','df')\n _50ohm_table = pd.DataFrame(_50ohm[mask], index = Freqs[mask], columns = _50ohm_date)\n _50ohm_table.to_hdf(Output_folder+'/50ohm/'+name+'.hdf5','df')\n antenna_table = pd.DataFrame(antenna[mask], index = Freqs[mask], columns = measure_date)\n antenna_table.to_hdf(Output_folder+'/antenna/'+name+'.hdf5','df')\n measure_table = pd.DataFrame(measure[mask], index = Freqs[mask], columns = measure_date)\n measure_table.to_hdf(Output_folder+'/Tmeas/'+name+'.hdf5','df')\n Kjnc_table = pd.DataFrame(K_jnc[mask], index = Freqs[mask], columns = measure_date)\n Kjnc_table.to_hdf(Output_folder+'/K_jnc/'+name+'.hdf5','df')\n i+=1", "def get_data(self, csv_file):\n pass", "def analyzeSaMSEMData(dict):\n \n if 'path_in' in dict:\n path_in = dict['path_in']\n else:\n print(\"Caution: No path for input folder containing the data has been defined. Please define path to folder by dict['path_in']=path_in\") \n return\n \n path_out_default = '../colordeficiency-data/' \n if 'path_out' in dict:\n path_out = dict['path_out']\n else:\n print(\"Caution: No path for output folder where the data should be stored has been defined. Using default output path instead: \"+str(path_out_default))\n path_out = path_out_default\n \n \n path = os.path.join(os.path.dirname(os.path.abspath(os.path.join(__file__,os.pardir))),'colordeficiency-data')\n \n # 0. Step: Get all the relevant information, i.e. obs_col_defs etc.\n observer_ids = os.path.join(path,\"observer_ids.csv\")\n obs_ids_sheet = pandas.read_csv(observer_ids,sep=\";\")\n \n # 1. Step: Read all the XLSX data in the path\n ext = 'xlsx'; xlsx_files = getAllXXXinPath(path_in,ext)\n dataArray = pandas.DataFrame()\n i=1\n for xlsx_file in xlsx_files:\n if not '~' in xlsx_file:\n sys.stdout.write(xlsx_file)\n dataArray_tmp, testArray, extraDataDict = extractExperimentData(os.path.join(path_in,xlsx_file))\n \n experiment_type = extraDataDict['expName'] if 'expName' in extraDataDict else 'none'\n if experiment_type == \"sample-2-match\":\n newDataArray = dataArray_tmp[['sim_id','coldef_type','resp.corr_raw','resp.rt_raw','origFile']]\n \n if '0. Participant ID' in extraDataDict:\n obsID = int(extraDataDict['0. Participant ID'])\n newDataArray['observer_id'] = obsID\n obs_coldef_type = obs_ids_sheet.loc[obs_ids_sheet['observer_id']==obsID,['observer_coldef_type']]\n newDataArray['observer_coldef_type'] = int(obs_coldef_type['observer_coldef_type'])\n \n if \"2. Session\" in extraDataDict:\n sessionID = int(extraDataDict['2. Session'])\n newDataArray['session_id'] = sessionID\n \n dataArray = pandas.concat([dataArray, newDataArray])\n sys.stdout.write(' . 
')\n if (i%5)==0: sys.stdout.write('\\n')\n i+=1\n sys.stdout.write('\\n')\n \n dataArray = dataArray.reset_index()\n \n # 2.Step: Adapt values to programstandards\n for item in settings.colDefLong2ID:\n dataArray.loc[dataArray['coldef_type'] == item, ['coldef_type']] = settings.colDefLong2ID[item]\n \n if experiment_type == \"sample-2-match\":\n for item in settings.sim2ID:\n dataArray.loc[dataArray['sim_id'] == item, ['sim_id']] = settings.sim2ID[item]\n \n dataArray = dataArray.rename(columns={'sim_id': 'sim_id',\n 'coldef_type': 'coldef_type',\n 'resp.corr_raw': 'is_correct',\n 'resp.rt_raw': 'resp_time',\n 'origFile': 'filepath'})\n \n for index, row in dataArray.iterrows():\n path_tmp = row['filepath']\n filename = os.path.basename(path_tmp).split('.')[0]\n dict_tmp = getStatsFromFilename(filename)\n imgID_tmp = int(dict_tmp['img_id'])\n dataArray.at[index,'image_id'] = int(imgID_tmp)\n \n dataArray.is_correct = dataArray.is_correct.astype(bool)\n dataArray.image_id = dataArray.image_id.astype(int)\n dataArray = dataArray[['image_id','sim_id','coldef_type','is_correct','resp_time','observer_id','observer_coldef_type','session_id','filepath']]\n \n elif experiment_type == \"visual-search\":\n pass\n \n \n # 3. Saving data to file\n try:\n sys.stdout.write(\"Starting to save ... \")\n if experiment_type == \"sample-2-match\":\n dataArray.to_csv(os.path.join(path_out,'samsem-data.csv'),sep=\";\")\n sys.stdout.write(\"Success: Sample-to-match data successfully saved in '\"+str(path_out)+\"'.\\n\")\n elif experiment_type == \"visual-search\":\n dataArray.to_csv(os.path.join(path_out,'visdem-data.csv'),sep=\";\")\n sys.stdout.write(\"Visual-search data successfully saved.\")\n else:\n sys.stdout.write(\"Caution: No data saved.\")\n except Exception as e:\n print(e)", "def test_noInputSpecified(self,\n filename=None,\n input_folder='../../input/raw-data/'):\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = \"../../input/raw-data/page-views.csv\"\n self.assertEqual(csv_file, expected_output)", "def data_abex_input_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_abex_input_dir(experiment_name) / iteration_csv", "def init_map(project_name, destination_directory):\n project_path = os.path.join(destination_directory, project_name)\n map_filename = os.path.join(project_path, project_name + \"map.csv\")\n if not os.path.exists(project_path):\n os.makedirs(os.path.join(destination_directory, project_name))\n file_id = 0\n mapdf = pd.DataFrame(columns=METADATA_COLUMN_NAMES)\n open(map_filename, 'a').close()\n elif len(os.listdir(project_path)) == 1:\n file_id = 0\n mapdf = pd.DataFrame(columns=METADATA_COLUMN_NAMES)\n else:\n mapdf = pd.read_csv(map_filename)\n mapdf.columns = METADATA_COLUMN_NAMES\n file_id = mapdf['fid'].max() + 1\n return file_id, mapdf", "def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list", "def read_csv(folder):\n csv_paths = [(f, os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith('.csv') and '刑事' in f and '司法院-刑事補償_刑事' not in f and '最高' not in f]\n return csv_paths", "def Collect1DResults(Path, FolderNames, Left, Right, SavePath, OneD,\n fromf='', tof='', FilterbyName = False):\n\n second = \"=pd.DataFrame()\"\n if fromf == '':\n fromf = 0\n\n for i in range(len(FolderNames)):\n print(str(i) 
+ \"-\" + FolderNames[i])\n\n if tof == '':\n tof = len(os.listdir(Path +\"/\" + FolderNames[i]))\n\n FileList = os.listdir(Path +\"/\" + FolderNames[i])[fromf:tof]\n # tof is only renewed if it is equal to ''\n tof = ''\n if FilterbyName == True:\n filter1 = int(FolderNames[i].split('(')[1].split('-')[0])\n filter2 = int(FolderNames[i].split('(')[1].split('-')[1].split(')')[0])\n\n for j in range(len(FileList)):\n\n go = False\n\n if Left and FileList[j].split('.')[0].endswith(\"_left\"):\n print(str(i) + \"-\" + str(j) +\"-\" + FileList[j])\n # create data frame for the sub-basin\n first = \"L\" + FileList[j].split('.')[0]\n go = True\n\n elif Right and FileList[j].split('.')[0].endswith(\"_right\"):\n print(str(i) + \"-\" + str(j) +\"-\" + FileList[j])\n first = \"R\" + FileList[j].split('.')[0]\n go = True\n\n ## try to get the integer of the file name to make sure that it is\n ## one of the 1D results file\n elif OneD and not FileList[j].split('.')[0].endswith(\"_right\") and not FileList[j].split('.')[0].endswith(\"_left\"):\n print(str(i) + \"-\" + str(j) +\"-\" + FileList[j])\n # create data frame for the sub-basin\n first = \"one\" + FileList[j].split('.')[0]\n go = True\n\n if go == True:\n # get the updated list of variable names\n variables = locals()\n\n # read the file\n try:\n temp_df = pd.read_csv(Path + \"/\" + FolderNames[i] + \"/\" + FileList[j],header = None,\n delimiter = r'\\s+')\n\n if FilterbyName == True:\n temp_df = temp_df[temp_df[0] >= filter1]\n temp_df = temp_df[temp_df[0] <= filter2]\n # check whether the variable exist or not\n # if this is the first time this file exist\n if not first in variables.keys():\n # create a datafame with the name of the sub-basin\n total = first+ second\n exec(total)\n\n # concatenate the\n exec(first + \"= pd.concat([\" + first+ \", temp_df])\")\n except:\n continue\n\n # Save files\n variables = list(locals().keys())\n # get sub-basins variables (starts with \"One\")\n for i in range(len(variables)):\n var = variables[i]\n if var.endswith(\"_left\"):\n # put the dataframe in order first\n exec(var + \".sort_values(by=[0,1,2],ascending = True, inplace = True)\")\n path = SavePath + '/' + var[1:]+ '.txt'\n exec(var + \".to_csv(path ,index= None, sep = ' ', header = None)\")\n elif var.endswith(\"_right\"):\n # put the dataframe in order first\n exec(var + \".sort_values(by=[0,1,2],ascending = True, inplace = True)\")\n path = SavePath + '/' + var[1:]+ '.txt'\n exec(var + \".to_csv(path ,index= None, sep = ' ', header = None)\")\n elif var.startswith(\"one\"):\n # put the dataframe in order first\n exec(var + \".sort_values(by=[0,1,2],ascending = True, inplace = True)\")\n print(\"Saving \" + var[3:]+ '.txt')\n path = SavePath + '/' + var[3:]+ '.txt'\n exec(var + \".to_csv(path ,index= None, sep = ' ', header = None)\")", "def create_dataframe_from_dir(directory):\n\n if not os.path.exists(directory):\n return pd.DataFrame()\n\n file_list = os.listdir(directory)\n\n file_list.sort()\n\n df_list = []\n for filename in file_list:\n\n if filename.startswith(\"_\") or (not filename.endswith(\".csv\")):\n continue\n\n # Assert that the file is named correctly\n _, start_date, end_date = check_filename_convention(filename)\n\n df = pd.read_csv(os.path.join(directory, filename))\n df = df.assign(SourceFile=filename)\n\n # In January 2020, MS changed the date format used in the usage\n # export files from US to UK. This happen between 24/01/2020 -\n # 28/01/2020. 
The following if statement is to deal with this\n # change.\n if start_date is None or end_date is None:\n continue\n\n if start_date > datetime.datetime(2020, 1, 24, 0, 0):\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%d/%m/%Y\"\n )\n except Exception:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%m/%d/%Y\"\n )\n except Exception:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%Y-%m-%d\"\n )\n else:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%m/%d/%Y\"\n )\n except Exception:\n try:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%d/%m/%Y\"\n )\n except Exception:\n df[CONST_COL_NAME_DATE] = pd.to_datetime(\n df[CONST_COL_NAME_DATE], format=\"%Y-%m-%d\"\n )\n\n # Check if data comes from EduHub\n if CONST_COL_NAME_HANDOUTNAME in df.columns:\n\n # Renaming HandoutName to SubscriptionName\n df = df.rename(\n columns={CONST_COL_NAME_HANDOUTNAME: CONST_COL_NAME_SNAME}\n )\n\n # Dropping columns CourseName,LabName\n df = df.drop(\n columns=[CONST_COL_NAME_LABNAME, CONST_COL_NAME_COURSENAME]\n )\n\n df_list.append(df)\n\n if len(df_list) == 0:\n return pd.DataFrame()\n\n total_df = pd.concat(df_list, axis=0, ignore_index=True)\n\n return total_df", "def get_data_file():\n base_folder = os.path.dirname(__file__)\n # print(base_folder)\n return os.path.join(base_folder, 'data', 'Sacramentorealestatetransactions.csv')\n # print(filename)", "def parse(cls, raw_folder: str) -> Dict[str, Any]:\n folder_path = os.path.abspath(raw_folder)\n data = dict()\n files = os.listdir(folder_path)\n for file in files:\n if is_ignored(file):\n continue\n try:\n file = os.path.join(raw_folder, file)\n datum = cls.process_file(file)\n except FileNotCompatible:\n continue\n\n _, kwrd = os.path.split(file)\n kwrd = os.path.splitext(kwrd)[0]\n data[kwrd] = datum\n\n return data", "def __init__(self, dir_path= 'static/Irma data-20210525'):\n \n A=csv_to_dict(dir_path+\"\\A.csv\")\n B=csv_to_dict(dir_path+\"\\B.csv\")\n C=csv_to_dict(dir_path+\"\\C.csv\")\n D=csv_to_dict(dir_path+\"\\D.csv\")\n\n\n self.dicts_list=[A,B,C,D]\n self.image_codes=csv_to_dict(dir_path+\"\\image_codes.csv\")", "def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()", "def get_data_paths(directory: Optional[str] = None) -> DataPaths:\n if directory is None:\n directory = DATA_DIRECTORY\n\n os.makedirs(directory, exist_ok=True)\n\n node_data_path = os.path.join(directory, 'nodes.tsv')\n if not os.path.exists(node_data_path):\n logger.info(f'downloading {NODE_DATA_URL}')\n urlretrieve(NODE_DATA_URL, node_data_path)\n\n edge_data_path = os.path.join(directory, 'edges.sif.gz')\n if not os.path.exists(edge_data_path):\n logger.info(f'downloading {EDGE_DATA_URL}')\n urlretrieve(EDGE_DATA_URL, edge_data_path)\n\n transformed_features_path = os.path.join(directory, 'transformed-features.tsv.bz2')\n if not os.path.exists(transformed_features_path):\n logger.info(f'downloading {TRANSFORMED_FEATURES_URL}')\n urlretrieve(TRANSFORMED_FEATURES_URL, transformed_features_path)\n\n validate_data_path = os.path.join(directory, 'validation-statuses.tsv')\n if not os.path.exists(validate_data_path):\n logger.info(f'downloading {VALIDATE_DATA_URL}')\n 
urlretrieve(VALIDATE_DATA_URL, validate_data_path)\n\n symptomatic_data_path = os.path.join(directory, 'probabilities.tsv')\n if not os.path.exists(symptomatic_data_path):\n logger.info(f'downloading {SYMPTOMATIC_DATA_URL}')\n urlretrieve(SYMPTOMATIC_DATA_URL, symptomatic_data_path)\n\n repurpose_data_path = os.path.join(directory,'repurpose_overlap.json')\n if not os.path.exists(repurpose_data_path):\n logger.info(f'downloading {REPURPOSE_DATA_URL}')\n urlretrieve(REPURPOSE_DATA_URL, repurpose_data_path)\n\n repo_data_path = os.path.join(directory, 'repo_data.csv')\n if not os.path.exists(repo_data_path):\n logger.info(f'downloading {REPO_DATA_URL}')\n urlretrieve(REPO_DATA_URL, repo_data_path)\n\n permutation_directory = os.path.join(directory, \"permutations\")\n os.makedirs(permutation_directory, exist_ok=True)\n\n permutation_paths = []\n for i in range(5):\n permutation_data_path = os.path.join(permutation_directory, PERMUTATION_DATA_FILE_FMT.format(i + 1))\n if not os.path.exists(permutation_data_path):\n url = PERMUTATION_DATA_URL_FMT.format(i + 1)\n logger.info(f'downloading {url}')\n urlretrieve(url, permutation_data_path)\n permutation_paths.append(permutation_data_path)\n data_edge2vec_path = os.path.join(directory, 'data_edge2vec')\n\n return DataPaths(\n node_data_path=node_data_path,\n edge_data_path=edge_data_path,\n transformed_features_path=transformed_features_path,\n validate_data_path=validate_data_path,\n symptomatic_data_path=symptomatic_data_path,\n permutation_paths=permutation_paths,\n data_edge2vec_path=data_edge2vec_path,\n repurpose_data_path = repurpose_data_path,\n repo_data_path = repo_data_path\n )", "def folder_to_df(path):\n summary_df = pd.DataFrame(columns=[\"file_name\", \"invoice_nr\", \"address\", \"contract\", \"base_charge\"])\n \n for file in Path(path).glob(\"*.pdf\"):\n print(file)\n try: \n summary_df = summary_df.append({\n \"file_name\": pdf_to_test(file)[0],\n \"invoice_nr\": pdf_to_test(file)[1],\n \"address\": pdf_to_test(file)[2],\n \"contract\": pdf_to_test(file)[3],\n \"base_charge\": pdf_to_test(file)[4]}, \n ignore_index = True)\n except:\n summary_df = summary_df.append({\n \"file_name\": file.name,\n \"invoice_nr\": \"Could not read malformed PDF file\",\n \"address\": \"Could not read malformed PDF file\",\n \"contract\": \"Could not read malformed PDF file\",\n \"base_charge\": \"Could not read malformed PDF file\"}, \n ignore_index = True)\n return summary_df", "def _parse_cvcfolder(self, cvcfolderpath):\n cvcfoldername = os.path.basename(os.path.abspath(cvcfolderpath))\n obsfolderinfo = {}\n cvcextstr = cvcfoldername.split('_')[-1]\n if cvcextstr == 'xst' or cvcextstr == 'xst-SEPTON':\n cvcfoldername_split = cvcfoldername.split('_')\n try:\n (stnid, Ymd, HMS, rcustr, sbstr, intstr, durstr, dirstr, cvcextstr\n ) = cvcfoldername_split\n obsfolderinfo['stnid'] = stnid\n obsfolderinfo['datetime'] = datetime.datetime.strptime(\n Ymd + 'T' + HMS, '%Y%m%dT%H%M%S')\n obsfolderinfo['rcumode'] = rcustr[3:]\n obsfolderinfo['subband'] = sbstr[2:]\n obsfolderinfo['integration'] = float(intstr[3:])\n obsfolderinfo['duration_tot'] = float(durstr[3:])\n obsfolderinfo['pointing'] = dirstr[3:].split(',')\n except:\n raise ValueError(\"Foldername not in xst_ext format.\")\n elif cvcextstr == 'acc':\n dirpat = re.compile(regex_ACCfolder)\n obsdirinfo_m = dirpat.match(cvcfoldername)\n if obsdirinfo_m is None:\n print(\"Cal error\")\n raise ValueError(\n \"Calibration directory does not have correct syntax.\")\n obsdirinfo = 
obsdirinfo_m.groupdict()\n obsfolderinfo['stnid'] = obsdirinfo['stnid']\n d0 = datetime.datetime(int(obsdirinfo['year']),\n int(obsdirinfo['month']),\n int(obsdirinfo['day']),\n int(obsdirinfo['hour']),\n int(obsdirinfo['minute']),\n int(obsdirinfo['second']))\n obsfolderinfo['sessiontimeid'] = d0\n obsfolderinfo['rcumode'] = obsdirinfo['rcumode']\n obsfolderinfo['subband'] = '0:511'\n obsfolderinfo['integration'] = 1.0\n obsfolderinfo['duration_tot'] = int(obsdirinfo['duration_tot'])\n obsfolderinfo['source'] = obsdirinfo['calsrc']\n obsfolderinfo['pointing'] = \\\n ilisa.monitorcontrol.directions.std_pointings(\n obsfolderinfo['source'])\n else:\n raise ValueError(\"Folder not expected xst or acc format.\")\n obsfolderinfo['datatype'] = cvcextstr\n return obsfolderinfo", "def load_file(input_file):\n basename = os.path.basename(input_file.name)\n filename = os.path.splitext(basename)[0]\n\n # data[filename] = json.load(input_file)\n print(\"Loading: \", filename)\n data[filename] = pd.DataFrame(json.load(input_file))\n print(data[filename])\n\n # print(data[filename][data[filename]['Tasksize'] == 32])\n # print(data[filename]['Tasksize'].drop_duplicates())\n # row_vals = data[filename]['Rows'].drop_duplicates().to_list()\n\n # print(data[filename]['Rows'].drop_duplicates().to_list())\n\n return filename", "def _init_train_valid(self, csv_path, csv_sep, csv_names):\n # load groundtruth\n # last element following a dot is file's extension\n print('Loading data...') \n if csv_path.split('.')[-1] == 'cache':\n # load cache\n # Assumes that the cache contains a list of all the identities, a dictionary containing metadata about those identities and the number of samples contained in the cache.\n # The dictionary must have the same format as the 'groundtruth_metadata' dictionary that is built below.\n # dati che mi servono: identities, groundtruth_metadata, num_samples\n with open(csv_path, 'rb') as cache_file:\n cache = pickle.load(cache_file)\n self.identities = cache['identities']\n self.groundtruth_metadata = cache['groundtruth_metadata']\n self.num_samples = cache['num_samples']\n else:\n # Assumes for the provided csv the following structure:\n # Path, ID, Gender, Age, x_min(roi_origin_x), y_min(roi_origin_y), width(roi_width), height(roi_height)\n groundtruth = pd.read_csv(csv_path, sep=csv_sep, names=csv_names)\n # for each groundtruth row\n for gt_sample in groundtruth.iterrows():\n identity = gt_sample[1][\"ID\"]\n # this iteration is over all of the elements in groundtruth, so the same id can be encountered multiple times (same id associated to multiple images)\n if identity not in self.identities:\n self.identities.append(identity)\n # load identity's metadata\n id_data = {\n 'age': gt_sample[1][\"Age\"],\n 'roi': {\n 'upper_left_x': gt_sample[1][\"x_min\"],\n 'upper_left_y': gt_sample[1][\"y_min\"],\n 'width': gt_sample[1][\"width\"],\n 'height': gt_sample[1][\"height\"]\n },\n 'path': gt_sample[1][\"Path\"]\n }\n if identity not in self.groundtruth_metadata.keys():\n self.groundtruth_metadata[identity] = {\n 'index': 0,\n 'metadata': []\n }\n # the other elements in the list associated to an identity are metadata \n self.groundtruth_metadata[identity]['metadata'].append(id_data)\n self.num_samples += 1\n # Dump loaded data to cache\n # Split csv path in directory path and filename\n (csv_dir, csv_name) = os.path.split(csv_path)\n # Create a name for cache file with the same name as csv file but different extension\n cache_name = csv_name.split('.')[0]+'.cache'\n # Create a path 
pointing to the new cache file, locating it in the same directory as the csv file\n cache_path = os.path.join(csv_dir, cache_name)\n # Write relevant data to file\n with open(cache_path, 'wb') as cache_out_file:\n out_dict = {}\n out_dict['identities'] = self.identities\n out_dict['groundtruth_metadata'] = self.groundtruth_metadata\n out_dict['num_samples'] = self.num_samples\n pickle.dump(out_dict, cache_out_file) \n print('Finished loading data!')\n if self.mode == 'training':\n self._shuffle()", "def read_dir(directory):\n results = AttrDict()\n results.iterations = pd.read_csv(os.path.join(directory, 'iterations.csv'),\n index_col=0)\n results.solutions = AttrDict()\n for i in results.iterations.index.tolist():\n iteration_dir = os.path.join(directory, '{:0>4d}'.format(i))\n fmt = _detect_format(iteration_dir)\n logging.debug('Iteration: {}, Format detected: {}'.format(i, fmt))\n try:\n if fmt == 'netcdf':\n sol_path = os.path.join(iteration_dir, 'solution.nc')\n results.solutions[i] = read_netcdf(sol_path)\n else:\n sol_path = iteration_dir\n results.solutions[i] = read_csv(sol_path)\n logging.debug('Read as {}: {}'.format(fmt, sol_path))\n except IOError as err:\n logging.warning('I/O error in `{}` at iteration `{}`'\n ': {}'.format(iteration_dir, i, err))\n # results.solutions[i] = AttrDict() # add an empty entry\n continue\n return results", "def get_first_of_day(self, folder_before=None, day=datetime.today(), filename='Epikurve.csv'):\n folders = os.listdir(self.data_root_path)\n folders.sort(reverse=True)\n reached = folder_before is not None\n __folder_before = str(folder_before).split('/')[-1]\n for folder in folders:\n if reached:\n path_csv = self.data_root_path / folder / filename\n with open(path_csv) as f:\n first = True\n for x in csv.reader(f, delimiter=';'):\n if first:\n first = False\n continue\n ts = datetime.strptime(x[2], '%Y-%m-%dT%H:%M:%S')\n break\n if ts.date() <= day.date():\n return folder\n else:\n if folder == __folder_before:\n reached = True", "def _read_trajectory_files(self):\n dflist = []\n self.Ntimes = {}\n for downD in self.case.downstreamD:\n outputs = self.case.get_outputs(self.method,downD)\n print(outputs['trajectory_file'])\n df = pd.read_csv(outputs['trajectory_file'],\n header=None,\n usecols=[0,1,2])\n df.columns = ['t','y','z']\n df['x'] = downD * self.case.turbine.D\n df['z'] -= self.case.turbine.zhub\n df = df.set_index(['t','x'])[['y','z']]\n self.Ntimes[downD] = len(df.index.levels[0])\n dflist.append(df)\n self.df = pd.concat(dflist).sort_index()", "def get_data():\n \n \"\"\" Prepare variables\"\"\"\n urls = {\"cases\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv\",\n \"deaths\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv\",\n \"recovered\": \"https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Recovered.csv\"}\n\n localnames = {\"cases\": \"Cases.csv\",\n \"deaths\": \"Deaths.csv\",\n \"recovered\": \"Recovered.csv\"}\n\n dfs = {\"cases\": None,\n \"deaths\": None,\n \"recovered\": None}\n\n \"\"\" Download\"\"\"\n for key in urls.keys():\n url = urls[key]\n localname = localnames[key]\n urllib.request.urlretrieve(url, localname)\n\n \"\"\" Load variables\"\"\"\n for key in dfs.keys():\n dfs[key] = pd.read_csv(localnames[key])\n \n \"\"\" Return\"\"\"\n return(dfs)", "def 
create_files_dict(csv_file_name: str):\r\n\r\n SKUs = [] # list of SKU's in the csv file\r\n with open(csv_file_name, 'r') as csv_fd:\r\n csv_reader = csv.reader(csv_fd)\r\n for line in csv_reader:\r\n for SKU in line:\r\n SKUs.append(SKU)\r\n\r\n # creating a list of file extensions [.ext, ...]\r\n file_extensions = []\r\n for SKU in SKUs:\r\n for dir_file in os.listdir():\r\n if SKU in os.path.splitext(dir_file)[0]:\r\n dir_file_ext = os.path.splitext(dir_file)[1]\r\n if dir_file_ext not in file_extensions:\r\n file_extensions.append(dir_file_ext)\r\n file_extensions.sort() # sorting by ascii for constant format view\r\n # print(\"debug:::file_extensions\", file_extensions)\r\n\r\n ext_format_dict = {} # base format for creating extension dict (to be copied for each iteration)\r\n for ext in file_extensions:\r\n ext_format_dict[ext] = ''\r\n\r\n files = {}\r\n for filename_base in SKUs:\r\n for dir_file_0 in os.listdir():\r\n current_file_extensions = ext_format_dict.copy() # reset dict values for each file\r\n if filename_base in os.path.splitext(dir_file_0)[0]:\r\n # need to take the dir_file_base and re-iterate over listdir to find all exact name filenames\r\n for dir_file_1 in os.listdir():\r\n if os.path.splitext(dir_file_0)[0] == os.path.splitext(dir_file_1)[0]:\r\n dir_file_base = os.path.splitext(dir_file_1)[0]\r\n dir_file_ext = os.path.splitext(dir_file_1)[1]\r\n if dir_file_ext in list(current_file_extensions.keys()):\r\n current_file_extensions[dir_file_ext] = 'V'\r\n files[dir_file_base] = current_file_extensions\r\n\r\n return files", "def loadFileNameByModel(self, inputDir):\n fileNames = walktree(inputDir)\n fileByModel = {}\n for file in fileNames:\n modelName = file.split('/')[-1]\n modelName = modelName.replace('.txt', '')\n fileByModel[modelName] = file\n return fileByModel", "def get_csv_data(csv_path: str, img_dir: str) -> pd.DataFrame:\r\n data = pd.read_csv(csv_path)\r\n data['title'] = data['title'].apply(preprocess_titles)\r\n data['image'] = data['image'].apply(abs_path, args=(img_dir,))\r\n return data", "def getReal(directory=\"../FakeRealNews/Data\"):\r\n return pd.read_csv(directory + \"/True.csv\")", "def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif folders.find(\"pass\") != -1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list", "def import_func(path_):\n\n datasets_dic = {}\n\n for dataset_path in path_:\n # Parse labels from filenames\n dataset_label = os.path.split(dataset_path)[1].split('.')[0]\n\n # Read from csv to Pandas\n dataset = pd.read_csv(dataset_path)\n\n # insert dataset label to the dataframes\n dataset.insert(0, 'trial', dataset_label)\n dataset.insert(0, 'maneuver', dataset_label.split('_')[0])\n\n # Datasets are stored in a dictionary\n datasets_dic.update({dataset_label: dataset})\n\n # 
list of imported maneuvers\n dataset_names = list(datasets_dic.keys())\n\n return datasets_dic, dataset_names", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def read_data(file_name):\n\n path= os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n path = op.join(path, 'Data')\n path_clean = op.join(path, 'Cleaned Data')\n path = op.join(path_clean, file_name)\n names = os.listdir(path_clean)\n if all(file_name != i for i in names):\n raise ValueError\n return pd.read_csv(path)", "def test_get_filepaths(self):\n\n #setup\n get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn)\n \n #when\n test1 = get_filepaths(\"./dir1\", \".csv\")\n\n #result\n assert len(test1) == 2", "def import_test():\n if os.path.exists(\"test.csv\"):\n #print (\"--testing data imported to data frame\\n\")\n test_df = pd.read_csv(\"test.csv\", index_col=0)\n else:\n print(\"training CSV not found\")\n exit()\n \n return test_df", "def sample_data_path(name):\n import os.path as op\n data_dir = op.join(op.dirname(__file__), \"data\")\n data_path = op.join(data_dir, name + \".csv\")\n return op.abspath(data_path)", "def SearchObjects(directory, endwith='.csv'):\n directory = os.path.normpath(directory)\n if not os.path.isdir(directory):\n raise IOError(\"The directory \" + directory + \" is not exist\")\n objects = {}\n for curpath, subdirs, files in os.walk(directory):\n for fileType in (file for file in files if file.endswith(endwith)):\n path = os.path.join(curpath, fileType)\n label = path.split(os.path.sep)[-2]\n if label not in objects:\n objects[label] = []\n objects[label].append(path)\n \n return objects[label]", "def _find_named_files(self):\n for name, description in self.named_files.iteritems():\n name = name.format(job_name=self.job_name)\n f_path = '{}/{}'.format(self.rism3d_folder, name)\n if os.path.isfile(f_path):\n self.file_path_dic[description] = f_path\n else:\n self._not_found_error(f_path)", "def task_lst_gen(dirr, csv_path):\n train_file_lst, val_file_lst, test_file_lst = files_from_csv(csv_path)\n\n task_dict = {}\n out_prefix = '/work/jfeins1/maestro/dataset-v3/'\n for subdirs, dirs, files in os.walk(dirr):\n for file in files:\n filepath = subdirs + os.sep + file\n\n if file in train_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'train/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n if file in test_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'test/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n if file in val_file_lst:\n uid = str(file).split('.')[0]\n out = out_prefix + 'val/' + uid\n task_dict[uid] = {'in': filepath, 'out': out}\n\n task_lst = open('/work/jfeins1/maestro/encoding_gen_task.lst', 'w')\n for uid, d in task_dict.items():\n print(d['in'], d['out'], file=task_lst)", "def read_timestep(root_path: str, time: str):\n path = os.path.join(root_path, time)\n\n agent_file = glob.glob(os.path.join(path, \"*_agents.csv\"))[0]\n rel_file = glob.glob(os.path.join(path, \"*_relationships.csv\"))[0]\n feat_files = glob.glob(os.path.join(path, \"*_feat_*.csv\"))\n 
exposure_files = glob.glob(os.path.join(path, \"*_exposure_*.csv\"))\n assert os.path.isfile(agent_file), f\"can't find agents.csv in {dir}\"\n assert os.path.isfile(rel_file), f\"can't find relationships.csv in {dir}\"\n\n _, agent_filename = os.path.split(agent_file)\n\n # create agents dict\n agents = {}\n\n # re-create all agents and add to population\n with open(agent_file, newline=\"\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n agents[row[\"id\"]] = row\n agents[row[\"id\"]][\"time\"] = time\n\n def update_agent_extras(files, extra_type):\n pattern = re.compile(f\"^.*_{extra_type}_(.*)\\.csv$\")\n for file in files:\n m = pattern.match(file)\n if m is not None:\n extra = m.group(1)\n with open(file, newline=\"\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n for k, v in row.items():\n if k != \"agent\":\n agents[row[\"agent\"]][f\"{extra}_{k}\"] = v.lower() if v in ('True', 'False') else v\n\n update_agent_extras(feat_files, \"feat\")\n update_agent_extras(exposure_files, \"exposure\")\n\n # re-create all relationships and write to file\n rels = []\n with open(rel_file, newline=\"\") as f:\n reader = csv.DictReader(f)\n for row in reader:\n row[\"time\"] = time\n rels.append(row)\n\n return rels, list(agents.values())", "def getCSVbbx(filepath, detail, folder, time):\n \n #format validation\n pd.read_csv(filepath)\n click.echo(\"csv\")\n CRSinfo = True\n listlat = [\"Koordinate_Hochwert\",\"lat\",\"Latitude\",\"latitude\"]\n listlon = [\"Koordinate_Rechtswert\",\"lon\",\"Longitude\",\"longitude\",\"lng\"]\n listCRS = [\"CRS\",\"crs\",\"Koordinatensystem\",\"EPSG\",\"Coordinate reference system\", \"coordinate system\"]\n listtime = [\"time\", \"timestamp\", \"date\", \"Time\", \"Jahr\", \"Datum\"]\n try:\n deli=';'\n df = pd.read_csv(filepath, delimiter=deli,engine='python')\n #tests if there is a column named Coordinatesystem or similar\n click.echo(\"hi\")\n #click.echo(df.columns.values)\n #click.echo(intersect(listCRS,df.columns.values))\n if not intersect(listCRS,df.columns.values):\n CRSinfo= False\n print(\"hu\")\n print(\"No fitting header for a reference system\")\n\n if not(((intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)))or (intersect(listtime, df.columns.values))):\n #output=\"No fitting header for latitudes or longitudes\"\n raise Exception('No fitting ')\n #print(output)\n #return output\n\n except Exception as exce:\n deli=','\n df = pd.read_csv(filepath, delimiter=deli,engine='python')\n #tests if there is a column named Coordinatesystem or similar\n click.echo(\"hi\")\n #click.echo(df.columns.values)\n #click.echo(intersect(listCRS,df.columns.values))\n if not intersect(listCRS,df.columns.values):\n CRSinfo= False\n \n print(\"No fitting header for a reference system2\")\n z=intersect(listtime, df.columns.values)\n print (z)\n t=intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)\n print (intersect(listlat,df.columns.values))\n print(\"_______________\")\n print(t)\n if not t:\n print(\"false\")\n\n if not(((intersect(listlat,df.columns.values) and intersect(listlon,df.columns.values)))or (intersect(listtime, df.columns.values))):\n #output=\"No fitting header for latitudes or longitudes\"\n #raise Exception('No fim')\n \n raise Exception(\"evtl kein csv oder ungueltiges Trennzeichen.\")\n #print(\"keine Koordinaten vorhanden\")\n #print(output)\n #return output\n print (exce)\n\n if detail =='bbox':\n click.echo(\"bbox\")\n # Using Pandas: 
http://pandas.pydata.org/pandas-docs/stable/io.html\n #if folder=='single':\n mylat=intersect(listlat,df.columns.values)\n mylon=intersect(listlon,df.columns.values)\n lats=df[mylat[0]]\n lons=df[mylon[0]]\n bbox=[min(lats),min(lons),max(lats),max(lons)]\n # CRS transformation if there is information about crs\n if(CRSinfo):\n mycrsID=intersect(listCRS,df.columns.values)\n myCRS=df[mycrsID[0]]\n lat1t,lng1t = extractTool.transformToWGS84(min(lats),min(lons), myCRS)\n lat2t,lng2t = extractTool.transformToWGS84(max(lats),max(lons), myCRS)\n bbox=[lat1t,lng1t,lat2t,lng2t]\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV object:\")\n click.echo(bbox)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append(bbox)\n if folder=='whole':\n extractTool.bboxArray.append(bbox)\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV:\")\n click.echo(bbox)\n print(\"----------------------------------------------------------------\")\n else:\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV object:\")\n print(bbox)\n print(\"Missing CRS -----> Boundingbox will not be saved in zenodo.\")\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([None])\n if folder=='whole':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Boundingbox of the CSV file:\")\n click.echo(bbox)\n click.echo(\"because of a missing crs this CSV is not part of the folder calculation.\")\n print(\"----------------------------------------------------------------\")\n\n else:\n extractTool.ret_value.append([None])\n\n #returns the convex hull of the coordinates from the CSV object.\n if detail == 'convexHull':\n click.echo(\"convexHull\")\n mylat=intersect(listlat,df.columns.values)\n mylon=intersect(listlon,df.columns.values)\n lats=df[mylat[0]]\n lons=df[mylon[0]]\n coords=np.column_stack((lats, lons))\n #definition and calculation of the convex hull\n hull=ConvexHull(coords)\n hull_points=hull.vertices\n convHull=[]\n for z in hull_points:\n point=[coords[z][0], coords[z][1]]\n convHull.append(point)\n if(CRSinfo):\n mycrsID=intersect(listCRS,df.columns.values)\n myCRS=df[mycrsID[0]]\n inputProj='epsg:'\n inputProj+=str(myCRS[0])\n print(inputProj)\n inProj = Proj(init=inputProj)\n outProj = Proj(init='epsg:4326')\n for z in coords:\n z[0],z[1] = transform(inProj,outProj,z[0],z[1])\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"convex Hull of the csv file: \")\n click.echo(convHull)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append(convHull)\n if folder=='whole':\n extractTool.bboxArray=extractTool.bboxArray+convHull\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"convex hull of the CSV:\")\n click.echo(convHull)\n 
print(\"----------------------------------------------------------------\")\n #return convHull\n else:\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Convex hull of the CSV object:\")\n print(convHull)\n print(\"Missing CRS -----> Boundingbox will not be saved in zenodo.\")\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([None])\n if folder=='whole':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Filepath:\")\n click.echo(filepath)\n click.echo(\"Convex hull of the CSV file:\")\n click.echo(convHull)\n click.echo(\"because of a missing crs this CSV is not part of the folder calculation.\")\n print(\"----------------------------------------------------------------\")\n\n\n else:\n extractTool.ret_value.append([None])\n\n\n\n \n if (time):\n click.echo(\"hallo\")\n # Using Pandas: http://pandas.pydata.org/pandas-docs/stable/io.html\n df = pd.read_csv(filepath, sep=';|,',engine='python')\n click.echo(listtime)\n click.echo(df.columns.values)\n intersection=intersect(listtime, df.columns.values)\n click.echo(intersection)\n if not intersection:\n print(\"No fitting header for time-values\")\n extractTool.ret_value.append([None])\n # TODO: fehlerbehandlung \n #try:\n #for t in listtime:\n #if(x not in df.columns.values):\n #click.echo(\"This file does not include time-values\")\n #else:\n #time=df[t]\n #timeextend =[min(time), max(time)]\n #click.echo(timeextend)\n #return timeextend\n #except Exception as e:\n #click.echo (\"There is no time-value or invalid file.\")\n #return None \n else:\n \n \n time=df[intersection[0]]\n print(min(time))\n print(max(time))\n timemin=str(min(time))\n timemax=str(max(time))\n timemax_formatted=dateparser.parse(timemax)\n timemin_formatted=dateparser.parse(timemin)\n timeextend=[timemin_formatted, timemax_formatted]\n print(timeextend)\n if folder=='single':\n print(\"----------------------------------------------------------------\")\n click.echo(\"Timeextend of this CSV file:\")\n click.echo(timeextend)\n print(\"----------------------------------------------------------------\")\n extractTool.ret_value.append([timeextend])\n #return timeextend\n if folder=='whole':\n extractTool.timeextendArray.append(timeextend)\n print(\"timeextendArray:\")\n print(extractTool.timeextendArray)\n\n else:\n extractTool.ret_value.append([None])\n if folder=='single':\n print(extractTool.ret_value)\n return extractTool.ret_value", "def parse_csv_files(self, filter_fn=None):\n def filter_function(f):\n return f is not None and f.endswith(\".csv\")\n if not filter_fn:\n filter_fn = filter_function\n files = self.filter_files(None,filter_fn)\n dicts = {}\n for f in files:\n with open(f) as fh:\n dicts[f] = [r for r in csv.DictReader(fh)]\n return dicts", "def get_science_sample(self, program, path_final=None):\n list_of_files = glob.glob(self.final_path)\n latest_file = max(list_of_files, key=os.path.getctime)\n df = pd.read_csv(latest_file)\n df.query(\"in_%s == 1\"%program, inplace=True)\n return df", "def load_data(self,data):\n \"\"\"Exception handling incase the path does not exist\"\"\"\n try:\n self.df = pd.read_csv(data)\n return self.df\n except IOError:\n return IOError", "def create_station_dics(data_directories):\n \n files_all = {} \n for k,v in data_directories.items() :\n files = os.listdir(v)\n \n for f in files:\n station = 
f.split('_')[0] \n if station not in files_all.keys():\n files_all[station] = {}\n \n if k == 'ncar': # separating ncar temperature and wind files \n if 'trhc' in f:\n k = 'ncar_t'\n elif 'windc' in f:\n k = 'ncar_w'\n files_all[station][k] = ''\n files_all[station][k] = v + '/' + f # compelte path to the netCDF file \n\n #print('check') \n \n \n return files_all", "def import_data_handler():\n\n result = ''\n try:\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\lesson5\\\\data\\\\'\n file_name_dict = {'products': 'products.csv', 'customers': 'customers.csv',\n 'rentals': 'rentals.csv'}\n for key, value in file_name_dict.items():\n tmp_file = directory_name + value\n mongo_insert = ImportUnitTestData()\n result = mongo_insert.import_data(key, tmp_file)\n print(result)\n except FileNotFoundError as e:\n logger.error('exception %s', e, exc_info=True)\n result = 'exception {}'.format(e)\n print(result)\n return result", "def get_vetted_sample(self):\n list_of_files = glob.glob(self.final_path)\n latest_file = max(list_of_files, key=os.path.getctime)\n df = pd.read_csv(latest_file)\n return df", "def test_divide_csv_daily_failure(self):\n\n with tempfile.TemporaryDirectory() as td:\n filename = \"storage_data.csv\"\n file_path = f\"{td}/{filename}\"\n errorMsg = \"CParserError: Error tokenizing data. C error: Expected 53 fields in line 1605634, saw 54\"\n with patch(\"masu.external.downloader.ocp.ocp_report_downloader.pd\") as mock_pd:\n with patch(\n \"masu.external.downloader.ocp.ocp_report_downloader.utils.detect_type\",\n return_value=(\"storage_usage\", None),\n ):\n mock_pd.read_csv.side_effect = Exception(errorMsg)\n with patch(\"masu.external.downloader.ocp.ocp_report_downloader.LOG.error\") as mock_debug:\n with self.assertRaises(Exception):\n manifest = ReportManifestDBAccessor().get_manifest_by_id(self.ocp_manifest_id)\n divide_csv_daily(file_path, manifest)\n mock_debug.assert_called_once_with(f\"File {file_path} could not be parsed. Reason: {errorMsg}\")", "def dir_exists(self):\r\n if os.path.exists(self.csvdir):\r\n return True\r\n else:\r\n return False", "def _filepath(self, which_one: str):\n dataset = self.mode.name\n with open('data/dstc2_{}/scripts/config/dstc2_{}.flist'.format(\n 'test' if self.mode is DSTC2.Mode.test else 'traindev', dataset\n )) as flist:\n paths = flist.read().splitlines()\n for path in paths:\n path = 'data/dstc2_{}/data/'.format('test' if self.mode is DSTC2.Mode.test else 'traindev') + path + '/'\n with open(path + which_one + '.json') as f:\n yield json.load(f)", "def dir_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular', sep=',')" ]
[ "0.65036917", "0.6375648", "0.6118687", "0.59009016", "0.5858981", "0.5716339", "0.5682048", "0.551454", "0.5508633", "0.55065745", "0.5505938", "0.54979366", "0.54892653", "0.5485146", "0.5470703", "0.54683197", "0.5457055", "0.54512775", "0.5430467", "0.5412322", "0.54082847", "0.5404735", "0.54044574", "0.5392655", "0.53702354", "0.5358975", "0.5355347", "0.53543943", "0.5347646", "0.53266734", "0.5322669", "0.5318805", "0.5303857", "0.5293769", "0.52830225", "0.5282713", "0.5279875", "0.5274694", "0.52694345", "0.5267129", "0.52441037", "0.52199787", "0.5208672", "0.5206566", "0.5203512", "0.5202687", "0.51855403", "0.5182764", "0.51821905", "0.5175844", "0.5171379", "0.5165366", "0.51535463", "0.514408", "0.5130513", "0.5128942", "0.51231134", "0.51137805", "0.51051974", "0.51031184", "0.50896525", "0.50816286", "0.50789005", "0.507848", "0.5073226", "0.50706196", "0.5069768", "0.5062424", "0.50616354", "0.5057506", "0.50574756", "0.5049913", "0.50492305", "0.5048461", "0.5048081", "0.5047486", "0.5042829", "0.504063", "0.5038806", "0.5038041", "0.50364965", "0.50341105", "0.5033347", "0.5011905", "0.50102395", "0.5009754", "0.5009655", "0.5002392", "0.49978817", "0.49946898", "0.4989573", "0.4985749", "0.49788404", "0.49696106", "0.4969355", "0.49678716", "0.49672148", "0.49670747", "0.49650702", "0.4964474" ]
0.5297656
33
Description: When given a csv_filepath and output_filepath and it is the first time reading it. Expected Result: creates a JSON file with the right values.
def test_first_time_reading_csv_file(self):
    # Create a temporary directory for test files
    temp_dir = "test_files/observed"
    os.makedirs(temp_dir, exist_ok=True)

    # Create a test CSV file
    csv_filepath = os.path.join(temp_dir, "Abadia-BA_-11.56_-37.52.csv")
    with open(csv_filepath, "w", newline="") as csv_file:
        writer = csv.writer(csv_file, delimiter=";")
        writer.writerow(["periods", "precipitation", "temperature", "max_temperature"])
        writer.writerow(["2023-01-01", "5", "25", "30"])
        writer.writerow(["2023-01-02", "10", "23", "28"])

    # Define the expected output JSON file path
    expected_output_filepath = os.path.join(temp_dir, "BA_Abadia.json")

    # Call the function under test
    extractor.csv_to_json(csv_filepath, temp_dir)

    # Verify that the output JSON file exists
    assert os.path.exists(expected_output_filepath)

    # Load the output JSON file
    with open(expected_output_filepath, "r") as json_file:
        json_data = json.load(json_file)

    # Verify the contents of the JSON file
    expected_data = {
        "city": "Abadia",
        "state": "BA",
        "coordinates": ["-11.56", "-37.52"],
        "observed": {
            "periods": ["2023-01-01", "2023-01-02"],
            "precipitation": ["5", "10"],
            "temperature": ["25", "23"],
            "max_temperature": ["30", "28"]
        }
    }
    assert json_data == expected_data

    # Clean up the temporary directory and files
    os.remove(csv_filepath)
    os.remove(expected_output_filepath)
    os.rmdir(temp_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n 
\"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. 
PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". 
Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n 
js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return", "def csv_to_json(csv_file_path: str, json_file_path: str):\n fieldnames = ('last_name', 'first_name', 'second_name')\n\n # read csv file\n try:\n with open(Path(csv_file_path)) as csv_file:\n csv_reader = csv.DictReader(csv_file, fieldnames)\n csv_data = {num: row for num, row in enumerate(csv_reader, start=1)}\n except FileNotFoundError as err:\n raise CustomException() from err\n\n # generate json\n try:\n with open(Path(json_file_path), 'w') as json_file:\n json.dump(csv_data, json_file, indent=2)\n except OSError as err:\n raise CustomException() from err", "def formatJSON(csvpath, jsonfilepath):\n\n data = {}\n my_list = []\n with open(path) as file:\n csvReader = csv.DictReader(file)\n for csvRow in csvReader:\n\n data = csvRow\n my_list.append(data)\n\n \"\"\"\n\n Write retrieved data into a json file\n NOTE: json file is automatically created when code is run from terminal\n and updates each time it run again.\n \"\"\"\n\n\n with open(jsonfilepath,\"w\") as jsonfile:\n\n jsonfile.write(json.dumps(my_list,indent=4))", "def test_csv_to_json():\r\n json_dict = {\r\n \"covariates\":{ \r\n \"value\":{\r\n \"subject0\": {\r\n \"attribute0\": 3.0,\r\n \"attribute1\": 12.0\r\n },\r\n \"subject1\": {\r\n \"attribute0\": 1.2,\r\n \"attribute1\": 10.9\r\n }\r\n }\r\n },\r\n \"data\":{\r\n \"fulfilled\": True,\r\n \"value\": {\r\n \"type\": [\"float\"],\r\n \"value\": [\r\n \"attribute0\",\r\n \"attribute1\"\r\n ]\r\n }\r\n },\r\n \"lambda\":{\r\n \"fulfilled\": True,\r\n \"value\": 0\r\n }\r\n }\r\n json_string = \"[\" + json.dumps(json_dict).replace(' ', '').replace('\\n', '') + \"]\"\r\n directory = os.path.join(os.getcwd(), \"test/\")\r\n lambda_ = \"0\"\r\n data_type = [\"float\"]\r\n data_vars = [\"attribute0\", \"attribute1\"]\r\n assert csv_to_json_(directory, lambda_, data_type, data_vars).replace(' ', '').replace('\\n', '') == json_string", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def test_csv(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(answer_file_path), 'r') as answer_file:\n csv_file = open(attach_path(input_file_path))\n assert str(read_csv(csv_file)) == answer_file.read().strip()", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)", "def create_manifest_file(csv_file, manifest_file, s3_path):\n logger.info(\"Processing CSV file %s.\", csv_file)\n\n image_count = 0\n anomalous_count = 0\n\n with open(csv_file, newline='', encoding=\"UTF-8\") as csvfile,\\\n open(manifest_file, \"w\", encoding=\"UTF-8\") as output_file:\n\n image_classifications = 
csv.reader(\n csvfile, delimiter=',', quotechar='|')\n\n # Process each row (image) in the CSV file.\n for row in image_classifications:\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n source_ref = str(s3_path) + row[0]\n classification = 0\n\n if row[1].lower() == 'anomaly':\n classification = 1\n anomalous_count += 1\n\n # Create the JSON line.\n json_line = {}\n json_line['source-ref'] = source_ref\n json_line['anomaly-label'] = str(classification)\n\n metadata = {}\n metadata['confidence'] = 1\n metadata['job-name'] = \"labeling-job/anomaly-classification\"\n metadata['class-name'] = row[1]\n metadata['human-annotated'] = \"yes\"\n metadata['creation-date'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')\n metadata['type'] = \"groundtruth/image-classification\"\n\n json_line['anomaly-label-metadata'] = metadata\n\n output_file.write(json.dumps(json_line))\n output_file.write('\\n')\n image_count += 1\n\n logger.info(\"Finished creating manifest file %s.\\n\"\n \"Images: %s\\nAnomalous: %s\",\n manifest_file,\n image_count,\n anomalous_count)\n return image_count, anomalous_count", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))", "def get_data(self, csv_file):\n pass", "def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n 
foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('Downloading data set from DC Open data')\n\n with open(input_filepath, 'r') as f:\n parking_violations = json.load(f)\n\n for fullname, csv in parking_violations.items():\n download_file = csv + '.csv'\n local_filename = '_'.join(name.lower() for name in fullname.split() ) + '.csv'\n local_filename = os.path.join(output_filepath, local_filename)\n if not os.path.isfile(local_filename):\n time.sleep(5)\n r = requests.get(download_file)\n if not b'\"status\":\"Processing\",\"generating\":{}' in r.content:\n with open(local_filename, 'wb') as f:\n f.write(r.content)\n logger.info(local_filename)\n else:\n logger.warning('Cannot download {0}'.format(local_filename))", "def loadCSV(input_file):", "def main(input_filepath, output_filepath):\n productsDict = dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)", "def test_parse(self, tmpdir):\n json_file = tmpdir.join(\"f.json\")\n obj = {\"ds\": [{\"file\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}]}\n with open(str(json_file), \"w\") as f:\n json.dump(obj, f)\n\n csv_file = tmpdir.join(\"f.csv\")\n csv_file.write(\"\\n\".join([\n \",\".join(HEADER_ROW),\n \"ds,1,url,title,yes,no,{}\".format(str(json_file))\n ]))\n\n expected = {\n \"ds\": {\n \"generate_aggregation\": True,\n \"include_in_wms\": False,\n \"tech_note_title\": \"title\",\n \"tech_note_url\": \"url\",\n \"files\": [\n {\"path\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}\n ]\n }\n }\n\n s = StringIO()\n sys.stdout = s\n parse_file(str(csv_file))\n sys.stdout = sys.__stdout__\n\n output_json = s.getvalue()\n try:\n parsed = json.loads(output_json)\n except ValueError:\n assert False, \"parse_file() produced invalid JSON\"\n\n assert parsed == expected", "def init_csv(input_path, config_file, quiet):\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='green')\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file) or quiet:\n confirm_overwrite = True\n else:\n confirm_overwrite = click.confirm(\n '{} already exists. 
Do you want to overwrite it?'.format(csv_file))\n\n if confirm_overwrite:\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n csvHandler(csv_file).resetCSV(config_file=config_file)\n click.secho('{} created'.format(csv_file), fg='green')", "def initCSV(self, makeFile, overWrite):\n self.initialized = True\n\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n if os.path.exists(str(self.fileName)):\n\n f = open(str(self.fileName), \"r\")\n\n if not f.read():\n f.close()\n\n f = open(str(self.fileName), \"w\")\n outString = \"\"\n for varName in self.variableDescriptions:\n outString += varName\n outString += \",\"\n\n f.write(outString[0:-1])\n\n f.write('\\n')\n else:\n if overWrite == True:\n f.close()\n\n f = open(str(self.fileName), \"w\")\n outString = \"\"\n for varName in self.variableDescriptions:\n outString += varName\n outString += \",\"\n\n f.write(outString[0:-1])\n\n f.write('\\n')\n if overWrite == False:\n raise OSError(\"csv file is not empty!\")\n\n else:\n if makeFile == True:\n f = open(str(self.fileName), \"w\")\n \n f.close()\n else:\n raise OSError(\"csv file not found!\")", "def file_setup(outfile):\n\n extant_objids = []\n\n if os.path.exists(outfile):\n print('This file exists.')\n try:\n extant_objids = np.array(pd.read_csv(outfile)['objid']).tolist()\n except:\n print('And nonstandard!')\n # Raise an exception?\n return False\n else:\n # Initialize the file with a header\n with open(outfile, 'wb') as csvfile:\n cols = ['objid', 'flat_counts', 'mcat_bg', 'bg_counts',\n 'flux_bgsub_err', 'cps_mcatbgsub', 'counts',\n 'mag_mcatbgsub', 'cps_err', 'mag_bgsub', 'cps_bgsub',\n 'detys', 'flux_bgsub', 'flux_err', 'mag_err_1',\n 'cps_bgsub_err', 't1_data', 'bg', 'responses', 't_mean',\n 'cps_mcatbgsub_err', 'mag_bgsub_err_1', 'mag_err_2',\n 't0_data', 'racent', 'deccent', 'mag', 'exptime',\n 'bg_flat_counts', 'detxs', 't0', 't1',\n 'mag_mcatbgsub_err_2', 'flux', 'mag_mcatbgsub_err_1',\n 'flags', 'mag_bgsub_err_2', 'detrad', 'cps',\n 'flux_mcatbgsub_err', 'flux_mcatbgsub', 'mcat_expt', 'ra',\n 'dec', 'aper4', 'aper4_err', 'mcat_bg',\n 'aper7', 'aper7_err']\n\n spreadsheet = csv.writer(csvfile, delimiter=',', quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n spreadsheet.writerow(cols)\n\n return extant_objids", "def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)", "def __openAndInitCSVFile(self, modelInfo):\n # Get the base path and figure out the path of the report file.\n basePath = self.__outputDirAbsPath\n\n # Form the name of the output csv file that will contain all the results\n reportCSVName = \"%s_Report.csv\" % (self.__outputLabel,)\n reportCSVPath = self.__reportCSVPath = os.path.join(basePath, reportCSVName)\n\n # If a report CSV file already exists, back it up\n backupCSVPath = None\n if os.path.exists(reportCSVPath):\n backupCSVPath = self.__backupCSVPath = _backupFile(reportCSVPath)\n\n\n # Open report file\n if self.__replaceReport:\n mode = \"w\"\n else:\n mode = \"a\"\n csv = self.__csvFileObj = open(reportCSVPath, mode)\n\n # If we are appending, add some blank line separators\n if not self.__replaceReport and backupCSVPath:\n print >> csv\n print >> csv\n\n # Print the column names\n print >> csv, \"jobID, \",\n print >> csv, \"modelID, \",\n print >> csv, \"status, \" ,\n print >> csv, \"completionReason, \",\n print >> 
csv, \"startTime, \",\n print >> csv, \"endTime, \",\n print >> csv, \"runtime(s), \" ,\n print >> csv, \"expDesc, \",\n print >> csv, \"numRecords, \",\n\n for key in self.__sortedVariableNames:\n print >> csv, \"%s, \" % key,\n for key in self.__sortedMetricsKeys:\n print >> csv, \"%s, \" % key,\n print >> csv", "def convert_to_json(dict_to_convert, csv_file):\n json_file = csv_to_json(csv_file)\n\n with open(json_file, 'w') as file:\n json.dump(dict_to_convert, file)\n\n logging.info('JSON file written with heart rate metrics')\n return 0", "def test_json_file(self):\n #response = os.system(\"python3 client.py -f filename.csv\")\n response = client.result(False, 'json', 'unittest',file = 'test_file.csv')\n response = json.loads(response)\n first_name = response['person'][0]['first_name']\n self.assertEqual(first_name,'John','Should print John')\n length = len(response['person'])\n for count in range(0,length):\n self.assertNotIn('nationality',response['person'][count], 'Nationality should not be present')", "def test_findCSV(self,\n filename=\"page-views.csv\",\n input_folder='../../input/raw-data/'):\n\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = input_folder + filename\n self.assertEqual(csv_file, expected_output)", "def test_repo_to_json(self):\n if path.exists(\"report.csv\"):\n remove(\"report.csv\")\n\n with open(\"test/fixtures/results.json\", 'r') as data:\n data = json.load(data)\n client = CSVReport()\n client.process(data)\n\n self.assertTrue(path.exists(\"report.csv\"))", "def loop_csv(input_csv_path, output_csv_path):\n counter = 0\n with open(input_csv_path, 'rb') as read_csvfile:\n projectsreader = csv.DictReader(\n read_csvfile, delimiter=',', quotechar='\"')\n\n with open(output_csv_path, 'w') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl', 'foundProjectUrl1',\n 'foundProjectUrl2', 'foundProjectUrl3',\n 'foundProjectUrl4', 'foundProjectUrl5',\n 'foundProjectUrl6', 'foundProjectUrl7',\n 'foundProjectUrl8', 'foundProjectUrl9',\n 'foundProjectUrl10']\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n # writer.writeheader() # this method only available at python 2.7\n for row in projectsreader:\n if counter == 100:\n time.sleep(86400) # sleep 1 day\n counter = 0\n\n res = query_google_cse(\n row['acronym'] + \" \" + row['title'] +\n \" project -site:cordis.europa.eu -site:ec.europa.eu\")\n\n # save response to file\n with open('responses_gcse.json', 'w') as outfile:\n json.dump(res, outfile)\n\n # a query response may not have 10 results, so we have to check\n # for that\n results = []\n result_size = res['queries']['request'][0]['totalResults']\n\n print \"INFO: RESULT SIZE %s\" % result_size\n for i in range(10):\n if i < int(result_size):\n results.append(res['items'][i]['link'])\n else:\n results.append('')\n\n # print \"Control Print: \" + res['items'][0]['link']\n print \"INFO: First Result: \" + results[0]\n writer.writerow({\n 'acronym': row['acronym'],\n 'title': row['title'],\n 'projectUrl': row['projectUrl'],\n 'foundProjectUrl1': results[0],\n 'foundProjectUrl2': results[1],\n 'foundProjectUrl3': results[2],\n 'foundProjectUrl4': results[3],\n 'foundProjectUrl5': results[4],\n 'foundProjectUrl6': results[5],\n 'foundProjectUrl7': results[6],\n 'foundProjectUrl8': results[7],\n 'foundProjectUrl9': results[8],\n 'foundProjectUrl10': results[9],\n })\n sys.stdout.flush()\n time.sleep(2)\n counter += 1", "def _setup_output_file(self):\n\n columns = [\"Hero file\",\n \"Test type\",\n \"Name of tested 
entry\",\n \"Misc dice sum input\",\n \"Value of tested entry\",\n \"Modifier\",\n \"Values of related attributes\",\n \"Rolls\",\n \"Result\",\n \"Description\",\n \"Timestamp\",\n \"Type of dice input\"]\n\n # if file does not exist, add first row of column names\n if not os.path.isfile(self._result_csv):\n with open(self._result_csv, \"w\", encoding=\"utf-8\") as csv_file:\n file_writer = csv.writer(csv_file, delimiter=',',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n file_writer.writerow(columns)\n return True\n return False", "def _load_single_file(self, table_name, manifest_row, csv_reader,\n temp_filepath):\n # get database interface and it's equivalent manifest row\n sql_interface = self._configure_db_interface(\n manifest_row=manifest_row, temp_filepath=temp_filepath)\n\n sql_manifest_row = sql_interface.get_sql_manifest_row()\n\n cleaner = self._get_cleaner(table_name=table_name,\n manifest_row=manifest_row)\n csv_writer = CSVWriter(meta=self.meta,\n manifest_row=manifest_row,\n filename=temp_filepath)\n\n # clean the file and save the output to a local pipe-delimited file\n # if it doesn't have a 'loaded' status in the database manifest\n if csv_reader.should_file_be_loaded(sql_manifest_row=sql_manifest_row):\n print(\" Cleaning...\")\n meta_only_fields = self._get_meta_only_fields(\n table_name=table_name, data_fields=csv_reader.keys)\n for idx, data_row in enumerate(csv_reader):\n data_row.update(meta_only_fields) # insert other field dict\n clean_data_row = cleaner.clean(data_row, idx)\n if clean_data_row is not None:\n csv_writer.write(clean_data_row)\n\n csv_writer.close()\n\n # write the data to the database\n self._update_database(sql_interface=sql_interface)\n\n if not self._keep_temp_files:\n csv_writer.remove_file()", "def create_metadata_shell_for_csv(csv_file_path: str) -> str:\n metadata_file = f\"{csv_file_path}-metadata.json\"\n if path.exists(metadata_file):\n raise Exception(f\"Metadata file {metadata_file} already exists.\")\n if not path.exists(csv_file_path):\n raise Exception(f\"CSV file {csv_file_path} does not exist.\")\n\n label = map_file_path_to_label(csv_file_path)\n concept_scheme_uri = generate_concept_scheme_root_uri(label)\n\n # Just inserting basic structure at this point as already exists in standard files. 
Additional metadata will be\n # added as the script continues to run.\n metadata = {\n \"@context\": \"http://www.w3.org/ns/csvw\",\n \"@id\": concept_scheme_uri,\n \"url\": csv_file_path,\n \"rdfs:label\": label,\n \"dc:title\": label,\n \"tableSchema\": {\n \"columns\": [],\n },\n \"prov:hadDerivation\": {\n \"@id\": concept_scheme_uri,\n \"@type\": [\n \"skos:ConceptScheme\",\n f\"{pmdcat_base_uri}DatasetContents\"\n ]\n }\n }\n\n table_schema: Dict = metadata[\"tableSchema\"]\n columns: List[Dict] = table_schema[\"columns\"]\n\n with open(csv_file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\", quotechar=\"\\\"\")\n column_names: List[str] = next(reader)\n\n for column_name in column_names:\n column = generate_schema_for_column(column_name, concept_scheme_uri)\n columns.append(column)\n\n columns.append({\n \"virtual\": True,\n \"propertyUrl\": \"rdf:type\",\n \"valueUrl\": \"skos:Concept\"\n })\n columns.append({\n \"virtual\": True,\n \"propertyUrl\": \"skos:inScheme\",\n \"valueUrl\": concept_scheme_uri\n })\n\n if \"notation\" in [c.lower() for c in column_names]:\n override(table_schema, {\n \"primaryKey\": \"notation\",\n \"aboutUrl\": concept_scheme_uri + \"/{notation}\"\n })\n else:\n print(\"WARNING: could not determine primary key. As a result, `aboutUrl` property is not specified and \" +\n \"so each row will not have a true URI. This is basically required. Manual configuration required.\")\n\n with open(metadata_file, 'w+') as file:\n file.write(json.dumps(metadata, indent=4))\n\n return str(metadata_file)", "def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]", "def check_for_csv(inp_dict):\n if inp_dict[\".csv\"]:\n for path in inp_dict[\".csv\"]:\n csv_path = convert_csv(path)\n inp_dict[\".xls\"].append(csv_path)\n inp_dict[\"del\"].append(csv_path)\n inp_dict[\"out\"].append(csv_path)\n inp_dict[\".csv\"] = []\n return inp_dict", "def convert(csv_filepath, output_format):\n valid_data, invalid_data = [], []\n\n logger.info('Started processing the csv.')\n\n with csv_filepath.open(newline='', encoding='utf-8') as f:\n csv_data = (row for row in csv.reader(f) if row)\n next(csv_data) # skip the header rows\n\n for row in csv_data:\n hotel = Hotel.from_row(row)\n if hotel.is_valid():\n valid_data.append(hotel.as_dict())\n else:\n invalid_data.append(hotel)\n\n processor = FORMAT_PROCESSORS[output_format]['processor']\n ext = FORMAT_PROCESSORS[output_format]['ext']\n\n output_filepath = csv_filepath.parent / 'output.{}'.format(ext)\n processor(valid_data, output_filepath)\n\n # TODO implement a way to report 
errors and invalid data\n logger.info('Finish processing the csv. {} hotels converted.'.format(\n len(valid_data)))", "def test_to_csv_with_no_rows_returns_none(self):\n output = row_handling.to_csv(rows=[], csv_path=self.csv_path)\n assert output is None", "def test_csv_writes(self):\n counter = testdata.get_counter()\n csvfile = testdata.create_csv({\n \"foo\": counter,\n \"bar\": testdata.get_words,\n })\n\n for row in csvfile:\n for k in [\"foo\", \"bar\"]:\n self.assertTrue(k in row)\n self.assertTrue(row[k])", "def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename", "def test_csv(self, mock_upload_method):\n\n args = argparse.Namespace(\n request_id=\"test_id\",\n expression_manifest_key=EXPRESSION_MANIFEST,\n cell_metadata_manifest_key=CELL_MANIFEST,\n gene_metadata_manifest_key=GENE_MANIFEST,\n target_path=\"test.csv.zip\",\n format=\"csv\",\n working_dir=\".\")\n\n with mock.patch(\"matrix.docker.matrix_converter.RequestTracker\") as mock_request_tracker, \\\n mock.patch(\"os.remove\"):\n matrix_converter = MatrixConverter(args)\n matrix_converter.FS = s3fs.S3FileSystem(anon=True)\n\n mock_request_tracker.return_value.creation_date = \"1983-10-11T000000.00Z\"\n\n matrix_converter.run()\n\n with zipfile.ZipFile(\"test.csv.zip\") as csv_output:\n\n # Check the components of the zip file\n members = csv_output.namelist()\n self.assertIn(\"test.csv/expression.csv\", members)\n self.assertIn(\"test.csv/genes.csv\", members)\n self.assertIn(\"test.csv/cells.csv\", members)\n self.assertEqual(len(members), 3)\n\n # Read in the expression data\n csv_expression = {}\n for row in csv.DictReader(io.StringIO(\n csv_output.read(\"test.csv/expression.csv\").decode())):\n\n csv_expression[row[\"cellkey\"]] = {}\n for gene, exprvalue in row.items():\n if gene == \"cellkey\" or exprvalue == '0':\n continue\n csv_expression[row[\"cellkey\"]][gene] = float(exprvalue)\n\n # Check it against the direct expression data\n self.assertListEqual(list(csv_expression.keys()),\n list(self.direct_expression.keys()))\n\n for cellkey in csv_expression:\n csv_dict = csv_expression[cellkey]\n direct_dict = 
self.direct_expression[cellkey]\n self.assertListEqual(list(csv_dict.keys()), list(direct_dict.keys()))\n\n for gene in csv_dict:\n self.assertAlmostEqual(csv_dict[gene], direct_dict[gene], places=0)\n\n del csv_expression\n\n csv_cells = {}\n for row in csv.DictReader(io.StringIO(csv_output.read(\"test.csv/cells.csv\").decode())):\n csv_cells[row[\"cellkey\"]] = row\n self.assertListEqual(list(csv_cells.keys()), list(self.direct_cell.keys()))\n for cellkey in csv_cells:\n self.assertListEqual(list(csv_cells[cellkey].values()),\n list(self.direct_cell[cellkey].values()))", "def csv_to_json(file_obj: Path):\n if not file_obj.is_file():\n raise FileNotFoundError\n\n with open(file_obj) as f:\n csv_file = list(csv.reader(f))\n return json.dumps(csv_file)", "def read_csv_file(self):\n pass", "def initial_csv_wrangling(csv_file):\n df = pd.read_csv(csv_file)\n df = df.fillna('')\n columns = list(df.columns)\n\n # check that \"url\" column exists (required)\n if 'url' not in columns:\n raise Exception('Input csv file requires a \"url\" column, which does not seem to exist. Exiting.')\n\n # check if \"pos_concepts\" column exists and parse accordingly (not required)\n if 'pos_concepts' in columns:\n print('Found \"pos_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['pos_concepts'] = df['pos_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"neg_concepts\" column exists and parse accordingly (not required)\n if \"neg_concepts\" in columns:\n print('Found \"neg_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['neg_concepts'] = df['neg_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"metadata\" column exists and load accordingly (not required)\n if \"metadata\" in columns:\n print('Found \"metadata\" column. Attempting to ingest.')\n try:\n df['metadata'] = df['metadata'].replace('','{}').map(json.loads)\n except:\n raise Exception('Value in \"metadata\" column does not seem to be a properly JSON formatted str.')\n\n return df", "def get_args():\n\n parser = argparse.ArgumentParser(\n \"Create json file from csv by infering json'structure using a delimiter inside csv's columns.\"\n )\n parser.add_argument(\"--csv\", type=str, help='Set path to csv file as input')\n parser.add_argument(\"--json\", type=str, help='Set path to json file as output')\n parser.add_argument(\"--delimiter\", type=str, default='_', help='Set delimiter used to infer json\\'s structure (default=\\'_\\')')\n parser.add_argument(\"--config\", type=str, default=None, help='Set path to json file containing data type information and or default value(default=\\'None\\', optional and precise column type)') \n parser.add_argument(\"--cols_delimiter\", type=str, default=',', help='Set delimiter of the csv (default=\\',\\')')\n parser.add_argument(\"--max_docs\", type=int, default=-1, help='Set max number of documents in a json file, several will be created if necessary (default=\\'-1\\' means single output file)') \n parser.add_argument(\"--per_line\", action='store_true', default=False, help='Dump a file containing one json per line. Careful the output is not a correct json (default=\\'False\\')')\n parser.add_argument(\"--infer_types\", action='store_true', default=False, help='Infer data type based on its value: float, list and date are supported. Carefull, \\'config\\' will override it if specified. 
(default=\\'False\\')') \n parser.add_argument(\"--keep\", action='store_true', default=False, help='Keep fields with empty values replaced by null instead of ignoring them (default=\\'True\\')') \n args = parser.parse_args()\n return args", "def analyse(self, csv_reader, json_input, csv_writer):\n csv_writer.writeheader()\n\n new_rows_count = 1\n\n while True:\n try:\n row = csv_reader.next()\n except StopIteration:\n break\n\n new_row = dict()\n\n try:\n transaction_type = row['Detalii tranzactie']\n\n if transaction_type == 'Cumparare POS':\n # Data ,Detalii tranzactie ,Debit ,Credit\n # 02 decembrie 2015 ,Cumparare POS ,\"246,33\" ,\n # ,Nr.card:42XXX3965 , ,\n # ,Terminal:AUCHAN , ,\n # ,Data:29-11-2015 Autorizare: 495514 , ,\n\n new_row['id'] = new_rows_count\n new_row['transaction'] = transaction_type\n new_row['amount'] = INGBankStatementParser.obtain_amount(row['Debit'])\n\n new_row['details'] = csv_reader.next()['Detalii tranzactie']\n\n row = csv_reader.next()\n new_row['partner'] = INGBankStatementParser.obtain_partner(row['Detalii tranzactie'])\n new_row['details'] += ' ' + row['Detalii tranzactie']\n\n row = csv_reader.next()\n new_row['timestamp'] = INGBankStatementParser.obtain_date(row['Detalii tranzactie']) + ' 10:0' + str(new_rows_count % 10) + 'AM'\n new_row['details'] += ' ' + row['Detalii tranzactie']\n\n else:\n self.logger.warning('Unknown transaction type, ignoring row: ' + str(row.items()))\n\n except Exception as ex:\n self.logger.warning('Exception caught: ' + str(ex))\n self.logger.warning('Exception around row: ' + str(row.items()))\n self.logger.warning('Ignoring partial composed new row: after row: ' + str(new_row.items()))\n\n if new_row.items():\n self.logger.debug('Adding new row: ' + str(new_row.items()))\n csv_writer.writerow(new_row)\n new_rows_count += 1\n\n return json_input", "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n df = create_data_frame(input_filepath)\n process_columns(df)\n logger.info(df.head())\n df.to_csv(output_filepath, index=False)", "def get_output_json(self, case_path):\r\n if not os.path.exists(case_path):\r\n logging.ERROR('the path of source files does not exist')\r\n else:\r\n self.case_path = os.path.abspath(case_path)\r\n self.case_json = os.path.join(self.case_path, 'output.json')\r\n self.case_image = os.path.join(self.case_path, 'images')\r\n self.num_name = os.path.abspath(self.case_path).split(sep='\\\\')[-2]\r\n self.chi_name = IdToChinese[self.num_name]\r\n\r\n with io.open(self.case_json, 'r', encoding='utf-8') as f:\r\n json_data = json.load(f)\r\n self.audioResult = json_data['data']['audioResult']\r\n self.docs = self.audioResult['docs']\r\n self.classify_four_w= self.audioResult['4W']\r\n 
self.approval_information = self.audioResult['approval_information']\r\n return True", "def process_csv(csv_file=None):\n if csv_file:\n _process_csv_data(csv_file, USER_DATA_MAP)\n else:\n csv_files_list = [os.path.join(DATA_DIR, f) for f in os.listdir(DATA_DIR) if f.endswith('.csv')]\n for fl in csv_files_list:\n _process_csv_data(fl, USER_DATA_MAP)\n return USER_DATA_MAP", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out", "def read_csv():\n global csvdata\n global CONFIG\n if type(csvdata) == type(None):\n if not os.path.exists(CONFIG[\"csvfile\"]):\n csvdata = pandas.read_csv(CONFIG[\"csvrepo\"],\n na_values=[\"-999999\",\"NOT AVAILABLE\"])\n os.makedirs(CONFIG[\"cachedir\"],exist_ok=True)\n csvdata.to_csv(CONFIG[\"csvfile\"])\n else:\n csvdata = pandas.read_csv(CONFIG[\"csvfile\"])\n return csvdata", "def get_concatenated_csv_data(concatenated_filepath, concatenated_filename, device_id, output_create_files_filepath, output_create_files_filename):\n\n # Create the full file name of the concatenated filename.\n concatenated_file = concatenated_filepath + \"/\" + concatenated_filename + \"_concatenated.csv\"\n print(\"Looking for concatenated file name: \", concatenated_file)\n\n # Test if the concatenated file exists and if it does, return it.\n if os.path.isfile(concatenated_file):\n print(\"Concatenated file exists: \", concatenated_file)\n return concatenated_file\n\n # If it does not exist, test if the individual files exist.\n elif not os.path.isfile(concatenated_file):\n print(\"Concatenated file does not exist. Create file: \", concatenated_file)\n file_list = get_data_from_files(concatenated_filepath, concatenated_filename)\n # print(\"File list:\", file_list)\n\n # If the individual files exist, create the concatenated file.\n if len(file_list) > 0:\n print(\"Individual csv files exist. Creating the concatenated file.\")\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file\n\n # If the individual files do not exist, get the data from the database, create the files then concatenate them.\n else:\n database_query = \"select * from ship_data_gpggagpsfix where device_id=\" + int(\n device_id) + \" order by date_time;\"\n # print(database_query)\n password = input()\n\n db_connection = MySQLdb.connect(host='localhost', user='ace', passwd=password, db='ace2016', port=3306);\n\n track_df = get_data_from_database(database_query, db_connection)\n track_df = string_to_datetime(track_df)\n\n # Output the data into daily files (as they do not already exist).\n output_daily_files(track_df, output_create_files_filepath, output_create_files_filename)\n\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file", "def main():\n\n # Load arguments\n args = get_args()\n \n assert os.path.exists(args.csv), ' [ERR] File' + os.path.exists(args.csv) +'does not exist'\n\n print(args)\n try:\n dir_name = os.path.dirname(args.json)\n os.mkdir(dir_name)\n print(' [INFO] Creating', dir_name, 'directory')\n except:\n print(' [INFO] Directory', dir_name, 'already exists. 
Data will be replaced')\n pass\n\n if args.config:\n assert os.path.exists(args.config), ' [ERR] File' + os.path.exists(args.config) +'does not exist'\n dic_types = read_config(args.config)\n else:\n dic_types = {}\n \n # Create json\n create_json_from_csv(args.csv, args.delimiter, args.cols_delimiter, args.keep, dic_types, args.infer_types, args.max_docs, args.json, args.per_line)\n\n return 0", "def get_csv(\n self,\n csv_name: str,\n csv_directory: Optional[str] = None,\n csv_output_name: Optional[str] = None,\n graph_type: Optional[str] = \"instance\",\n graph_id: Optional[str] = \"main\",\n ):\n self._check_connection()\n options = {}\n if csv_directory is None:\n csv_directory = os.getcwd()\n if csv_output_name is None:\n csv_output_name = csv_name\n options[\"name\"] = csv_name\n\n result = self._dispatch(\n \"get\",\n self._csv_url(graph_type, graph_id),\n options,\n )\n\n stream = open(f\"{csv_directory}/{csv_output_name}\", \"w\")\n stream.write(result)\n stream.close()", "def seed_from_csv_diff(original_file_path, new_file_path, model, **kwargs):\n\n original_diff_set = set()\n new_diff_set = set()\n new_file = open(new_file_path, 'r')\n headers = new_file.readline().replace('\\n', '').split(',')\n new_reader = model.update_set_filter(csv.reader(new_file), headers)\n\n original_file = open(original_file_path, 'r')\n original_reader = csv.reader(original_file)\n next(original_reader, None)\n logger.debug(\" * Beginning CSV diff process.\")\n\n for row in new_reader:\n new_diff_set.add(json.dumps(row))\n\n for row in original_reader:\n original_diff_set.add(json.dumps(row))\n\n diff = new_diff_set - original_diff_set\n temp_file_path = os.path.join(settings.MEDIA_TEMP_ROOT, str(\n 'set_diff' + str(random.randint(1, 10000000))) + '.mock' if settings.TESTING else '.csv')\n with open(temp_file_path, 'w') as temp_file:\n writer = csv.writer(temp_file, delimiter=',')\n writer.writerow(headers)\n for row in diff:\n writer.writerow(json.loads(row))\n\n diff_gen = from_csv_file_to_gen(temp_file_path, kwargs['update'])\n logger.debug(\" * Csv diff completed, beginning batch upsert.\")\n batch_upsert_from_gen(model, diff_gen, settings.BATCH_SIZE, **kwargs)\n if os.path.isfile(temp_file_path):\n os.remove(temp_file_path)\n if 'callback' in kwargs and kwargs['callback']:\n kwargs['callback']()", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def test_to_csv_with_valid_rows(self, mock_open):\n row_handling.to_csv(rows=self.rows, csv_path=self.csv_path)\n open.assert_called_with(self.csv_path, 'w')", "def create_file(output_json):\n folder = \"data/\"\n filename = datetime.now().strftime(\"%d-%m-%Y\") + \"-moisture-read.json\"\n filepath = folder+filename\n\n # Create Local folder\n try:\n os.mkdir(folder)\n except OSError:\n pass\n #print(\"Directory already created or a failure occured on directory (%s)\" % folder)\n\n # Create Empty Json file if it doesnt exists\n if(Path(filepath)).exists():\n pass\n else:\n try:\n f = open(filepath, \"a\")\n f.write('{\\n\"moisture_iot_project\":[]\\n}')\n f.close()\n except Exception as e:\n print(\"Failure occured creating the JSON file (%s)\" % e)\n\n # Open Json file to append current structure\n with 
open(filepath) as outfile:\n data = json.load(outfile)\n\n # Get list with all dictionaries\n temp = data['moisture_iot_project']\n\n # Append current structure\n temp.append(output_json)\n\n # Reorganize List values and re-write to JSON file\n data['moisture_iot_project'] = temp\n write_json(data, filepath)", "def main(args):\n \n args_are_valid, input_filepath, output_filepath, base_url, message = handle_arguments(args)\n if not args_are_valid:\n return print(message)\n \n with open(input_filepath, newline=\"\") as input_csv:\n csvreader = csv.reader(input_csv, delimiter=\",\",)\n\n needed_input_columns = [\"Account ID\",\"First Name\", \"Created On\"]\n needed_output_columns = [\"Account ID\",\"First Name\", \"Created On\", \"Status\", \"Status Set On\"]\n headers = next(csvreader) #grab first row as headers\n if not set(needed_input_columns).issubset(headers):\n print('ERROR - input csv must contain columns [\"Account ID\",\"First Name\", \"Created On\"] as headers')\n\n with open(output_filepath, mode = \"w\", newline = \"\") as output_csv:\n csvwriter = csv.DictWriter(output_csv, fieldnames = needed_output_columns)\n csvwriter.writeheader()\n\n index_of = {}\n for index,header in enumerate(headers):\n index_of[header] = index\n write_dict = {}\n\n #Loop through inputfile\n for row in csvreader:\n still_valid = True\n if len(row) != len(headers):\n message = \"ERROR - csv row has incomplete data\"\n still_valid = False\n if still_valid:\n # extract data from row, columns can be in any order\n for column in needed_input_columns:\n write_dict[column] = row[index_of[column]]\n still_valid, write_dict, message = verify_and_clean_input(write_dict)\n if still_valid:\n write_dict, message = extend(write_dict, query(write_dict[\"Account ID\"], base_url))\n #only write to csv if all input data valid, query data nulled out if invalid\n csvwriter.writerow(write_dict) \n print(message)\n\n output_csv.close() \n input_csv.close()", "def importFile(self):\n\n ## Backing up old CSV and JSON files before beginning import operations\n if os.path.isfile(\"text_files/customers.csv\") and os.path.isfile(\"text_files/customers.json\"):\n print(\"\\nCreating a backup of the existing customer .csv and .json files before overwriting\")\n shutil.copy2(\"text_files/customers.csv\", \"text_files/customers.csv.backup\" + str(time.time()))\n shutil.copy2(\"text_files/customers.json\", \"text_files/customers.json.backup\" + str(time.time()))\n\n ## Importing the text file for cleaning then converting to CSV\n input_file = open(\"text_files/customer_export.txt\", \"r\")\n output_file = open(\"text_files/customers.csv\", \"w\")\n\n ## A loop to clean and write the customer_export txt file to a CSV\n for line in input_file:\n clean_text = \"\"\n check_line = line.replace(\"#\", \"\").replace(\",,\",\"\").split(\"|\")\n for line in check_line:\n if line != check_line[10]:\n clean_text += line + \",\"\n elif line == check_line[10]:\n clean_text += line + \"\\n\"\n output_file.write(clean_text)\n\n ## Closing TXT file and CSV file after formatting\n input_file.close()\n output_file.close()\n\n ## Opening the cleaned CSV file for conversion to Json\n with open('text_files/customers.csv') as clean_csv:\n ## Converting CSV file to Json\n converted = csv.DictReader(clean_csv)\n rows = list(converted)\n\n ## Writing converted CSV to Json file\n with open('text_files/customers.json', 'w') as convert:\n json.dump(rows, convert)\n\n ## Deleting all data currently in database before importing new file\n 
db_connection.executeQuery(\"DELETE FROM CRM;DBCC CHECKIDENT ('CRM', RESEED, 0) DELETE FROM Mailings; DBCC CHECKIDENT ('Mailings', RESEED, 0) COMMIT\") \n\n ## Loading the newly created Json file\n with open(\"text_files/customers.json\") as customers_json:\n customers = json.load(customers_json)\n\n ## A loop to add the contents of the Json file to the database \n print(\"Writing imported file to database please wait...\")\n for key in customers:\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"address\"] + \"', '\" + key[\"city\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"county\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"state\"] + \"', '\" + str(key[\"zip\"]) + \"', '\" + key[\"phone1\"] + \"', '\" + key[\"phone2\"] + \"' , '\" + key[\"email\"] + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \" \" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"','\" + key[\"address\"] + \" \" + key[\"city\"] + \" \" + key[\"county\"] + \" \" + key[\"state\"] + \" \" + str(key[\"zip\"]) + \"'); COMMIT\") \n\n print(\"\\nFinished writing to file. Returning to main menu...\")", "def read_csv():", "def dataset_constructor_csv_file_upload(request):\n if request.method == \"POST\":\n relation_support_dataset = request.FILES['csv_file']\n handle_uploaded_file(relation_support_dataset, 'temp/cntr_csv_file.csv')\n df = pd.read_csv('temp/cntr_csv_file.csv')\n ind = {}\n data = []\n for i, row in df.iterrows():\n if row['reldescription'] not in ind:\n data.append({'name':row['reldescription'], 'examples':[]})\n ind[row['reldescription']] = len(data) - 1\n data[ind[row['reldescription']]]['examples'].append({'head':row['head'], 'tail':row['tail'], 'sentence':row['sentence']})\n return HttpResponse(\n json.dumps({'num_rels':len(data), 'num_exs':len(data[0]['examples']), 'data':data}),\n content_type=\"application/json\"\n )", "def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):\n\n for key in header_csv:\n key_struct = key.split(delimiter)\n if key in dic_types.keys():\n # if no value indicated set to default\n if row[key] == '' and 'default' in dic_types[key].keys():\n row[key] = dic_types[key]['default']\n else:\n try:\n # Cast to indicated type\n row[key] = dic_types[key]['type'](row[key]) \n except:\n print(\" [WARN] Can not parse \", row[key] , \"to type\", dic_types[key]['type'])\n jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))\n \n return jstruct", "def test_blank_column(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"\"])\n\n # Define the expected output JSON file path\n 
expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def test_read_object(self):\n\n # the expected dataframe\n result_expected = pd.read_csv(\n StringIO(self.test_csv_content), usecols=[\"col1\"])\n # mock upload csv to s3\n self.bucket.put_object(\n Body=self.test_csv_content, Key=self.test_csv_key)\n result = self.src_bucket_connector.read_object(\n self.test_csv_key, columns=[\"col1\"])\n self.assertTrue(result.equals(result_expected))", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n 
print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def etl_csv_file(input_file_location):\n\n all_employee_dict = {}\n supervisor_employee_dict = {}\n header_row = 'employee_id,first_name,last_name,hire_date,supervisor_id'\n\n with open(input_file_location, mode='r') as employee_csv_file:\n\n # verify the header exists. If the header is not correct error out and return\n first_row = next(employee_csv_file, None)\n if first_row.rstrip() != header_row:\n return False, \"The header row in the %s CSV file must be %s\" % (input_file_location, header_row)\n\n employee_csv_reader = csv.reader(employee_csv_file)\n for count, row in enumerate(employee_csv_reader):\n\n # validate each date in the input file can be casted to datetime object\n try:\n hire_date = datetime.strptime(row[3], '%Y-%m-%d')\n except ValueError as e:\n print (e)\n message = \"There has been an error parsing a date in the input file. Please correct '{0}' at \" \\\n \"line '{1}' so that it follows follows the '2011-03-24' date format.\".format(row[3], count)\n return False, message\n\n employee_id = row[0]\n employee = {\n 'employee_id': employee_id,\n 'first_name': row[1],\n 'last_name': row[2],\n 'hire_date': hire_date,\n }\n\n supervisor_id = row[4]\n\n # This is used later to print out ALL employees according to requirements\n all_employee_dict[employee_id] = 'Sorry, this person is not a supervisor'\n\n # Append to list if key already exists\n group = supervisor_employee_dict.setdefault(supervisor_id, [])\n group.append(employee)\n\n return all_employee_dict, supervisor_employee_dict", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise 
ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def regenerate_json_file():\n \n # filepaths used in generating json data...\n dir_export = f\"C:\\\\Users\\\\{os.getlogin()}\\\\Downloads\"\n list_name = 'my-favourite-films.csv'\n json_output_filename = 'reviews_web_data.json'\n path_to_rootreducer = f'D:\\\\Programming-Projects\\\\nathansteele\\\\src\\\\reducers\\\\RootReducer.js';\n path_to_json_output = f'D:\\\\Programming-Projects\\\\nathansteele\\\\src\\\\components\\\\films\\\\{json_output_filename}'\n \n # 1) find letterboxd zip export...\n prefix = 'letterboxd'\n postfix = 'utc.zip'\n zip_file_name = \"\"\n download_folder = sorted(glob.glob(f\"{dir_export}/*.zip\"), key=os.path.getmtime)\n download_folder.reverse() # reversing the list means most recent downloads are first in the list...\n for filepath in download_folder:\n filename = filepath.split('\\\\')[-1]\n if filename.split('-')[0] == prefix and filename.split('-')[-1] == postfix:\n zip_file_name = filename\n print(f\"Found the zip export! -> {zip_file_name}\")\n break\n else:\n continue\n\n path_to_zip_file = f\"{dir_export}\\\\{zip_file_name}\" # path to zip file we downlodaded from letterboxd...\n path_to_extracted_zip = f\"{dir_export}\\\\{zip_file_name[:-4]}\" # path to extracted zip (which we are about to do)...\n\n # 2) extract zip automatically (if not done already)...\n if os.path.exists(path=path_to_zip_file):\n if not os.path.exists(path=path_to_extracted_zip):\n with zipfile.ZipFile(file=path_to_zip_file , mode='r') as z:\n z.extractall(path_to_extracted_zip)\n print(f\"Extract complete! -> {path_to_extracted_zip}\")\n else:\n print(f'Zip file already exists... No need to extract again... -> ({path_to_extracted_zip})')\n else:\n print(f'Zip file could not be found... -> ({path_to_zip_file})')\n\n\n # local version of letterboxd data list\n letterboxd_list = []\n \n # 3) locate the csv file of interest\n if os.path.exists(path_to_extracted_zip): \n # open csv file...\n path_to_list = f'{path_to_extracted_zip}\\\\lists\\\\{list_name}'\n with open(path_to_list, 'r') as f:\n # csv reader object\n csv_reader = csv.reader(f)\n \n # skip header in the csv file...\n rows_to_skip = 5\n for i in range(0, rows_to_skip):\n next(csv_reader)\n \n # 4) iterate over each row (i.e. 
each film) in csv file...\n for csv_row in csv_reader:\n pos = csv_row[0]\n title = csv_row[1]\n year = csv_row[2]\n letterboxd_url = csv_row[3]\n letterboxd_film_id = letterboxd_url.split('/')[-1]\n genres = []\n directors = []\n duration = 0\n language = \"\"\n imdb_url = \"\"\n imdb_avg_rating = \"\"\n imdb_num_votes = \"\"\n tmdb_url = \"\"\n poster_url = \"\"\n \n # parse data from letterboxd web page... (this takes ~1.21s)\n page = session.get(url=letterboxd_url, verify=False, stream=True)\n soup = BeautifulSoup(page.content, 'html.parser')\n \n # retrieve list of genres...\n div_genres = soup.select_one('#tab-genres')\n for link in div_genres.select('div.text-sluglist a.text-slug[href]'):\n if 'genre' in link['href']:\n genre = link.text.capitalize()\n if genre == 'Science-fiction':\n genre = 'Sci-fi'\n genres.append(genre)\n\n # retrieve duration\n textfooter = soup.select_one('section.col-main > p.text-footer').text\n if 'mins' in textfooter:\n minstext = textfooter.find('mins', 0, len(textfooter))\n duration = textfooter[minstext-5:minstext].strip()\n \n # retrieve the first spoken language\n div_details = soup.select_one('#tab-details')\n for link in div_details.select('div.text-sluglist a.text-slug[href]'):\n if 'language' in link['href']:\n language = link.text.capitalize()\n break\n \n # retrieve IMDB url\n footer = soup.select_one('section.col-main p.text-footer')\n url_buttons = footer.select('a.micro-button')\n for url in url_buttons:\n if 'imdb' in url.text.lower():\n # get IMDB URL\n imdb_url = url['href']\n elif 'tmdb' in url.text.lower():\n # get TMDB URL\n tmdb_url = url['href']\n if len(url_buttons) == 1:\n # if there's only 1 url_button, it's usually the IMDB link that's missing...\n if soup.select_one('#featured-film-header > h1').text == 'xxxHOLiC':\n imdb_url = 'https://www.imdb.com/title/tt16233104/'\n\n # retrieve avg rating on letterboxd\n # page_content_str = page.content.decode('utf-8')\n # letterboxd_avg_rating = soup.select_one('.average-rating')\n\n # Use the OMDB API (https://www.omdbapi.com/) to retrieve some additional data from IMDB website...\n omdb_api_key = '4af56bed' # apparently this expires after 1000 days (today is 07/02/2023, 1000 days from now is 03/11/2025... 
holy shit!)\n imdb_film_id = imdb_url.split('/')[-2]\n url = f'http://www.omdbapi.com/?i={imdb_film_id}&apikey={omdb_api_key}'\n imdb = requests.get(url=url, verify=False)\n imdb_json = json.loads(imdb.text)\n\n # retrieve avg IMD rating\n imdb_avg_rating = imdb_json['imdbRating']\n \n # retrieve number of votes on IMDB (we remove the comma, because react is fucking stupid and cant sort numbers when they contain commas)\n imdb_num_votes = imdb_json['imdbVotes'].replace(',', '')\n \n # retrieve poster from IMDB\n poster_url = imdb_json['Poster']\n if 'SX300' in poster_url:\n poster_url = poster_url.split('SX300')[0]\n \n # retrieve title from IMDB (because letterboxd parsing is shite!)\n # title = imdb_json['Title']\n \n # retrieve list of directors (because letterboxd parsing is shite!)\n directors = imdb_json['Director'].split(', ')\n \n # id of my review!\n titlev2 = Helpers.simplify_movie_title(title=title)\n review_id = f\"{titlev2}-{letterboxd_film_id}-review\"\n \n \"\"\"\n # iterate over \\\\img\\\\films\\\\\n screenshots = []\n directory_in_str = f'D:\\\\Programming-Projects\\\\nathansteele\\\\src\\\\img\\\\films';\n for subdir, dirs, files in os.walk(directory_in_str):\n for file in files:\n # find the right directory...\n title_from_directory_storing_screenshots = subdir.split('\\\\')[-1]\n titlev3 = Helpers.simplify_movie_title(title=title_from_directory_storing_screenshots)\n if titlev3 == titlev2:\n # get my screenshots...\n if 'screenshot' in file:\n screenshot = os.path.join(subdir, file)\n screenshots.append(screenshot)\n \n # use my custom poster if I put one there...\n if 'custom_poster' in file:\n poster_url = os.path.join(subdir, file)\n \"\"\"\n \n # DEBUGGING....\n print(f\" > {pos}: {title}\")\n #print(f' > Title = ({title})')\n #print(f' > IMDB url = ({imdb_url})')\n #print(f' > Language = ({language})')\n #print(f' > Duration = ({duration})')\n #print(f' > Genres = ({genres})')\n #print(f' > IMDB avg rating = ({imdb_avg_rating})')\n #print(f' > IMDB num votes = ({imdb_num_votes})')\n #print(f' > Poster = ({poster_url})')\n #print(f' > Directors = ({directors})')\n #print(f' > Tags = ({my_tags})')\n \n # append to list...\n letterboxd_list.append({\n 'letterboxdFilmId': letterboxd_film_id,\n 'imdbFilmId': imdb_film_id,\n 'letterboxdUrl': letterboxd_url,\n 'imdbUrl': imdb_url,\n 'posterUrl': poster_url,\n 'position': pos,\n 'title': title,\n 'year': year,\n 'duration': duration,\n 'language': language,\n 'imdbAvgRating': imdb_avg_rating,\n 'imdbNumVotes': imdb_num_votes,\n 'tmdbUrl': tmdb_url,\n 'directors': directors,\n 'genres': genres,\n 'reviewId': review_id\n })\n \n # clear json file first...\n open(path_to_json_output, 'w').close()\n \n # write new content...\n with open(path_to_json_output, 'w') as f:\n json.dump(letterboxd_list, f, indent=4)\n \n print(f'JSON export finished! -> {path_to_json_output}')", "def populate(self):\n\n self.create_index()\n self.check_type()\n self.create_mapping()\n\n f = open(self.csv_file, 'rU')\n\n # Read the first line for all the headers\n headers = f.readline().split(',')\n\n # Read the rest of the document\n rows = f.readlines()\n added_counter = 0\n\n actions = []\n for row in rows:\n fields = row.split(',')\n obj = {}\n for header in headers:\n # we call lower-case here because we were originally using\n # analyzed strings in elasticsearch (and they were\n # automatically converted). 
Code was built based on that so it's\n # easiest to convert for now\n try:\n obj[header.replace('\\n', '')] = float(fields[\n headers.index(header)].replace('\\n', '').lower())\n except ValueError:\n obj[header.replace('\\n', '')] = fields[\n headers.index(header)].replace('\\n', '').lower()\n # check afterwards to replace empty strings with None (which json.dumps hopefully writes to null)\n if obj[header.replace('\\n', '')] == '':\n obj[header.replace('\\n', '')] = None\n try:\n item = {\n '_index': self.es_main_index,\n '_type': self.es_main_type,\n '_source': obj\n }\n\n actions.append(item)\n\n added_counter += 1\n print('%s new records added' % added_counter,\n end='\\r')\n sys.stdout.flush()\n\n if added_counter % self.chunk_size == 0:\n helpers.bulk(self.es, actions)\n actions = []\n\n except ConnectionError:\n print('There was a connection error. Check your Elastic' +\n ' Search setting and make sure Elastic Search is ' +\n 'running.')\n return False\n\n # add the remaining items\n if actions:\n helpers.bulk(self.es, actions)\n\n print('The update is completed. %s new records were added.' %\n added_counter)", "def import_from_csv(self) -> None:\n logging.info('import_from_csv')\n if self.target_table and str(self.target_table).lower() in [\"issue\", \"version\"]:\n if self.file_path and exists(self.file_path):\n # Read CSV file\n csv_data = pd.read_csv(self.file_path).to_dict('records')\n\n # Import Version\n if str(self.target_table).capitalize() == \"Version\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Version).delete()\n click.echo('Overwrite Version table')\n\n for version in csv_data:\n if all(item in list(version.keys()) for item in ['tag', 'start_date', 'end_date']):\n newVersion=Version(\n project_id=version['project_id'],\n name=version[\"name\"], \n tag=version[\"tag\"], \n start_date=datetime.strptime(version[\"start_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n end_date=datetime.strptime(version[\"end_date\"], '%Y-%m-%d %H:%M:%S.%f'), \n )\n \n try:\n self.session.add(newVersion)\n compute_version_metrics(self.session, self.configuration.current_branch, newVersion.project_id)\n click.echo('Importing ' + str(len(csv_data)) + ' version(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields')\n\n # Import Issue\n if str(self.target_table).capitalize() == \"Issue\":\n # Overwrite option\n if self.overwrite:\n self.session.query(Issue).delete()\n click.echo('Overwrite Issue table')\n\n for issue in csv_data:\n if all(item in list(issue.keys()) for item in ['number', 'created_at', 'updated_at']):\n newIssue=Issue(\n project_id=issue['project_id'],\n number=issue[\"number\"],\n title=issue[\"title\"],\n created_at=datetime.strptime(issue[\"created_at\"], '%Y-%m-%d %H:%M:%S.%f'),\n updated_at=datetime.strptime(issue[\"updated_at\"], '%Y-%m-%d %H:%M:%S.%f'))\n\n try:\n self.session.add(newIssue)\n click.echo('Importing ' + str(len(csv_data)) + ' issue(s) on database')\n except Exception:\n logging.error(Exception)\n else:\n logging.error(\"CSV file no contain minimal mandatory fields\")\n sys.exit('CSV file no contain minimal mandatory fields') \n\n self.session.commit()\n else:\n logging.error('File not found')\n sys.exit('File not found')\n else:\n logging.error('Target table not found')\n sys.exit('Target table not found')", "def main(input_filepath, output_filepath):\n # return processed data and save in the output files\n 
in_data_y, y_output, in_data = make_data_set(input_filepath)\n in_data_y.to_csv(output_filepath)\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n return in_data_y, y_output, in_data", "def init_csv_file(self):\n folder = \"/home/pi/data/\" + datetime.now().strftime(\"%Y_%m_%d\") + \"/\"\n if not os.path.isdir(folder):\n # append 'a' to the folder name until we find a name that does not exist\n while os.path.exists(folder):\n folder = folder[:-1] + \"a\" + \"/\"\n os.mkdir(folder)\n filename = folder + 'particledata_' + datetime.now().strftime (\"%H-%M-%S\") \n while os.path.exists(filename):\n filename = filename + '_a'\n filename += '.csv'\n log.info('Writing data to: ' + filename)\n self.file = open(filename, \"w\")\n self.file.write('Unix Time;Human Readable Time;pm 2.5;pm 10;Has Fix;Longitude;Latitude;Altitude;GPS Unix Time\\n')\n self.file.flush()\n self.synced_time = False", "def test_read_objects(self):\n\n date1 = \"2021-09-17\"\n date2 = \"2021-09-16\"\n key1 = f\"{date1}.csv\"\n key2 = f\"{date2}.csv\"\n csv_content1 = \"\"\"col1,col2\n valA,valB\n \"\"\"\n csv_content2 = \"\"\"col1,col2\n valC,valD\n \"\"\"\n # the expected dataframe\n df1 = pd.read_csv(StringIO(csv_content1))\n df2 = pd.read_csv(StringIO(csv_content2))\n csv_expected = (pd.concat([df2, df1])).to_csv(index=False)\n # mock upload csv to s3\n self.bucket.put_object(Body=csv_content1, Key=key1)\n self.bucket.put_object(Body=csv_content2, Key=key2)\n csv_result = (self.src_bucket_connector.read_objects(\n \"2021-09-17\", \"all\")).to_csv(index=False)\n self.assertEqual(csv_expected, csv_result)", "def csvfileUsage(self):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.DictReader(file_obj, delimiter=',') # CSV DictReader object\n \"\"\" reader.fieldnames returns header , slicing intial 'Month' and\n 'Year' header from list\n \"\"\"\n for com_names in reader.fieldnames[2:]:\n self.company_data[com_names] = {}\n # iterating each row\n for row in reader:\n month, year = self.parse_my(row) # parsing the year and month from row\n # pop the `Month` and `Year` Key to minimize iteration below\n row.pop('Month'), row.pop('Year')\n \"\"\" saving and updating the data at same point of time\n each iteration time, checking the max value and updating \n `Month` `Year` and `Value`\n \"\"\"\n self.prepare_company_data(month, year, row, self.company_data)\n file_obj.close() # close file\n return self.company_data", "def csv_maker(ctx, output_file):\n ### Plan\n\n ### Configuration\n # Check if campaign_info is not None\n ## If not None\n ### Process the data\n ## Else:\n ### Get data\n ### Process the data\n\n #### Get the data\n # Authenticate to the GoPhish server\n ## Capture auth failures\n # Request campaign data\n # Parse returned data into buckets\n ## Capture bad campaign data\n\n\n if ctx.campaign_info is None: # Command is not chained together, get our own data\n gophish_inst = GoPhish(ctx.api_key, ctx.host, ctx.port, verify=False)\n\n campaign_info = gophish_inst.get_campaigns(ctx.campaign_number)\n\n ctx.campaign_info = campaign_info\n else:\n campaign_info = ctx.campaign_info\n\n # Dict of final values per email\n final_email_dict = dict()\n\n headers = ['Email Address', 'Time Clicked', 'Credentials Harvested', 'Reported', 'Replied to Email', 'Notes']\n\n\n\n for i in campaign_info['timeline']:\n if i['message'] != 'Campaign Created': # and len(i['details']) > 0:\n row = build_row(i)\n # Update file dictionary\n final_email_dict[row['Email Address']] = row\n\n 
with open(output_file, 'w') as f:\n writer = csv.DictWriter(f, headers)\n writer.writeheader()\n for email in final_email_dict:\n writer.writerow(final_email_dict[email])", "def test_content_file(self):\n\n url=[\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/car/car.data\",\n \"http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\"]\n cwd=os.getcwd()\n list_of_files=requester.batch_url_to_csv(url, fnames=[\"m1\", \"m2\",\"m3\"])\n total_rows=0\n reader_list=[]\n for j in range(len(list_of_files)):\n reader=csv.DictReader(list_of_files[j])\n for rows in reader:\n total_rows+=1\n reader_list.append(total_rows)\n\n unique=set((reader_list))\n if len(unique)!=len(reader_list):\n with self.assertRaises(AssertionError):\n requester.batch_url_to_csv(url,fnames=['m1','m2','m3'])", "def write_csv(invocations, job_information, out_file, null_string =\"NA\"):\n\n\t# assume every invocation of a task of a certain type takes the same number of input files\n\tnum_input_files = len(job_information[invocations[0]]['input_files'])\n\t#file_attributes = [\"input_file_%s_kb\"%i for i in range(1, num_input_files + 1)]\n\tfile_attributes = [\"host_name\", \"input_file_sum_kb\"]\n\tusage_attributes = ['utime', 'stime', 'maxrss', 'nvcsw', 'nivcsw', 'nswap', 'minflt', ] # 'majflt', 'inblock', 'outblock', 'nsignals', 'msgsnd', 'msgrcv', 'nswap'\n\tload_attributes = [\"min1\", \"min5\", \"min15\"]\n\tprocs_attributes = [\"total\", \"running\", \"sleeping\", \"waiting\", \"vmsize\", \"rss\"]\n\ttask_attributes = [\"total\", \"running\", \"sleeping\", \"waiting\",]\n\tram_attributes = [\"total\", \"free\", \"shared\", \"buffer\",]\n\tswap_attributes = [\"total\", \"free\",]\n\tmachine_attributes_headers = load_attributes + list(map(lambda a: \"procs_\"+a, procs_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"task_\"+a, task_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"ram_\"+a, ram_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"swap_\"+a, swap_attributes))\n\n\t# the csv column labels\n\theader = ['run_goup', 'run', 'transformation', 'mainjob_started', \"duration\"] + file_attributes + usage_attributes + machine_attributes_headers + ['out_size_kb', 'total_time_s', 'peak_memory_kb']\n#\theader = ['workflow','transformation', 'mainjob_started'] + file_attributes + usage_attributes + machine_attributes_headers + ['out_size_kb', 'total_time_s', 'peak_memory_kb']\n\n\twith open(out_file, 'w', newline='') as csvfile:\n\n\t\tspamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tspamwriter.writerow(header)\n\n\t\tfor job_info in [job_information[job_id] for job_id in invocations]:\n\n\t\t\tfile_sizes = [float(file['size']) for file in job_info['input_files']]\n\t\t\tusage_values = [float(job_info['usage'][attr]) for attr in usage_attributes]\n#\n\t\t\ttry:\n\t\t\t\tout_size = sum([float(file['size']) for file in job_info['output_files']])\n\t\t\texcept KeyError as k:\n\t\t\t\tout_size = null_string\n#\n\t\t\tpeak_mem = float(job_info['usage']['maxrss'])\n\t\t\tmachine_values = []\n\n\t\t\tfor machine_attrs, attrs in [(\"load\", load_attributes), (\"procs\", procs_attributes), (\"task\", task_attributes), (\"ram\", ram_attributes), (\"swap\", swap_attributes)]:\n\t\t\t\tfor attr in attrs:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmachine_values.append(job_info[machine_attrs][attr])\n\t\t\t\t\texcept 
KeyError:\n\t\t\t\t\t\tmachine_values.append(null_string)\n\n#\t\t\tdata = [job_info[\"workflow\"], job_info[\"transformation\"], job_info['mainjob_started_ts']] + file_sizes + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n\t\t\tdata = [job_information[\"run_group\"], job_information[\"run\"], job_info[\"transformation\"], job_info['mainjob_started_ts'], job_info[\"mainjob_duration\"]] + [job_info['host_name']] + [sum(file_sizes)] + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n#\t\t\tdata = [job_info[\"transformation\"], job_info['mainjob_started_ts']] + file_sizes + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n\t\t\tspamwriter.writerow(data)", "def run_create_hyper_file_from_csv():\n if args.preprocessed:\n print('running on 4 columns')\n else:\n print('running on 16 columns')\n\n load_time = -1\n query_time = -1\n tstart = time.time()\n path_to_database = Path(\"lineitem.hyper\")\n\n # Optional process parameters.\n # They are documented in the Tableau Hyper documentation, chapter \"Process Settings\"\n # (https://help.tableau.com/current/api/hyper_api/en-us/reference/sql/processsettings.html).\n process_parameters = {\n # Limits the number of Hyper event log files to two.\n #\"log_file_max_count\": \"2\",\n # Limits the size of Hyper event log files to 100 megabytes.\n #\"log_file_size_limit\": \"100M\"\n \"soft_concurrent_query_thread_limit\" : \"16\",\n \"hard_concurrent_query_thread_limit\" : \"16\",\n \"memory_limit\" : \"100g\"\n }\n\n # single threaded?\n if args.single_threaded:\n process_parameters[\"soft_concurrent_query_thread_limit\"] = \"1\"\n process_parameters[\"hard_concurrent_query_thread_limit\"] = \"1\"\n\n result = None\n\n # Starts the Hyper Process with telemetry enabled to send data to Tableau.\n # To opt out, simply set telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU.\n with HyperProcess(telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU,\n parameters=process_parameters) as hyper:\n\n # Optional connection parameters.\n # They are documented in the Tableau Hyper documentation, chapter \"Connection Settings\"\n # (https://help.tableau.com/current/api/hyper_api/en-us/reference/sql/connectionsettings.html).\n connection_parameters = {\"lc_time\": \"en_US\"}\n\n # Creates new Hyper file \"customer.hyper\".\n # Replaces file with CreateMode.CREATE_AND_REPLACE if it already exists.\n with Connection(endpoint=hyper.endpoint,\n database=path_to_database,\n create_mode=CreateMode.CREATE_AND_REPLACE,\n parameters=connection_parameters) as connection:\n\n table_name = ''\n if args.preprocessed:\n connection.catalog.create_table(table_definition=lineitem_table_preprocessed)\n table_name = lineitem_table_preprocessed.table_name\n else:\n connection.catalog.create_table(table_definition=lineitem_table)\n table_name = lineitem_table.table_name\n\n # Using path to current file, create a path that locates CSV file packaged with these examples.\n path_to_csv = args.data_path\n\n # Load all rows into \"Lineitem\" table from the CSV file.\n # `execute_command` executes a SQL statement and returns the impacted row count.\n count_in_lineitem_table = connection.execute_command(\n command=f\"COPY {table_name} from {escape_string_literal(path_to_csv)} with \"\n f\"(format csv, NULL 'NULL', delimiter '|')\")\n\n print(f\"The number of rows in table {lineitem_table.table_name} is {count_in_lineitem_table}.\")\n load_time = time.time() - tstart\n print('Loading CSV to Hyper took 
{}s'.format(load_time))\n tstart = time.time()\n # issue query\n # here, TPC-H Q6\n # SELECT\n # sum(l_extendedprice * l_discount) as revenue\n # FROM\n # lineitem\n # WHERE\n # l_shipdate >= date '1994-01-01'\n # AND l_shipdate < date '1994-01-01' + interval '1' year\n # AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n # AND l_quantity < 24;\n\n q = ''\n if args.preprocessed:\n q = f\"\"\"SELECT\n sum(l_extendedprice * l_discount) as revenue\nFROM\n {table_name}\nWHERE\n l_shipdate >= 19940101\n AND l_shipdate < 19950101\n AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n AND l_quantity < 24\"\"\"\n else:\n q = f\"\"\"SELECT\n sum(l_extendedprice * l_discount) as revenue\nFROM\n {table_name}\nWHERE\n l_shipdate >= date '1994-01-01'\n AND l_shipdate < date '1994-01-01' + interval '1' year\n AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n AND l_quantity < 24\"\"\"\n\n result = connection.execute_list_query(query=q)\n query_time = time.time() - tstart\n print('Query took {}s'.format(query_time))\n print('Result::')\n print(result)\n \n print(\"The connection to the Hyper file has been closed.\")\n print(\"The Hyper process has been shut down.\")\n print('framework,version,load,query,result\\n{},{},{},{},{}'.format('hyper',hyperversion,load_time, query_time, str(result)))", "def make(filename):\r\n\r\n # Source file is csv file\r\n extension = \".csv\"\r\n if filename.endswith(extension):\r\n return JSONFromCSV(re.sub((extension + \"$\"), \"\", filename))\r\n\r\n return None", "def test_read_results(self, setup_folder_structure, config_handler):\n modelrun = \"energy_transport_baseline\"\n model = \"energy_demand\"\n decision_iteration = 1\n output = \"electricity_demand\"\n timestep = 2020\n output_spec = Spec(\n name=\"electricity_demand\",\n unit=\"MWh\",\n dtype=\"float\",\n dims=[\"region\", \"interval\"],\n coords={\"region\": [\"oxford\"], \"interval\": [1]},\n )\n\n expected_data = np.array([[2.0]])\n expected = DataArray(output_spec, expected_data)\n csv_contents = \"region,interval,electricity_demand\\noxford,1,2.0\\n\"\n\n path = os.path.join(\n str(setup_folder_structure),\n \"results\",\n modelrun,\n model,\n \"decision_{}\".format(decision_iteration),\n \"output_{}_timestep_{}\".format(output, timestep),\n )\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n with open(path + \".csv\", \"w\") as fh:\n fh.write(csv_contents)\n actual = config_handler.read_results(\n modelrun, model, output_spec, timestep, decision_iteration\n )\n assert actual == expected", "def convert_to_json(self, csvname):\n csvfile = open(csvname, 'r')\n self.__obtain_csv_delimiter__(csvfile)\n self.__obtain_csv_fieldnames__(csvfile)\n data = self.__obtain_data_from_csv__(csvfile)\n jsons = self.__convert_data_to_list_of_dict__(data)\n list_of_jsons = self.__transform_list_to_jsons__(jsons)\n csvfile.close()\n return list_of_jsons", "def main():\n try:\n kerbals_csv = pd.read_csv(\"kerbals.csv\")\n except FileNotFoundError:\n print(\"Kerbals csv file not found in current directory!\")\n sys.exit(1)\n kerbals_csv.to_json(\"kerbals.json\", orient=\"records\")\n kerbals_json = open(\"kerbals.json\")\n print(kerbals_json.read())\n return 0", "def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()", "def csv_to_json(file_path: Path) -> dict:\n\n output = {}\n\n with open(file_path, \"r\", 
) as f:\n columns = get_columns_from_csv(file_path)\n reader = csv.DictReader(f, fieldnames=columns)\n next(reader, None)\n for row in reader:\n powerplan = row[\"DESCRIPTION\"]\n\n if powerplan not in output and powerplan:\n output[powerplan] = {\n k: v\n for k, v in row.items()\n if not k.startswith(\"PHASE\") and not k.startswith(\"DOT\")\n }\n\n output[powerplan][\"phases\"] = {}\n\n phase = row[\"PHASE_DESCRIPTION\"]\n\n if phase not in output[powerplan][\"phases\"] and phase:\n output[powerplan][\"phases\"][phase] = {\n k: v for k, v in row.items() if k.startswith(\"PHASE\")\n }\n\n output[powerplan][\"phases\"][phase][\"dots\"] = {}\n\n dot = row[\"DOT_DESCRIPTION\"]\n\n if phase:\n if dot not in output[powerplan][\"phases\"][phase][\"dots\"] and dot:\n output[powerplan][\"phases\"][phase][\"dots\"][dot] = {\n k: v for k, v in row.items() if k.startswith(\"DOT\")\n }\n return output", "def csv2json(csvfile, jsonfile=None):\n if not hasattr(csvfile, \"read\"):\n csvfile = open(csvfile, \"r\")\n if (jsonfile is not None) and (not hasattr(jsonfile, \"write\")):\n jsonfile = open(jsonfile, \"w\")\n csvdata = list(csv.reader(csvfile))\n fieldnames = csvdata[0]\n # use 'OrderedDict' to keep fields order\n jsondata = [ OrderedDict(zip(fieldnames, row)) for row in csvdata[1:] ]\n csvfile.close()\n if jsonfile is None:\n return jsondata\n else:\n # 'ensure_ascii=False' to support UTF-8\n json.dump(jsondata, jsonfile, ensure_ascii=False, indent=4)\n jsonfile.close()", "def convert_csv_to_alfed(self) -> None:\n global output_path, file_name\n self.parse_command_line_args()\n self.validate_command_line_args()\n\n for _, _, files in walk(self.args.input):\n for output_file in files:\n if output_file.endswith(\".csv\"):\n file_name, _ = path.splitext(output_file)\n output_path = \"\"\n output_path = path.join(self.args.output, file_name)\n\n try:\n mkdir(output_path)\n print(f\"Creating folder {output_path}...\")\n except OSError:\n print(f\"Creation of directory {output_path} failed\")\n\n with open(path.join(self.args.input, output_file), \"rt\") as csv_file:\n reader = DictReader(csv_file, fieldnames=self.args.fieldorder)\n\n for row in reader:\n uid = str(uuid.uuid1()).upper()\n row[\"content\"] = self.replace_embedded_snipptes(row[\"content\"], self.args.lplaceholder,\n self.args.rplaceholder, self.args.changeplaceholders)\n output = dumps(\n {\n \"alfredsnippet\": {\n \"snippet\": row['content'],\n \"uid\": uid,\n \"name\": row['name'],\n \"keyword\": row['abbreviation']\n }\n },\n sort_keys=False, indent=4,\n separators=(',', ': ')\n )\n\n output_file = f\"{row['name']}_[{uid}].json\"\n target = path.join(output_path, output_file)\n f = open(target, \"w\")\n f.write(output)\n f.close()\n print(f\"Writing file {target}...\")\n else:\n self.error_msg(\"The files in the input folder are not with extension '*.csv'\")\n\n subprocess.call(\n [\n 'ditto',\n '--norsrc',\n '-ck',\n output_path,\n self.args.output + \"/\" + file_name + \".alfredsnippets\"\n ]\n )\n print(f\"{self.args.output}/{file_name}.alfredsnippets was created\")\n self.created_folders.append(file_name)\n\n self.remove_temp_folders()", "def generate_ev_file(id_test):\n print(\"generate_ev_file\")\n \n ev_output_file_name=id_test+\".ev\"\n ev_input_file_name=id_test+\"_events.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+ev_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+ev_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n 
if (field_names_list[0]!=\"campaign\"):\n print(\"First field of events file input should be 'campaing' but is:\"+field_names_list[0])\n print(\"Cannot generate event file\")\n return\n else:\n print(field_names_list)\n for line in input_reader:\n #generate timestamp for campaign\n #campania = int(int(ms)/100)+int(ss)*10+int(mm)*600+int(hh)*36000\n campaign = int(line[0])\n ms = (campaign*100)%1000\n ss = ((campaign*100)//1000)%60\n mm = ((campaign*100)//60000)%60\n hh = ((campaign*100)//360000)\n timeFormat = \"{:0>2d}\"\n msFormat = \"{:0>3d}\"\n timestamp_begin_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms))\n timestamp_end_event = str(timeFormat.format(hh))+\":\"+ \\\n str(timeFormat.format(mm))+\":\"+ \\\n str(timeFormat.format(ss))+\":\"+ \\\n str(msFormat.format(ms+1))\n \n print(\"timestamp generated: \"+timestamp_begin_event)\n \n #generate events\n #begin events\n \n \n port_idx =0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n begin_event=CELL_DEVS_EXTERNAL_EVENT_BEGIN+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n \n f_output.write(timestamp_begin_event+\" \"+begin_event+\"\\n\")\n \n #end events\n port_idx=0\n for event_port in line[1:]:\n port_idx=port_idx+1\n #print(\"processing port: \"+str(field_names_list[port_idx]))\n end_event=CELL_DEVS_EXTERNAL_EVENT_ENDS+ \\\n field_names_list[port_idx]+ \\\n \" \"+str(line[port_idx])\n f_output.write(timestamp_end_event+\" \"+end_event+\"\\n\")\n \n \n \n f_input.close()\n f_output.close()", "def clean_file(csv_file):\n my_list = []\n with open(csv_file, newline='') as csvfile:\n file_reader = csv.reader(csvfile, delimiter=',', quotechar=\" \")\n for row in file_reader:\n my_list.append(row)\n\n \"\"\"\n > Part Two\n Input: Nested list csv_table and a string file_name\n Action: Write fields in csv_table into a comma-separated CSV file with the name file_name\n Mutates output: Yes\n \"\"\"\n with open(csv_file, 'w', newline='') as csvfile:\n my_csv_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)\n for row in my_list:\n row2 = []\n for item in row:\n a = item.lstrip('\"')\n b = a.rstrip('\"')\n row2.append(b)\n my_csv_writer.writerow(row2)", "def test_37_bulk_csv_import_no_column_names(self, Mock, mock):\r\n empty_file = FakeRequest('Foo,Bar,Baz\\n1,2,3', 200,\r\n {'content-type': 'text/plain'})\r\n Mock.return_value = empty_file\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',\r\n 'formtype': 'csv'},\r\n follow_redirects=True)\r\n task = db.session.query(Task).first()\r\n assert {u'Bar': u'2', u'Foo': u'1', u'Baz': u'3'} == task.info\r\n assert \"1 Task imported successfully!\" in res.data", "def process_csv(self, user: User, csv_file):\n self.db_session.rollback()\n csv = pandas.read_csv(StringIO(csv_file.read().decode('utf-8')))\n missing_cols = [col_name for col_name in CSV_SENSOR_MAP.values() if col_name not in csv.columns.values]\n if missing_cols:\n raise OBDControllerError(f'CSV is missing the following columns: {\", \".join(missing_cols)}')\n\n csv = csv[CSV_SENSOR_MAP.values()]\n start_datetime = self._resolve_date_from_csv_row(csv.iloc[0])\n gen_session_id = str(start_datetime.timestamp()).replace('.', '')[:12]\n\n if 
self.db_session.query(OBDSession).filter(OBDSession.id == gen_session_id).first():\n return\n\n session = OBDSession.create(self.db_session, id=gen_session_id, user_id=user.id, date=start_datetime)\n _ = CarState.create_from_csv(self.db_session, session, csv)\n self.db_session.commit()", "def test_process(self, tmp_path):\n export_dir = tmp_path / 'export'\n export_dir.mkdir()\n\n process(['raw_data/small_raw_data_0.csv',\n 'raw_data/small_raw_data_1.csv',\n # File 2 does not exist.\n 'raw_data/small_raw_data_2.csv',\n 'raw_data/small_raw_data_3.csv'],\n SIGNALS,\n ['median_home_dwell_time',\n 'completely_home_prop_7dav'],\n ['state'],\n export_dir)\n\n expected = {\n 'wip_median_home_dwell_time': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [6, 3.5],\n 'se': [None, 0.5],\n 'sample_size': [1, 2]\n }),\n 'completely_home_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.15, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'part_time_work_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.35, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'full_time_work_prop': pd.DataFrame(data={\n 'geo_id': ['al', 'ga'],\n 'val': [0.45, 0.055],\n 'se': [None, 0.005],\n 'sample_size': [1, 2]\n }),\n 'median_home_dwell_time_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [4.5, 3.5, 7.5],\n 'se': [1.5, 0.5, 0.5],\n 'sample_size': [2, 2, 2]\n }),\n 'wip_completely_home_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.1, 0.055, 0.15],\n 'se': [0.05, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n }),\n 'part_time_work_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.25, 0.055, 0.25],\n 'se': [0.1, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n }),\n 'full_time_work_prop_7dav': pd.DataFrame(data={\n 'geo_id': ['al', 'ga', 'pa'],\n 'val': [0.35, 0.055, 0.35],\n 'se': [0.1, 0.005, 0.05],\n 'sample_size': [2, 2, 2]\n })\n }\n actual = {signal: pd.read_csv(\n export_dir / f'20200612_state_{signal}.csv')\n for signal in expected}\n for signal in expected:\n pd.testing.assert_frame_equal(expected[signal], actual[signal])", "def process_csv_data(file_for_processing: FileForProcessing):\n \n if file_for_processing.file_to_process.os_type == ANDROID_API:\n # Do fixes for Android\n if file_for_processing.data_type == ANDROID_LOG_FILE:\n file_for_processing.file_contents = fix_app_log_file(\n file_for_processing.file_contents, file_for_processing.file_to_process.s3_file_path\n )\n \n header, csv_rows_list = csv_to_list(file_for_processing.file_contents)\n if file_for_processing.data_type != ACCELEROMETER:\n # If the data is not accelerometer data, convert the generator to a list.\n # For accelerometer data, the data is massive and so we don't want it all\n # in memory at once.\n csv_rows_list = list(csv_rows_list)\n \n if file_for_processing.data_type == CALL_LOG:\n header = fix_call_log_csv(header, csv_rows_list)\n if file_for_processing.data_type == WIFI:\n header = fix_wifi_csv(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n else:\n # Do fixes for iOS\n header, csv_rows_list = csv_to_list(file_for_processing.file_contents)\n \n if file_for_processing.data_type != ACCELEROMETER:\n csv_rows_list = list(csv_rows_list)\n \n # Memory saving measure: this data is now stored in its entirety in csv_rows_list\n file_for_processing.clear_file_content()\n \n # Do these fixes for data whether from Android or iOS\n if file_for_processing.data_type == IDENTIFIERS:\n header = 
fix_identifier_csv(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n if file_for_processing.data_type == SURVEY_TIMINGS:\n header = fix_survey_timings(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n \n header = b\",\".join([column_name.strip() for column_name in header.split(b\",\")])\n if csv_rows_list:\n return (\n # return item 1: the data as a defaultdict\n binify_csv_rows(\n csv_rows_list,\n file_for_processing.file_to_process.study.object_id,\n file_for_processing.file_to_process.participant.patient_id,\n file_for_processing.data_type,\n header\n ),\n # return item 2: the tuple that we use as a key for the defaultdict\n (\n file_for_processing.file_to_process.study.object_id,\n file_for_processing.file_to_process.participant.patient_id,\n file_for_processing.data_type,\n header\n )\n )\n else:\n return None, None", "def ProcessCSV(self, input_file, verbose, output_file):\n row_dict = self.CSVReader(input_file)\n report = []\n output_field_names = row_dict.fieldnames\n output_field_names.append('status')\n\n for row in row_dict:\n if 'action' in row.keys():\n if row['action'] == 'create':\n self.CreateUser(row)\n elif row['action'] == 'update':\n self.UpdateUser(row)\n elif row['action'] == 'delete':\n self.DeleteUser(row)\n else:\n row['status'] = ('Error: action must be create, update, or delete for'\n ' username %s' % (row['user_name']))\n else:\n print 'error - action is a required header in the input CSV file'\n sys.exit()\n\n # delete password attribute so we dont output that to screen or CSV\n if 'password' in row.keys():\n del row['password']\n report.append(row)\n if verbose is True:\n output = []\n list_tup = ()\n for k, v in row.items():\n if v:\n list_tup = (k, v)\n output.append(list_tup)\n print output\n self.OutputWriter(report, output_file, output_field_names)", "def test_36_bulk_csv_import_dup_header(self, Mock, mock):\r\n empty_file = FakeRequest('Foo,Bar,Foo\\n1,2,3', 200,\r\n {'content-type': 'text/plain'})\r\n Mock.return_value = empty_file\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',\r\n 'formtype': 'csv'},\r\n follow_redirects=True)\r\n msg = \"The file you uploaded has two headers with the same name\"\r\n assert msg in res.data" ]
[ "0.711705", "0.6895903", "0.6818029", "0.6596224", "0.6538705", "0.6498996", "0.647413", "0.64032245", "0.632765", "0.63111657", "0.6258295", "0.62576884", "0.62437224", "0.6213409", "0.6196973", "0.6165735", "0.6092725", "0.6078405", "0.607367", "0.60391676", "0.6012014", "0.600576", "0.59839934", "0.59822106", "0.59682006", "0.59545165", "0.59063864", "0.5858894", "0.5857819", "0.58520776", "0.58309704", "0.58301955", "0.58204997", "0.5819329", "0.58058804", "0.5795173", "0.5788776", "0.5776339", "0.57595223", "0.57526", "0.57452226", "0.5734116", "0.5723442", "0.5707255", "0.5702719", "0.56891984", "0.5680097", "0.56797516", "0.5674679", "0.56656325", "0.56612134", "0.5650292", "0.56455576", "0.5631241", "0.56285626", "0.56281734", "0.5620955", "0.5613821", "0.55952907", "0.5594669", "0.5590521", "0.5587496", "0.5575328", "0.5574406", "0.55547774", "0.554526", "0.55336106", "0.55269235", "0.552468", "0.552468", "0.5524523", "0.5503814", "0.5503781", "0.55037504", "0.55001986", "0.54875827", "0.5482757", "0.54816085", "0.5477978", "0.5472631", "0.54630435", "0.54453665", "0.54444623", "0.54416823", "0.5433523", "0.54331785", "0.5433052", "0.5430957", "0.54243374", "0.54231185", "0.5422861", "0.54210377", "0.5420514", "0.5409784", "0.540862", "0.5395122", "0.5390123", "0.538975", "0.53895426", "0.5388964" ]
0.7492845
0
Description When is given a csv_filepath and output_filepath and already exists the file Expected Result concatenate the old json file with the values found in 2nd reading.
def test_when_file_already_exist(self): # Create a temporary directory for test files temp_dir = ["test_files/observed", "test_files/forecast", "test_files/output"] for dir in temp_dir: os.makedirs(dir, exist_ok=True) # Create the 1st csv file first_csv_filepath = os.path.join(temp_dir[0], "Abadia-BA_-11.56_-37.52.csv") with open(first_csv_filepath, "w", newline="") as csv_file: writer = csv.writer(csv_file, delimiter=";") writer.writerow(["periods", "precipitation", "temperature", "max_temperature"]) writer.writerow(["2023-01-01", "5", "25", "30"]) writer.writerow(["2023-01-02", "10", "23", "28"]) # Creating the 2nd csv file in different directory second_csv_filepath = os.path.join(temp_dir[1], "Abadia-BA_-11.56_-37.52.csv") with open(second_csv_filepath, "w", newline="") as csv_file: writer = csv.writer(csv_file, delimiter=";") writer.writerow(["periods", "precipitation", "temperature", "max_temperature"]) writer.writerow(["2023-01-01", "5", "25", "30"]) writer.writerow(["2023-01-02", "10", "23", "28"]) # Define the expected output JSON file path expected_output_filepath = os.path.join(temp_dir[2], "BA_Abadia.json") # Call the function under test extractor.csv_to_json(first_csv_filepath, temp_dir[2]) extractor.csv_to_json(second_csv_filepath, temp_dir[2]) # Verify that the output JSON file exists assert os.path.exists(expected_output_filepath) # Load the output JSON file with open(expected_output_filepath, "r") as json_file: json_data = json.load(json_file) # Verify the contents of the JSON file expected_data = { "city": "Abadia", "state": "BA", "coordinates": ["-11.56", "-37.52"], "observed": { "periods": ["2023-01-01", "2023-01-02"], "precipitation": ["5", "10"], "temperature": ["25", "23"], "max_temperature": ["30", "28"] }, "forecast": { "periods": ["2023-01-01", "2023-01-02"], "precipitation": ["5", "10"], "temperature": ["25", "23"], "max_temperature": ["30", "28"] }, } # Assertion assert json_data == expected_data # Clean up the temporary directory and files os.remove(first_csv_filepath) os.remove(second_csv_filepath) os.remove(expected_output_filepath) for dir in temp_dir: os.rmdir(dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information 
inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def get_concatenated_csv_data(concatenated_filepath, concatenated_filename, device_id, output_create_files_filepath, output_create_files_filename):\n\n # Create the full file name of the concatenated filename.\n concatenated_file = concatenated_filepath + \"/\" + concatenated_filename + \"_concatenated.csv\"\n print(\"Looking for concatenated file name: \", concatenated_file)\n\n # Test if the concatenated file exists and if it does, return it.\n if os.path.isfile(concatenated_file):\n print(\"Concatenated file exists: \", concatenated_file)\n return concatenated_file\n\n # If it does not exist, test if the individual files exist.\n elif not os.path.isfile(concatenated_file):\n print(\"Concatenated file does not exist. Create file: \", concatenated_file)\n file_list = get_data_from_files(concatenated_filepath, concatenated_filename)\n # print(\"File list:\", file_list)\n\n # If the individual files exist, create the concatenated file.\n if len(file_list) > 0:\n print(\"Individual csv files exist. 
Creating the concatenated file.\")\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file\n\n # If the individual files do not exist, get the data from the database, create the files then concatenate them.\n else:\n database_query = \"select * from ship_data_gpggagpsfix where device_id=\" + int(\n device_id) + \" order by date_time;\"\n # print(database_query)\n password = input()\n\n db_connection = MySQLdb.connect(host='localhost', user='ace', passwd=password, db='ace2016', port=3306);\n\n track_df = get_data_from_database(database_query, db_connection)\n track_df = string_to_datetime(track_df)\n\n # Output the data into daily files (as they do not already exist).\n output_daily_files(track_df, output_create_files_filepath, output_create_files_filename)\n\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file", "def combine(self, input_file, output_file):\n \n csvOutput = self.csvData.readCsv(input_file)\n \n # Extract csv column names\n columnsNmes = csvOutput[0]\n \n response = []\n \n # Remove first row from csv output (columns names)\n iterCsvOutput = iter(csvOutput)\n next(iterCsvOutput)\n # Get all api data with one call\n apiResponse = self.apiData.getContent(self.apiUrl)\n\n # Iterate over Csv lines\n for item in iterCsvOutput: \n\n # For each CSV line find corresponding account in Api response\n for apiItem in apiResponse['results']:\n\n # I Api response match Csv row combine data\n if str(apiItem['account_id']) == item[0]: \n # Add response form Api\n item.insert(4, apiItem['status'])\n item.insert(5, apiItem['created_on'])\n response.append(item)\n \n # Add row with new column names\n columnsNmes.insert(4, 'Status')\n columnsNmes.insert(5, 'Status Set On')\n response.insert(0, columnsNmes)\n\n # Generate csv output file\n self.csvData.writeCsv(response, output_file)\n \n return 'Given output file has been generated'", "def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename", "def create_json_from_csv(csv_file, delimiter, cols_delimiter, 
keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def merge_csv_daily(output_filename, path):\n\n # import csv files from folder\n allFiles = glob.glob(path + \"*.csv\")\n\n with open(output_filename, 'wb+') as outfile:\n for i, fname in enumerate(allFiles):\n with open(fname, 'rb') as infile:\n if i != 0:\n infile.readline() # Throw away header on all but first file\n # Block copy rest of file from input to output without parsing\n shutil.copyfileobj(infile, outfile)\n # print(fname + \" has been imported.\")\n\n # adding MissingObs column back:\n df = pd.read_csv(output_filename, header=0, sep=',', index_col=[0,1], parse_dates=False)\n df.insert(loc=3, column='MissingObs', value=np.zeros((df.shape[0], )))\n df.to_csv(output_filename, sep=',')\n\n return output_filename", "def process_files_json():\n # chdir into beep root\n pwd = os.getcwd()\n os.chdir(os.environ.get(\"BEEP_ROOT\", \"/\"))\n\n meta_list = list(filter(lambda 
x: '_Metadata.csv' in x, os.listdir(SRC_DIR)))\n file_list = list(filter(lambda x: '.csv' in x if x not in meta_list else None, os.listdir(SRC_DIR)))\n all_list = list(filter(lambda x: '.csv' in x, os.listdir(SRC_DIR)))\n\n all_list = sorted(all_list)\n dumpfn(all_list, \"all_files.json\")\n\n [file_id, mapdf] = init_map(PROJECT_NAME, DEST_DIR)\n\n new_file_index = file_id\n\n for filename in tqdm(sorted(file_list)):\n # If the file has already been renamed another entry should not be made\n if mapdf['filename'].str.contains(filename).sum() > 0:\n continue\n old_file = os.path.join(SRC_DIR, filename)\n new_path = os.path.join(DEST_DIR, PROJECT_NAME)\n shutil.copy(old_file, new_path) # copy main data file\n shutil.copy(old_file.replace(\".csv\", '_Metadata.csv'), new_path) # copy meta data file\n\n if PROJECT_NAME == 'FastCharge':\n [date, channel_no, strname, protocol] = get_parameters_fastcharge(filename, SRC_DIR)\n elif PROJECT_NAME == 'ClosedLoopOED':\n [date, channel_no, strname, protocol] = get_parameters_oed(filename, SRC_DIR)\n else:\n raise ValueError(\"Unsupported PROJECT_NAME: {}\".format(PROJECT_NAME))\n\n df_dup = mapdf.set_index(['protocol', 'date'])\n if (protocol, date) in df_dup.index:\n row = mapdf[(mapdf['protocol'] == protocol) & (mapdf['date'] == date)]\n file_id = row['fid'].iloc[0]\n protocol = row['protocol'].iloc[0]\n date = row['date'].iloc[0]\n strname = row['strname'].iloc[0]\n else:\n file_id = new_file_index\n new_file_index = new_file_index + 1\n\n new_name = \"{}_{}_{}\".format(PROJECT_NAME, f'{file_id:06}', channel_no)\n new_file = os.path.join(DEST_DIR, PROJECT_NAME, \"{}.csv\".format(new_name))\n\n new_row = pd.DataFrame([[file_id, protocol, channel_no, date, strname,\n os.path.abspath(old_file),\n os.path.abspath(new_file)]],\n columns=METADATA_COLUMN_NAMES)\n mapdf = mapdf.append(new_row)\n\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename), new_file)\n os.rename(os.path.join(DEST_DIR, PROJECT_NAME, filename).replace(\".csv\", \"_Metadata.csv\"),\n new_file.replace(\".csv\", \"_Metadata.csv\"))\n\n mapdf.to_csv(os.path.join(DEST_DIR, PROJECT_NAME, PROJECT_NAME + \"map.csv\"), index=False)\n mapdf = mapdf.reset_index(drop=True)\n os.chdir(pwd)\n return json.dumps(mapdf.to_dict(\"list\"))", "def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n 
\"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def seed_from_csv_diff(original_file_path, new_file_path, model, **kwargs):\n\n original_diff_set = set()\n new_diff_set = set()\n new_file = open(new_file_path, 'r')\n headers = new_file.readline().replace('\\n', '').split(',')\n new_reader = model.update_set_filter(csv.reader(new_file), headers)\n\n original_file = open(original_file_path, 'r')\n original_reader = csv.reader(original_file)\n next(original_reader, None)\n logger.debug(\" * Beginning CSV diff process.\")\n\n for row in new_reader:\n new_diff_set.add(json.dumps(row))\n\n for row in original_reader:\n original_diff_set.add(json.dumps(row))\n\n diff = new_diff_set - original_diff_set\n temp_file_path = os.path.join(settings.MEDIA_TEMP_ROOT, str(\n 'set_diff' + str(random.randint(1, 10000000))) + '.mock' if settings.TESTING else '.csv')\n with open(temp_file_path, 'w') as temp_file:\n writer = csv.writer(temp_file, delimiter=',')\n writer.writerow(headers)\n for row in diff:\n writer.writerow(json.loads(row))\n\n diff_gen = from_csv_file_to_gen(temp_file_path, kwargs['update'])\n logger.debug(\" * Csv diff completed, beginning batch upsert.\")\n batch_upsert_from_gen(model, diff_gen, settings.BATCH_SIZE, **kwargs)\n if os.path.isfile(temp_file_path):\n os.remove(temp_file_path)\n if 'callback' in kwargs and kwargs['callback']:\n kwargs['callback']()", "def concat_vsource_sink_csv(csv_fn1,csv_fn2,merged_source_sink_in,\n csv_type,csv_merged,freq='infer',how='left'):\n # merged_source_sink_in: the merged source_sink.in or source_sink.yaml file \n # where the data sources are from csv_fn1, csv_fn2. \n if merged_source_sink_in.endswith('yaml'):\n df_sources,df_sinks = read_source_sink_yaml(merged_source_sink_in)\n elif merged_source_sink_in.endswith('in'):\n df_sources,df_sinks = read_source_sink_in(merged_source_sink_in)\n else:\n raise NotImplementedError(\n 'merged_source_sink_in can either be .yaml or .in file')\n if csv_type == 'sources':\n sites = df_sources.index\n elif csv_type == 'sink':\n sites = df_sinks.index\n else:\n raise NotImplementedError('csv_type can either be sources or sinks')\n th1 = read_source_sink_csv(csv_fn1)\n th2 = read_source_sink_csv(csv_fn2)\n if freq=='infer':\n if th1.index.freq!=th2.index.freq:\n print(\"th1 and th2 has different frequency\")\n else:\n th1 = th1.asfreq(freq)\n th2 = th2.asfreq(freq)\n th_merged = th1.join(th2,how=how,rsuffix='r').drop(columns=['datetimer'])\n th_merged = th_merged.fillna(-9999.0)\n cols = np.append(['datetime'],sites)\n th_merged = th_merged[cols] #rearrange the array to have the same order as defined in merged_source_sink_in\n th_merged['datetime'] = np.datetime_as_string(th_merged.index.values,'h')\n write_source_sink_csv(th_merged,csv_merged)", "def recalc_csv(input_path, config_file, family_name, source_string, quiet):\n csv_file = getCsvPath(input_path)\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n # If config.json doesn't exist, it has to be created before.\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='yellow')\n\n if os.path.exists(csv_file) and not quiet:\n confirmation = click.confirm(\n '\\n{} already exists. 
Do you want to overwrite it?'.format(csv_file))\n if confirmation is True:\n csvHandler(csv_file).recalcCSV(\n config_file=config_file, family_name=family_name, source_string=source_string)\n click.secho('\\n{} created'.format(csv_file), fg='green')\n else:\n # Let's ensure that, if the data.csv file doesn't exist,\n # it is created before recalculation.\n if not os.path.exists(csv_file):\n csvHandler(csv_file).resetCSV(config_file=config_file)\n\n csvHandler(csv_file).recalcCSV(\n config_file=config_file, family_name=family_name, source_string=source_string)\n click.secho('\\n{} created'.format(csv_file), fg='green')", "def create_manifest_file(csv_file, manifest_file, s3_path):\n logger.info(\"Processing CSV file %s.\", csv_file)\n\n image_count = 0\n anomalous_count = 0\n\n with open(csv_file, newline='', encoding=\"UTF-8\") as csvfile,\\\n open(manifest_file, \"w\", encoding=\"UTF-8\") as output_file:\n\n image_classifications = csv.reader(\n csvfile, delimiter=',', quotechar='|')\n\n # Process each row (image) in the CSV file.\n for row in image_classifications:\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n source_ref = str(s3_path) + row[0]\n classification = 0\n\n if row[1].lower() == 'anomaly':\n classification = 1\n anomalous_count += 1\n\n # Create the JSON line.\n json_line = {}\n json_line['source-ref'] = source_ref\n json_line['anomaly-label'] = str(classification)\n\n metadata = {}\n metadata['confidence'] = 1\n metadata['job-name'] = \"labeling-job/anomaly-classification\"\n metadata['class-name'] = row[1]\n metadata['human-annotated'] = \"yes\"\n metadata['creation-date'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')\n metadata['type'] = \"groundtruth/image-classification\"\n\n json_line['anomaly-label-metadata'] = metadata\n\n output_file.write(json.dumps(json_line))\n output_file.write('\\n')\n image_count += 1\n\n logger.info(\"Finished creating manifest file %s.\\n\"\n \"Images: %s\\nAnomalous: %s\",\n manifest_file,\n image_count,\n anomalous_count)\n return image_count, anomalous_count", "def import_csv(item):\n (f_csv, f_csv_out, target_column, merge_columns) = item\n has_checked_keys = False\n\n if not merge_columns:\n raise ValueError(\"merge_columns must not be empty\")\n\n with open(f_csv_out, \"w\") as FOUT:\n CSV_HANDLE = None\n total_rows = 0\n\n for row in csv_iterator(f_csv):\n\n output = {\"_ref\": next(_ref_counter)}\n\n if not has_checked_keys:\n for key in merge_columns:\n if key not in row.keys():\n msg = \"Column **{}** not in csv file {}\"\n raise KeyError(msg.format(key, f_csv))\n has_checked_keys = True\n\n if target_column in row.keys():\n msg = \"Generated column **{}** already in csv file {}\"\n raise KeyError(msg.format(target_column, f_csv))\n\n text = []\n for key in merge_columns:\n val = row[key].strip()\n if not val:\n continue\n if val[-1] not in \".?!,\":\n val += \".\"\n text.append(val)\n\n output[target_column] = \"\\n\".join(text).strip()\n\n if CSV_HANDLE is None:\n CSV_HANDLE = csv.DictWriter(FOUT, sorted(output.keys()))\n CSV_HANDLE.writeheader()\n\n CSV_HANDLE.writerow(output)\n total_rows += 1\n\n logger.info(\"Imported {}, {} entries\".format(f_csv, total_rows))", "def check_for_csv(inp_dict):\n if inp_dict[\".csv\"]:\n for path in inp_dict[\".csv\"]:\n csv_path = convert_csv(path)\n inp_dict[\".xls\"].append(csv_path)\n inp_dict[\"del\"].append(csv_path)\n inp_dict[\"out\"].append(csv_path)\n inp_dict[\".csv\"] = []\n return inp_dict", "def formatJSON(csvpath, jsonfilepath):\n\n data = {}\n 
my_list = []\n with open(path) as file:\n csvReader = csv.DictReader(file)\n for csvRow in csvReader:\n\n data = csvRow\n my_list.append(data)\n\n \"\"\"\n\n Write retrieved data into a json file\n NOTE: json file is automatically created when code is run from terminal\n and updates each time it run again.\n \"\"\"\n\n\n with open(jsonfilepath,\"w\") as jsonfile:\n\n jsonfile.write(json.dumps(my_list,indent=4))", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def loop_csv(input_csv_path, output_csv_path):\n counter = 0\n with open(input_csv_path, 'rb') as read_csvfile:\n projectsreader = csv.DictReader(\n read_csvfile, delimiter=',', quotechar='\"')\n\n with open(output_csv_path, 'w') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl', 
'foundProjectUrl1',\n 'foundProjectUrl2', 'foundProjectUrl3',\n 'foundProjectUrl4', 'foundProjectUrl5',\n 'foundProjectUrl6', 'foundProjectUrl7',\n 'foundProjectUrl8', 'foundProjectUrl9',\n 'foundProjectUrl10']\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n # writer.writeheader() # this method only available at python 2.7\n for row in projectsreader:\n if counter == 100:\n time.sleep(86400) # sleep 1 day\n counter = 0\n\n res = query_google_cse(\n row['acronym'] + \" \" + row['title'] +\n \" project -site:cordis.europa.eu -site:ec.europa.eu\")\n\n # save response to file\n with open('responses_gcse.json', 'w') as outfile:\n json.dump(res, outfile)\n\n # a query response may not have 10 results, so we have to check\n # for that\n results = []\n result_size = res['queries']['request'][0]['totalResults']\n\n print \"INFO: RESULT SIZE %s\" % result_size\n for i in range(10):\n if i < int(result_size):\n results.append(res['items'][i]['link'])\n else:\n results.append('')\n\n # print \"Control Print: \" + res['items'][0]['link']\n print \"INFO: First Result: \" + results[0]\n writer.writerow({\n 'acronym': row['acronym'],\n 'title': row['title'],\n 'projectUrl': row['projectUrl'],\n 'foundProjectUrl1': results[0],\n 'foundProjectUrl2': results[1],\n 'foundProjectUrl3': results[2],\n 'foundProjectUrl4': results[3],\n 'foundProjectUrl5': results[4],\n 'foundProjectUrl6': results[5],\n 'foundProjectUrl7': results[6],\n 'foundProjectUrl8': results[7],\n 'foundProjectUrl9': results[8],\n 'foundProjectUrl10': results[9],\n })\n sys.stdout.flush()\n time.sleep(2)\n counter += 1", "def combine_files(file_name):\n\n\tif file_name == \"train\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Train/all_level1_train.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Train/all_level1_train.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Train/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop([\"Response\", \"Id\"],1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Response\")\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col + [\"Response\"]\n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Train/all_level1_train.csv\", index = False)\n\n\telif file_name == \"test\":\n\n\t\tif os.path.isfile(\"./Data/Level1_model_files/Test/all_level1_test.csv\"):\n\t\t\tos.remove(\"./Data/Level1_model_files/Test/all_level1_test.csv\")\n\n\t\tlist_files = glob(\"./Data/Level1_model_files/Test/*.csv*\")\n\t\tlist_df = []\n\t\tfor f in list_files :\n\t\t\tlist_df.append(pd.read_csv(f))\n\n\t\tfor i in range(1,len(list_df)):\n\t\t\tlist_df[i] = list_df[i].drop(\"Id\",1)\n\n\t\t# Concat\n\t\tdf_out = pd.concat(list_df, axis=1)\n\t\t# Order columns\n\t\tlist_col = df_out.columns.values.tolist()\n\t\tlist_col = sorted(list_col)\n\t\tlist_col.remove(\"Id\")\n\t\tlist_col = [\"Id\"] + list_col \n\t\tdf_out = df_out[list_col]\n\t\tdf_out.to_csv(\"./Data/Level1_model_files/Test/all_level1_test.csv\", index = False)", "def init_csv(input_path, config_file, quiet):\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), 
fg='green')\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file) or quiet:\n confirm_overwrite = True\n else:\n confirm_overwrite = click.confirm(\n '{} already exists. Do you want to overwrite it?'.format(csv_file))\n\n if confirm_overwrite:\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n csvHandler(csv_file).resetCSV(config_file=config_file)\n click.secho('{} created'.format(csv_file), fg='green')", "def importFile(self):\n\n ## Backing up old CSV and JSON files before beginning import operations\n if os.path.isfile(\"text_files/customers.csv\") and os.path.isfile(\"text_files/customers.json\"):\n print(\"\\nCreating a backup of the existing customer .csv and .json files before overwriting\")\n shutil.copy2(\"text_files/customers.csv\", \"text_files/customers.csv.backup\" + str(time.time()))\n shutil.copy2(\"text_files/customers.json\", \"text_files/customers.json.backup\" + str(time.time()))\n\n ## Importing the text file for cleaning then converting to CSV\n input_file = open(\"text_files/customer_export.txt\", \"r\")\n output_file = open(\"text_files/customers.csv\", \"w\")\n\n ## A loop to clean and write the customer_export txt file to a CSV\n for line in input_file:\n clean_text = \"\"\n check_line = line.replace(\"#\", \"\").replace(\",,\",\"\").split(\"|\")\n for line in check_line:\n if line != check_line[10]:\n clean_text += line + \",\"\n elif line == check_line[10]:\n clean_text += line + \"\\n\"\n output_file.write(clean_text)\n\n ## Closing TXT file and CSV file after formatting\n input_file.close()\n output_file.close()\n\n ## Opening the cleaned CSV file for conversion to Json\n with open('text_files/customers.csv') as clean_csv:\n ## Converting CSV file to Json\n converted = csv.DictReader(clean_csv)\n rows = list(converted)\n\n ## Writing converted CSV to Json file\n with open('text_files/customers.json', 'w') as convert:\n json.dump(rows, convert)\n\n ## Deleting all data currently in database before importing new file\n db_connection.executeQuery(\"DELETE FROM CRM;DBCC CHECKIDENT ('CRM', RESEED, 0) DELETE FROM Mailings; DBCC CHECKIDENT ('Mailings', RESEED, 0) COMMIT\") \n\n ## Loading the newly created Json file\n with open(\"text_files/customers.json\") as customers_json:\n customers = json.load(customers_json)\n\n ## A loop to add the contents of the Json file to the database \n print(\"Writing imported file to database please wait...\")\n for key in customers:\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"address\"] + \"', '\" + key[\"city\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"county\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"state\"] + \"', '\" + str(key[\"zip\"]) + \"', '\" + key[\"phone1\"] + \"', '\" + key[\"phone2\"] + \"' , '\" + key[\"email\"] + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \" \" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"','\" + key[\"address\"] + \" \" + key[\"city\"] + \" \" + key[\"county\"] + \" \" + key[\"state\"] + \" \" + str(key[\"zip\"]) + \"'); COMMIT\") 
\n\n print(\"\\nFinished writing to file. Returning to main menu...\")", "def merge_files():\n # abs path of data folder\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\\\\KORD\")\n file_list = os.listdir(work_folder)\n with open(os.path.join(work_folder, \"..\\\\merged_history_KORD.csv\"), \"w\") as outfile:\n for line in open(os.path.join(work_folder, file_list[0])):\n outfile.write(line)\n print \"write the first line\"\n for i in range(1, len(file_list)):\n with open(os.path.join(work_folder, file_list[i])) as infile:\n infile.next()\n for line in infile:\n outfile.write(line)", "def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))", "def convert_csv_to_alfed(self) -> None:\n global output_path, file_name\n self.parse_command_line_args()\n self.validate_command_line_args()\n\n for _, _, files in walk(self.args.input):\n for output_file in files:\n if output_file.endswith(\".csv\"):\n file_name, _ = path.splitext(output_file)\n output_path = \"\"\n output_path = path.join(self.args.output, file_name)\n\n try:\n mkdir(output_path)\n print(f\"Creating folder {output_path}...\")\n except OSError:\n print(f\"Creation of directory {output_path} failed\")\n\n with open(path.join(self.args.input, output_file), \"rt\") as csv_file:\n reader = DictReader(csv_file, fieldnames=self.args.fieldorder)\n\n for row in reader:\n uid = str(uuid.uuid1()).upper()\n row[\"content\"] = self.replace_embedded_snipptes(row[\"content\"], self.args.lplaceholder,\n self.args.rplaceholder, self.args.changeplaceholders)\n output = dumps(\n {\n \"alfredsnippet\": {\n \"snippet\": row['content'],\n \"uid\": uid,\n \"name\": row['name'],\n \"keyword\": row['abbreviation']\n }\n },\n sort_keys=False, indent=4,\n separators=(',', ': ')\n )\n\n output_file = f\"{row['name']}_[{uid}].json\"\n target = path.join(output_path, output_file)\n f = open(target, \"w\")\n f.write(output)\n f.close()\n print(f\"Writing file {target}...\")\n else:\n self.error_msg(\"The files in the input folder are not with extension 
'*.csv'\")\n\n subprocess.call(\n [\n 'ditto',\n '--norsrc',\n '-ck',\n output_path,\n self.args.output + \"/\" + file_name + \".alfredsnippets\"\n ]\n )\n print(f\"{self.args.output}/{file_name}.alfredsnippets was created\")\n self.created_folders.append(file_name)\n\n self.remove_temp_folders()", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def clean_file(csv_file):\n my_list = []\n with open(csv_file, newline='') as csvfile:\n file_reader = csv.reader(csvfile, delimiter=',', quotechar=\" \")\n for row in file_reader:\n my_list.append(row)\n\n \"\"\"\n > Part Two\n Input: Nested list csv_table and a string file_name\n Action: Write fields in csv_table into a comma-separated CSV file with the name file_name\n Mutates output: Yes\n \"\"\"\n with open(csv_file, 'w', newline='') as csvfile:\n my_csv_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)\n for row in my_list:\n row2 = []\n for item in row:\n a = item.lstrip('\"')\n b = a.rstrip('\"')\n row2.append(b)\n my_csv_writer.writerow(row2)", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def _load_single_file(self, table_name, manifest_row, csv_reader,\n temp_filepath):\n # get database interface and it's equivalent manifest row\n sql_interface = self._configure_db_interface(\n manifest_row=manifest_row, temp_filepath=temp_filepath)\n\n sql_manifest_row = sql_interface.get_sql_manifest_row()\n\n cleaner = self._get_cleaner(table_name=table_name,\n manifest_row=manifest_row)\n csv_writer = CSVWriter(meta=self.meta,\n manifest_row=manifest_row,\n filename=temp_filepath)\n\n # clean the file and save the output to a local pipe-delimited file\n # if it doesn't have a 'loaded' status in the database manifest\n if csv_reader.should_file_be_loaded(sql_manifest_row=sql_manifest_row):\n print(\" Cleaning...\")\n meta_only_fields = self._get_meta_only_fields(\n table_name=table_name, data_fields=csv_reader.keys)\n for idx, data_row in enumerate(csv_reader):\n data_row.update(meta_only_fields) # insert other field dict\n clean_data_row = cleaner.clean(data_row, idx)\n if clean_data_row is not None:\n csv_writer.write(clean_data_row)\n\n csv_writer.close()\n\n # write the data to the database\n self._update_database(sql_interface=sql_interface)\n\n if not self._keep_temp_files:\n csv_writer.remove_file()", "def writeCSV(csvPath, usedmpicommands, first_table_values,second_table_values,third_table_values, df):\n\n print(\"Saving CSV files in directory '\" + os.path.realpath(csvPath) +\"'\")\n\n #routine Summary by rank metrics table\n metric_csv_table = df.to_csv(sep=';')\n with open(os.path.join(csvPath,'routineSummaryByRank_metric_table.csv'), 'w') as outfileMetricTable:\n outfileMetricTable.write(metric_csv_table)\n outfileMetricTable.close()\n\n #routine Summary by rank data table (just the data from the instrumenation file in csv format)\n with open(os.path.join(csvPath,'routineSummaryByRank_summary.csv'), 'w') as outfileMPICommands:\n wr = csv.writer(outfileMPICommands, delimiter=';')\n wr.writerows(usedmpicommands)\n outfileMPICommands.close()\n\n #application Summary by rank data (first table)\n #Columns: \"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", 
\"System Portion in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_1st_table.csv'), 'w') as outfile_first_table:\n wr = csv.writer(outfile_first_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"])\n wr.writerows(first_table_values)\n outfile_first_table.close()\n \n #application Summary by rank data (second table) \n #Columns: \"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_2st_table.csv'), 'w') as outfile_second_table:\n wr = csv.writer(outfile_second_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"])\n wr.writerows(second_table_values)\n outfile_second_table.close()\n\n #application Summary by rank data (third table)\n #Columns: \"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_3rd_table.csv'), 'w') as outfile_third_table:\n wr = csv.writer(outfile_third_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"])\n wr.writerows(third_table_values)\n outfile_third_table.close()\n\n #In case, you are wondering, where the last part of the instrumentation file is (message Summary by rank),\n #it is currently not saved as a csv file. This is because:\n #\n #1st: In the platform_mpi instrumentation file, the data is somehow visualized beautifully\n #2nd: It is very hard to save the data in a 2-dimensional csv file format\n #Therefore we decided, not to export this data in a csv file format", "def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out", "def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = 
js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return", "def process_file(input_file, output_good, output_bad):\n\t# Lists containing good and bad rows\n\tlist_good = []\n\tlist_bad = []\n\t# Open csv file \n\twith open(input_file, 'r') as f:\n\t\t# Create reader from csv and store header row\n\t\treader = csv.DictReader(f)\n\t\theader = reader.fieldnames\n\t\t# Store useful keys\n\t\tyear = 'productionStartYear'\n\t\turi = 'URI'\n\t\t# Loop through all rows\n\t\tfor row in reader:\n\t\t\t# Discard rows with a URI not from DBpedia\n\t\t\tif not row[uri].startswith('http://dbpedia.org'):\n\t\t\t\tcontinue\n\t\t\t# Extract year from datetime\n\t\t\tyear_value = row[year][:4]\n\t\t\t# Change row datetime value to its year\n\t\t\trow[year] = year_value\n\t\t\t# Check if year actually contains a year\n\t\t\tif not row[year].isdigit():\n\t\t\t\t# Add to list_bad\n\t\t\t\tlist_bad.append(row)\n\t\t\t\tcontinue\n\t\t\t# Check if year falls within expected range\n\t\t\tif int(row[year]) < 1886 or int(row[year]) > 2014:\n\t\t\t\t# Add list to bad\n\t\t\t\tlist_bad.append(row)\n\t\t\t\tcontinue\n\t\t\t# Row is proper, add to list_good\n\t\t\tlist_good.append(row)\n\t\t\n\t\t# Open good ouput file, write the good rows to it\n\t\twith open(output_good, 'w') as csvfile:\n\t\t\twriter = csv.DictWriter(csvfile, delimiter = ',', fieldnames = 
header)\n\t\t\twriter.writeheader()\n\t\t\tfor item in list_good:\n\t\t\t\twriter.writerow(item)\n\n\t\t# Open bad ouput file, write the nad rows to it\n\t\twith open(output_bad, 'w') as csvfile:\n\t\t\twriter = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)\n\t\t\twriter.writeheader()\n\t\t\tfor item in list_bad:\n\t\t\t\twriter.writerow(item)", "def test_csv(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(answer_file_path), 'r') as answer_file:\n csv_file = open(attach_path(input_file_path))\n assert str(read_csv(csv_file)) == answer_file.read().strip()", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('Downloading data set from DC Open data')\n\n with open(input_filepath, 'r') as f:\n parking_violations = json.load(f)\n\n for fullname, csv in parking_violations.items():\n download_file = csv + '.csv'\n local_filename = '_'.join(name.lower() for name in fullname.split() ) + '.csv'\n local_filename = os.path.join(output_filepath, local_filename)\n if not os.path.isfile(local_filename):\n time.sleep(5)\n r = requests.get(download_file)\n if not b'\"status\":\"Processing\",\"generating\":{}' in r.content:\n with open(local_filename, 'wb') as f:\n f.write(r.content)\n logger.info(local_filename)\n else:\n logger.warning('Cannot download {0}'.format(local_filename))", "def main():\n location = os.getcwd()\n header = \"Date,Time,Voltage,Current,Isolation,Range,SoC,Distance,Fan rpm,Fan Torque,Hyd. Pump rpm,Hyd. Pump Torque,SW Pump rpm,SW Pump Torque,Nozzle,Sidebrushes,WideSweepBrush,TempIGBT-Fan,Fan motor temp, Traction rpm, Traction torque,BMS1 Volts, BMS2 Volts\"\n header = header+\"\\n\"\n\n of =\"outFile.csv\"\n outFile = open(of, \"w\")\n outFile.write(header)\n\n for file in os.listdir(location ):\n try:\n if file.endswith(\".csv\") and not(file.startswith(\"outFile\")):\n print(\"...reading {}\".format(file))\n fcsv = csv.reader(open(file, newline=''), delimiter=' ', quotechar='|') \n for row in fcsv:\n line = ', '.join(row)\n if line[:4] == \"Date\":\n d = line[5:13]\n dd = d[6:9]+\"/\"+d[4:6]+\"/\"+d[:4]\n next\n elif line[12] == \"*\" or line[0] == \"*\":\n next\n elif line[0] == \"T\":\n next\n else:\n L = dd + \",\" + line + \"\\n\"\n outFile.write(L)\n except Exception as e:\n raise e\n print(\"No CSV files in here!\")\n\n try: \n print(\"\\nAll files have been merged into: {}\".format(of))\n outFile.close()\n \n except Exception as ee:\n raise ee", "def read_and_write_file(json_file_path, new_json_file_path):\n rests = list()\n with open(json_file_path, \"r\") as old:\n for line in old:\n line_contents = json.loads(line)\n #print line_contents['categories']\n if 'Restaurants' in line_contents['categories']:\n rests.append(line_contents)\n #print \"True\"\n\n json.JSONEncoder().encode(rests)\n with open(new_json_file_path, \"w+\") as newf:\n json.dump(rests, newf)", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def step_parallel(in_csv_filename, terrestrial_data, marine_data, ancillary_path,\n out_csv_filename, from_gbif=True):\n csv_filename_pairs, header = get_chunk_files(\n in_csv_filename, out_csv_filename=out_csv_filename)\n\n# in_csv_fn, out_csv_fn = csv_filename_pairs[0]\n# intersect_csv_and_shapefiles(in_csv_fn, terrestrial_data,\n# marine_data, ancillary_path, out_csv_fn, False)\n\n with ProcessPoolExecutor() as executor:\n for in_csv_fn, out_csv_fn in csv_filename_pairs:\n 
executor.submit(\n intersect_csv_and_shapefiles, in_csv_fn, terrestrial_data,\n marine_data, ancillary_path, out_csv_fn, from_gbif)\n\n try:\n outf = open(out_csv_filename, 'w', encoding='utf-8')\n outf.write('{}'.format(header))\n smfile_linecount = 0\n for _, small_csv_fn in csv_filename_pairs:\n curr_linecount = get_line_count(small_csv_fn) - 1\n print('Appending {} records from {}'.format(\n curr_linecount, small_csv_fn))\n # Do not count header\n smfile_linecount += (curr_linecount)\n lineno = 0\n try:\n for line in open(small_csv_fn, 'r', encoding='utf-8'):\n # Skip header in each file\n if lineno == 0:\n pass\n else:\n outf.write('{}'.format(line))\n lineno += 1\n except Exception as inner_err:\n print('Failed to write {} to merged file; {}'.format(small_csv_fn, inner_err))\n except Exception as outer_err:\n print('Failed to write to {}; {}'.format(out_csv_filename, outer_err))\n finally:\n outf.close()\n\n lgfile_linecount = get_line_count(out_csv_filename) - 1\n print('Total {} of {} records written to {}'.format(\n lgfile_linecount, smfile_linecount, out_csv_filename))", "def create_file(output_json):\n folder = \"data/\"\n filename = datetime.now().strftime(\"%d-%m-%Y\") + \"-moisture-read.json\"\n filepath = folder+filename\n\n # Create Local folder\n try:\n os.mkdir(folder)\n except OSError:\n pass\n #print(\"Directory already created or a failure occured on directory (%s)\" % folder)\n\n # Create Empty Json file if it doesnt exists\n if(Path(filepath)).exists():\n pass\n else:\n try:\n f = open(filepath, \"a\")\n f.write('{\\n\"moisture_iot_project\":[]\\n}')\n f.close()\n except Exception as e:\n print(\"Failure occured creating the JSON file (%s)\" % e)\n\n # Open Json file to append current structure\n with open(filepath) as outfile:\n data = json.load(outfile)\n\n # Get list with all dictionaries\n temp = data['moisture_iot_project']\n\n # Append current structure\n temp.append(output_json)\n\n # Reorganize List values and re-write to JSON file\n data['moisture_iot_project'] = temp\n write_json(data, filepath)", "def noop_merge(output_json, jsons_to_merge):\n if len(jsons_to_merge) > 1:\n print('Multiple JSONs provided: %s' % (','.join(jsons_to_merge)),\n file=sys.stderr)\n return 1\n if jsons_to_merge:\n shutil.copyfile(jsons_to_merge[0], output_json)\n else:\n with open(output_json, 'w') as f:\n json.dump({}, f)\n return 0", "def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)", "def create_file_output(self, results):\n for key, value in results.table_output.items():\n name_timestamp = key.split('&')\n _name = name_timestamp[0]\n timestamp = name_timestamp[1]\n file_name = output_file_prefix + \"-\" + _name + \".csv\"\n if file_name not in self.file_creation_set:\n self._header_written = False\n self.file_creation_set.update([file_name])\n for row in value:\n with open(file_name, 'a+') as file_to_write:\n row.update({'Timestamp': timestamp})\n _keys = row.keys()\n file_output = csv.DictWriter(file_to_write, _keys)\n if not self._header_written:\n file_output.writeheader()\n self._header_written = True\n file_output.writerow(row)\n file_to_write.close()\n return results", "def csv_to_json(csv_file_path: str, json_file_path: str):\n fieldnames = ('last_name', 'first_name', 'second_name')\n\n # read csv file\n try:\n with open(Path(csv_file_path)) as csv_file:\n csv_reader = csv.DictReader(csv_file, fieldnames)\n 
csv_data = {num: row for num, row in enumerate(csv_reader, start=1)}\n except FileNotFoundError as err:\n raise CustomException() from err\n\n # generate json\n try:\n with open(Path(json_file_path), 'w') as json_file:\n json.dump(csv_data, json_file, indent=2)\n except OSError as err:\n raise CustomException() from err", "def analyse(self, csv_reader, json_input, csv_writer):\n csv_writer.writeheader()\n\n new_rows_count = 1\n\n while True:\n try:\n row = csv_reader.next()\n except StopIteration:\n break\n\n new_row = dict()\n\n try:\n transaction_type = row['Detalii tranzactie']\n\n if transaction_type == 'Cumparare POS':\n # Data ,Detalii tranzactie ,Debit ,Credit\n # 02 decembrie 2015 ,Cumparare POS ,\"246,33\" ,\n # ,Nr.card:42XXX3965 , ,\n # ,Terminal:AUCHAN , ,\n # ,Data:29-11-2015 Autorizare: 495514 , ,\n\n new_row['id'] = new_rows_count\n new_row['transaction'] = transaction_type\n new_row['amount'] = INGBankStatementParser.obtain_amount(row['Debit'])\n\n new_row['details'] = csv_reader.next()['Detalii tranzactie']\n\n row = csv_reader.next()\n new_row['partner'] = INGBankStatementParser.obtain_partner(row['Detalii tranzactie'])\n new_row['details'] += ' ' + row['Detalii tranzactie']\n\n row = csv_reader.next()\n new_row['timestamp'] = INGBankStatementParser.obtain_date(row['Detalii tranzactie']) + ' 10:0' + str(new_rows_count % 10) + 'AM'\n new_row['details'] += ' ' + row['Detalii tranzactie']\n\n else:\n self.logger.warning('Unknown transaction type, ignoring row: ' + str(row.items()))\n\n except Exception as ex:\n self.logger.warning('Exception caught: ' + str(ex))\n self.logger.warning('Exception around row: ' + str(row.items()))\n self.logger.warning('Ignoring partial composed new row: after row: ' + str(new_row.items()))\n\n if new_row.items():\n self.logger.debug('Adding new row: ' + str(new_row.items()))\n csv_writer.writerow(new_row)\n new_rows_count += 1\n\n return json_input", "def data_merge(path, dataset_name=\"processed_data\"):\n files = glob.glob(path+\"**//\"+dataset_name+\".json\")\n logger.info(\"Found {} files under the path {}\".format(len(files),path))\n final_data = []\n\n for file in files:\n assert dataset_name in file\n data = json.load(open(file,\"r\",encoding=\"utf-8\"))\n final_data += data\n\n data_analysis(final_data)\n final_data = json.dumps(final_data,indent=4)\n new_file = open(path + \"//merged_data.json\", \"w+\", encoding=\"UTF-8\")\n new_file.writelines(final_data)", "def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)", "def ref_resp2files(output_file, output_json):\n with open(output_file, \"w\") as reference_text:\n reference_text.write(output_json)", "def convert_reviews_json_file_to_first_last_csv(json_file_path, csv_file_path):\n write_dir = os.path.dirname(csv_file_path)\n if not os.path.isdir(write_dir):\n os.mkdir(write_dir)\n\n def get_last_review():\n rev_dict = {}\n with open(json_file_path) as f:\n for line in f:\n cur_rev = json.loads(line)\n biz_id = cur_rev['business_id']\n cur_rev_date = 
dt.datetime.strptime(cur_rev['date'], '%Y-%m-%d').date()\n if biz_id in rev_dict:\n if rev_dict[biz_id][0] > cur_rev_date:\n rev_dict[biz_id][0] = cur_rev_date\n elif rev_dict[biz_id][1] < cur_rev_date:\n rev_dict[biz_id][1] = cur_rev_date\n else:\n rev_dict[biz_id] = [cur_rev_date, cur_rev_date]\n return rev_dict\n\n with open(str(csv_file_path), 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n reviews = get_last_review()\n for biz_id, time_tuple in reviews.iteritems():\n writer.writerow([str(biz_id), str(time_tuple[0]), str(time_tuple[1])])", "def execute_processor(self):\n \n # pull in the parameter that has the file names we will process\n filename1 = self.param_dict['file1']\n filename2 = self.param_dict['file2']\n \n ''' these next 2 lines are the ones that I added to create a dummy row '''\n right_dummy = self.create_dummy_row( self.param_dict['dummy_rec_right'])\n left_dummy = self.create_dummy_row( self.param_dict['dummy_rec_left'])\n \n \n \n\n self.open_files(os.path.join(self.entry.working_directory,filename1), os.path.join(self.entry.working_directory,filename2))\n self.process_params()\n key_dict = self.create_key_match()\n file1_rec = self.read_file1(first=True)\n file2_rec = self.read_file2(first=True)\n \n file2_used = False\n \n # call the convenience method to setup the temp_csv file. This will also write the header row by default\n self.setup_csv_temp_writer(self.get_temp_csv_name(), self.get_header(self.file1_reader.fieldnames,self.file2_reader.fieldnames),preserve_order=True)\n \n while file1_rec:\n combined = {k:v for k,v in file1_rec.items()}\n if file2_rec and self.get_key(file2_rec,self.file2_key) == self.get_key(file1_rec,self.file1_key):\n # merge these two bad boys\n combined.update(self.get_values(file2_rec))\n file2_used = True\n ### WRITE ###\n self.write_temp_rec(combined)\n file1_rec = self.read_file1()\n elif file2_rec and self.get_key(file1_rec,self.file1_key) > self.get_key(file2_rec,self.file2_key):\n if not file2_used and left_dummy:\n ''' left side dummy \n now use the already created dummy_row to updated the dictionary '''\n left_dummy.update(self.get_values(file2_rec))\n key_fields = {key_dict[k]:file2_rec[k] for k in self.file2_key.split(\",\")}\n left_dummy.update(key_fields)\n self.write_temp_rec(left_dummy)\n left_dummy = self.create_dummy_row( self.param_dict['dummy_rec_left'])\n \n \n file2_rec = self.read_file2()\n file2_used = False\n \n elif not file2_rec or self.get_key(file1_rec,self.file1_key) < self.get_key(file2_rec,self.file2_key):\n ### WRITE REC WITH NO MATCH ###\n if self.keep_nomatch:\n ''' right side dummy\n now use the already created dummy_row to updated the dictionary '''\n if right_dummy:\n combined.update(self.get_values(right_dummy))\n self.write_temp_rec(combined)\n file1_rec = self.read_file1()\n else:\n raise Exception\n self.close_temp_csv()\n return 0", "def merge_files(self, infnames, outfname, csv_header=True):\n assert outfname not in infnames\n start = time.time()\n\n header = ''\n with open(outfname, 'w') as outfile:\n if csv_header: # if not <csv_header>, we'll just end up with a zero-length file\n for fname in infnames:\n if not os.path.exists(fname) or os.stat(fname).st_size == 0:\n continue\n with open(fname) as headfile:\n reader = csv.DictReader(headfile)\n writer = csv.DictWriter(outfile, reader.fieldnames)\n writer.writeheader()\n header = ','.join(reader.fieldnames)\n break\n\n cmd = 'cat ' + ' '.join(infnames) + ' | grep -v \\'^' + header + '$\\' | sort | uniq >>' + outfname\n 
check_call(cmd, shell=True)\n\n if not self.args.no_clean:\n for infname in infnames:\n os.remove(infname)\n\n print ' time to merge csv files: %.3f' % (time.time()-start)", "def _setup_output_file(self):\n\n columns = [\"Hero file\",\n \"Test type\",\n \"Name of tested entry\",\n \"Misc dice sum input\",\n \"Value of tested entry\",\n \"Modifier\",\n \"Values of related attributes\",\n \"Rolls\",\n \"Result\",\n \"Description\",\n \"Timestamp\",\n \"Type of dice input\"]\n\n # if file does not exist, add first row of column names\n if not os.path.isfile(self._result_csv):\n with open(self._result_csv, \"w\", encoding=\"utf-8\") as csv_file:\n file_writer = csv.writer(csv_file, delimiter=',',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n file_writer.writerow(columns)\n return True\n return False", "def open_output(self, file_name='testoutput.csv', path='', reset_file=True):\n\n # create the fully qualified path name\n file_path = os.path.join(path, file_name)\n fmode = \"w\" if reset_file else \"a\"\n try:\n self.file_ref = open(file_path, fmode)\n self.csvwriter = csv.writer(self.file_ref)\n except Exception as e:\n print(\"%s\" % str(e))\n return", "def write_csv_file (metadata_list, csv_file, append) :\n try :\n with open (csv_file, 'a' if append else 'w' , newline='') as file :\n writer = csv.DictWriter(file, fieldnames=MetadataEntity.get_fieldnames())\n if not append: writer.writeheader()\n for e in metadata_list :\n writer.writerow(e.get_values())\n file.close()\n except :\n print ('ERROR: writing csv file: ' + csv_file)\n return False\n return True", "def intersect_csv_and_shapefiles(in_csv_filename, geodata1, geodata2,\n ancillary_path, out_csv_filename, from_gbif):\n pth, basefname = os.path.split(out_csv_filename)\n logbasename, _ = os.path.splitext(basefname)\n logfname = os.path.join(pth, '{}.log'.format(logbasename))\n logger = get_logger(logbasename, logfname)\n bf = BisonFiller(log=logger)\n # Pass 4 of CSV transform, final step, point-in-polygon intersection\n bf.update_point_in_polygons(\n geodata1, geodata2, ancillary_path, in_csv_filename, out_csv_filename,\n from_gbif=from_gbif)\n # Do intersection here\n sleep(randint(0, 10))\n print(' - {}'.format(out_csv_filename))", "def main(input_filepath, output_filepath):\n productsDict = dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def process_file(\n ctx,\n column: str,\n csv_file: Path,\n in_place: bool,\n output_file: Path,\n save_openrefine: bool,\n openrefine_output_file: Path,\n save_processed_values: bool,\n processed_values_output_file: Path,\n ignore_values_file: Path,\n dry_run: bool,\n):\n replacer = Replacer(ctx.obj.get('GKG_API_KEY'))\n\n unique_values, rows, headers = _read_unique_values_from_csv(\n csv_file, column,\n )\n\n ignore_values = _read_ignore_values_file(ignore_values_file)\n\n processed_values, replacements = _process_suggestions(\n replacer, unique_values, ignore_values,\n )\n\n if save_processed_values:\n _create_processed_values_output_file(\n processed_values, processed_values_output_file, csv_file,\n )\n\n if save_openrefine:\n _create_openrefine_file(\n openrefine_output_file, csv_file, replacements, column,\n )\n\n if dry_run:\n sys.exit(0)\n\n output_file_path = csv_file if in_place else output_file\n _create_output_file(\n output_file_path, csv_file, headers, rows, replacements, column,\n )", "def file_setup(outfile):\n\n extant_objids 
= []\n\n if os.path.exists(outfile):\n print('This file exists.')\n try:\n extant_objids = np.array(pd.read_csv(outfile)['objid']).tolist()\n except:\n print('And nonstandard!')\n # Raise an exception?\n return False\n else:\n # Initialize the file with a header\n with open(outfile, 'wb') as csvfile:\n cols = ['objid', 'flat_counts', 'mcat_bg', 'bg_counts',\n 'flux_bgsub_err', 'cps_mcatbgsub', 'counts',\n 'mag_mcatbgsub', 'cps_err', 'mag_bgsub', 'cps_bgsub',\n 'detys', 'flux_bgsub', 'flux_err', 'mag_err_1',\n 'cps_bgsub_err', 't1_data', 'bg', 'responses', 't_mean',\n 'cps_mcatbgsub_err', 'mag_bgsub_err_1', 'mag_err_2',\n 't0_data', 'racent', 'deccent', 'mag', 'exptime',\n 'bg_flat_counts', 'detxs', 't0', 't1',\n 'mag_mcatbgsub_err_2', 'flux', 'mag_mcatbgsub_err_1',\n 'flags', 'mag_bgsub_err_2', 'detrad', 'cps',\n 'flux_mcatbgsub_err', 'flux_mcatbgsub', 'mcat_expt', 'ra',\n 'dec', 'aper4', 'aper4_err', 'mcat_bg',\n 'aper7', 'aper7_err']\n\n spreadsheet = csv.writer(csvfile, delimiter=',', quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n spreadsheet.writerow(cols)\n\n return extant_objids", "def initCSV(self, makeFile, overWrite):\n self.initialized = True\n\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n if os.path.exists(str(self.fileName)):\n\n f = open(str(self.fileName), \"r\")\n\n if not f.read():\n f.close()\n\n f = open(str(self.fileName), \"w\")\n outString = \"\"\n for varName in self.variableDescriptions:\n outString += varName\n outString += \",\"\n\n f.write(outString[0:-1])\n\n f.write('\\n')\n else:\n if overWrite == True:\n f.close()\n\n f = open(str(self.fileName), \"w\")\n outString = \"\"\n for varName in self.variableDescriptions:\n outString += varName\n outString += \",\"\n\n f.write(outString[0:-1])\n\n f.write('\\n')\n if overWrite == False:\n raise OSError(\"csv file is not empty!\")\n\n else:\n if makeFile == True:\n f = open(str(self.fileName), \"w\")\n \n f.close()\n else:\n raise OSError(\"csv file not found!\")", "def process_and_write(loaded_dicts, input_folder, text):\n directory = 'conll-dir'\n \n #check if dir exists, if not make one\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n #get basename of path and change extension to '.conll'\n base = os.path.basename(text)[:-5]\n conll_str = '.conll'\n basename = base + conll_str\n \n #add directory with files to the input folder\n path = os.path.join(input_folder, directory)\n complete_name = os.path.join(path, basename)\n \n #open write file\n f = csv.writer(open(complete_name, 'w'), delimiter=(' '))\n \n #for every value in the json dict, add values to list and write list\n for json_dict in loaded_dicts:\n values_list = []\n for key, value in json_dict.items():\n values_list.append(value)\n \n f.writerow(values_list)", "def write_to_csv(path,data_dict):\n\n\n schema = [\"file_name\",\"family\",\"genus\",\"genus_confidence\",\n \"species_1\",\"confidence_1\",\"hall_1\",\n \"species_2\",\"confidence_2\",\"hall_2\",\n \"species_3\",\"confidence_3\",\"hall_3\",\n \"species_4\",\"confidence_4\",\"hall_4\",\"peaks\"]\n\n # if no file exists create a one and inform the user\n if not os.path.exists(path):\n print(\"creating new output file {}\".format(path))\n with open(path, \"w\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(schema)\n\n row = []\n\n row.append(data_dict[\"file_name\"])\n row.append(data_dict[\"family\"])\n \n row.append(data_dict[\"genus_1\"])\n row.append(data_dict[\"genus_confidence_1\"][:5])\n \n 
row.append(data_dict[\"species_1\"])\n row.append(data_dict[\"confidence_1\"][:5])\n row.append(data_dict[\"hall_1\"])\n \n row.append(data_dict[\"species_2\"])\n row.append(data_dict[\"confidence_2\"][:5])\n row.append(data_dict[\"hall_2\"])\n\n row.append(data_dict[\"species_3\"])\n row.append(data_dict[\"confidence_3\"][:5])\n row.append(data_dict[\"hall_3\"])\n\n row.append(data_dict[\"species_4\"])\n row.append(data_dict[\"confidence_4\"][:5])\n row.append(data_dict[\"hall_4\"])\n\n row.append(data_dict[\"peaks\"])\n \n with open(path, \"a\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(row)", "def test_csv_writes(self):\n counter = testdata.get_counter()\n csvfile = testdata.create_csv({\n \"foo\": counter,\n \"bar\": testdata.get_words,\n })\n\n for row in csvfile:\n for k in [\"foo\", \"bar\"]:\n self.assertTrue(k in row)\n self.assertTrue(row[k])", "def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]", "def _write_merge_map_file(self,\r\n input_file_basename,\r\n job_result_filepaths,\r\n params,\r\n output_dir,\r\n merge_map_filepath):\r\n open(merge_map_filepath, 'w').close()", "def main():\n \n lookupslocation = 'C:\\\\Users\\\\gwilliams\\\\Desktop\\\\Python Experiments\\\\work projects\\\\FaresIndexSourceData\\\\regulated_fares_data\\\\'\n destination = 'C:\\\\Users\\\\gwilliams\\\\Desktop\\\\Python Experiments\\\\work projects\\\\FaresIndexSourceData\\\\regulated_fares_data\\\\comparison output\\\\'\n lookupfileslist, count = getdata(lookupslocation)\n\n print(f\"there are {count} files found.\")\n\n newlookup = lookupfileslist[0]\n oldlookup = lookupfileslist[1]\n\n #join new to old // old to new\n new_uniquevalues = pd.merge(left=newlookup,right=oldlookup,how='left',\n left_on=['orig','dest','route','ticket'],right_on=['orig','dest','route','ticket'])\n\n old_uniquevalues = pd.merge(left=newlookup,right=oldlookup,how='right',\n left_on=['orig','dest','route','ticket'],right_on=['orig','dest','route','ticket'])\n\n print(\"These are values unique to new lookup\") \n new_uniquevalues = new_uniquevalues[new_uniquevalues.ticketa.isnull()==True]\n exportfile(new_uniquevalues,destination,'unique_new_values',1)\n\n print(\"These are values unique to old lookup\")\n old_uniquevalues = old_uniquevalues[old_uniquevalues.new_flag.isnull()==True]\n exportfile(old_uniquevalues,destination,'unique_old_values',1)", "def step2(step2_input, step2_output):\n\n global graf\n logger.info(\"###Step 2:\")\n graf = defaultdict(dict)\n start2 = clock()\n info = []\n for input_file in step2_input:\n logger.info(\"Reading 
input file {}\".format(input_file))\n with open(input_file) as csv_file:\n start_file=clock()\n rdr = csv.reader(csv_file)\n header = rdr.next()\n header_last = len(header) - 1\n try: \n for chunk in gen_chunks(rdr, 10000):\n info.extend([(row[0], row[header_last]) for row in chunk])\n info = list(set(info))\n process_info(info)\n except csv.Error as e:\n logger.error(\"Exception {}: {}\".format(type(e), e))\n logger.error(\"Check Problems section in readme for known issues.\")\n return 1\n check_time(start_file, \"Done reading.\")\n check_time(start2, \"Done generating full link dict...\")\n start_write = clock()\n write_to_file(step2_output, graf)\n check_time(start_write, \"Done writing output file.\")\n check_time(start2, \"Done with Step 2.\")\n return 0", "def load(user_activity: DataFrame, file_paths: Dict) -> True:\n user_activity\\\n .write.mode('overwrite')\\\n .csv(path=file_paths['output_path'], mode='overwrite')\n\n return True", "def county_file_merger(folder_path):\n\n print(\"\\n*******************--- Starting File Merger for .csv files ---*******************\")\n with open(\"result.csv\",\"wb\") as outfile:\n for filename in os.listdir(folder_path):\n with open(filename,\"rb\") as infile:\n for line in infile:\n outfile.write(line)\n infile.close()\n outfile.close()\n print(\"\\nResult saved to -----> result.csv \")\n print(\"\\n*******************--- Finished File Merger for .csv files ---*******************\")", "def initial_csv_wrangling(csv_file):\n df = pd.read_csv(csv_file)\n df = df.fillna('')\n columns = list(df.columns)\n\n # check that \"url\" column exists (required)\n if 'url' not in columns:\n raise Exception('Input csv file requires a \"url\" column, which does not seem to exist. Exiting.')\n\n # check if \"pos_concepts\" column exists and parse accordingly (not required)\n if 'pos_concepts' in columns:\n print('Found \"pos_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['pos_concepts'] = df['pos_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"neg_concepts\" column exists and parse accordingly (not required)\n if \"neg_concepts\" in columns:\n print('Found \"neg_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['neg_concepts'] = df['neg_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"metadata\" column exists and load accordingly (not required)\n if \"metadata\" in columns:\n print('Found \"metadata\" column. 
Attempting to ingest.')\n try:\n df['metadata'] = df['metadata'].replace('','{}').map(json.loads)\n except:\n raise Exception('Value in \"metadata\" column does not seem to be a properly JSON formatted str.')\n\n return df", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def regenerate_json_file():\n \n # filepaths used in generating json data...\n dir_export = f\"C:\\\\Users\\\\{os.getlogin()}\\\\Downloads\"\n list_name = 'my-favourite-films.csv'\n json_output_filename = 'reviews_web_data.json'\n path_to_rootreducer = f'D:\\\\Programming-Projects\\\\nathansteele\\\\src\\\\reducers\\\\RootReducer.js';\n path_to_json_output = f'D:\\\\Programming-Projects\\\\nathansteele\\\\src\\\\components\\\\films\\\\{json_output_filename}'\n \n # 1) find letterboxd zip export...\n prefix = 'letterboxd'\n postfix = 'utc.zip'\n zip_file_name = \"\"\n download_folder = sorted(glob.glob(f\"{dir_export}/*.zip\"), key=os.path.getmtime)\n download_folder.reverse() # reversing the list means most recent downloads are first in the list...\n for filepath in download_folder:\n filename = filepath.split('\\\\')[-1]\n if filename.split('-')[0] == prefix and filename.split('-')[-1] == postfix:\n zip_file_name = filename\n print(f\"Found the zip export! -> {zip_file_name}\")\n break\n else:\n continue\n\n path_to_zip_file = f\"{dir_export}\\\\{zip_file_name}\" # path to zip file we downlodaded from letterboxd...\n path_to_extracted_zip = f\"{dir_export}\\\\{zip_file_name[:-4]}\" # path to extracted zip (which we are about to do)...\n\n # 2) extract zip automatically (if not done already)...\n if os.path.exists(path=path_to_zip_file):\n if not os.path.exists(path=path_to_extracted_zip):\n with zipfile.ZipFile(file=path_to_zip_file , mode='r') as z:\n z.extractall(path_to_extracted_zip)\n print(f\"Extract complete! -> {path_to_extracted_zip}\")\n else:\n print(f'Zip file already exists... No need to extract again... -> ({path_to_extracted_zip})')\n else:\n print(f'Zip file could not be found... -> ({path_to_zip_file})')\n\n\n # local version of letterboxd data list\n letterboxd_list = []\n \n # 3) locate the csv file of interest\n if os.path.exists(path_to_extracted_zip): \n # open csv file...\n path_to_list = f'{path_to_extracted_zip}\\\\lists\\\\{list_name}'\n with open(path_to_list, 'r') as f:\n # csv reader object\n csv_reader = csv.reader(f)\n \n # skip header in the csv file...\n rows_to_skip = 5\n for i in range(0, rows_to_skip):\n next(csv_reader)\n \n # 4) iterate over each row (i.e. 
each film) in csv file...\n for csv_row in csv_reader:\n pos = csv_row[0]\n title = csv_row[1]\n year = csv_row[2]\n letterboxd_url = csv_row[3]\n letterboxd_film_id = letterboxd_url.split('/')[-1]\n genres = []\n directors = []\n duration = 0\n language = \"\"\n imdb_url = \"\"\n imdb_avg_rating = \"\"\n imdb_num_votes = \"\"\n tmdb_url = \"\"\n poster_url = \"\"\n \n # parse data from letterboxd web page... (this takes ~1.21s)\n page = session.get(url=letterboxd_url, verify=False, stream=True)\n soup = BeautifulSoup(page.content, 'html.parser')\n \n # retrieve list of genres...\n div_genres = soup.select_one('#tab-genres')\n for link in div_genres.select('div.text-sluglist a.text-slug[href]'):\n if 'genre' in link['href']:\n genre = link.text.capitalize()\n if genre == 'Science-fiction':\n genre = 'Sci-fi'\n genres.append(genre)\n\n # retrieve duration\n textfooter = soup.select_one('section.col-main > p.text-footer').text\n if 'mins' in textfooter:\n minstext = textfooter.find('mins', 0, len(textfooter))\n duration = textfooter[minstext-5:minstext].strip()\n \n # retrieve the first spoken language\n div_details = soup.select_one('#tab-details')\n for link in div_details.select('div.text-sluglist a.text-slug[href]'):\n if 'language' in link['href']:\n language = link.text.capitalize()\n break\n \n # retrieve IMDB url\n footer = soup.select_one('section.col-main p.text-footer')\n url_buttons = footer.select('a.micro-button')\n for url in url_buttons:\n if 'imdb' in url.text.lower():\n # get IMDB URL\n imdb_url = url['href']\n elif 'tmdb' in url.text.lower():\n # get TMDB URL\n tmdb_url = url['href']\n if len(url_buttons) == 1:\n # if there's only 1 url_button, it's usually the IMDB link that's missing...\n if soup.select_one('#featured-film-header > h1').text == 'xxxHOLiC':\n imdb_url = 'https://www.imdb.com/title/tt16233104/'\n\n # retrieve avg rating on letterboxd\n # page_content_str = page.content.decode('utf-8')\n # letterboxd_avg_rating = soup.select_one('.average-rating')\n\n # Use the OMDB API (https://www.omdbapi.com/) to retrieve some additional data from IMDB website...\n omdb_api_key = '4af56bed' # apparently this expires after 1000 days (today is 07/02/2023, 1000 days from now is 03/11/2025... 
holy shit!)\n imdb_film_id = imdb_url.split('/')[-2]\n url = f'http://www.omdbapi.com/?i={imdb_film_id}&apikey={omdb_api_key}'\n imdb = requests.get(url=url, verify=False)\n imdb_json = json.loads(imdb.text)\n\n # retrieve avg IMD rating\n imdb_avg_rating = imdb_json['imdbRating']\n \n # retrieve number of votes on IMDB (we remove the comma, because react is fucking stupid and cant sort numbers when they contain commas)\n imdb_num_votes = imdb_json['imdbVotes'].replace(',', '')\n \n # retrieve poster from IMDB\n poster_url = imdb_json['Poster']\n if 'SX300' in poster_url:\n poster_url = poster_url.split('SX300')[0]\n \n # retrieve title from IMDB (because letterboxd parsing is shite!)\n # title = imdb_json['Title']\n \n # retrieve list of directors (because letterboxd parsing is shite!)\n directors = imdb_json['Director'].split(', ')\n \n # id of my review!\n titlev2 = Helpers.simplify_movie_title(title=title)\n review_id = f\"{titlev2}-{letterboxd_film_id}-review\"\n \n \"\"\"\n # iterate over \\\\img\\\\films\\\\\n screenshots = []\n directory_in_str = f'D:\\\\Programming-Projects\\\\nathansteele\\\\src\\\\img\\\\films';\n for subdir, dirs, files in os.walk(directory_in_str):\n for file in files:\n # find the right directory...\n title_from_directory_storing_screenshots = subdir.split('\\\\')[-1]\n titlev3 = Helpers.simplify_movie_title(title=title_from_directory_storing_screenshots)\n if titlev3 == titlev2:\n # get my screenshots...\n if 'screenshot' in file:\n screenshot = os.path.join(subdir, file)\n screenshots.append(screenshot)\n \n # use my custom poster if I put one there...\n if 'custom_poster' in file:\n poster_url = os.path.join(subdir, file)\n \"\"\"\n \n # DEBUGGING....\n print(f\" > {pos}: {title}\")\n #print(f' > Title = ({title})')\n #print(f' > IMDB url = ({imdb_url})')\n #print(f' > Language = ({language})')\n #print(f' > Duration = ({duration})')\n #print(f' > Genres = ({genres})')\n #print(f' > IMDB avg rating = ({imdb_avg_rating})')\n #print(f' > IMDB num votes = ({imdb_num_votes})')\n #print(f' > Poster = ({poster_url})')\n #print(f' > Directors = ({directors})')\n #print(f' > Tags = ({my_tags})')\n \n # append to list...\n letterboxd_list.append({\n 'letterboxdFilmId': letterboxd_film_id,\n 'imdbFilmId': imdb_film_id,\n 'letterboxdUrl': letterboxd_url,\n 'imdbUrl': imdb_url,\n 'posterUrl': poster_url,\n 'position': pos,\n 'title': title,\n 'year': year,\n 'duration': duration,\n 'language': language,\n 'imdbAvgRating': imdb_avg_rating,\n 'imdbNumVotes': imdb_num_votes,\n 'tmdbUrl': tmdb_url,\n 'directors': directors,\n 'genres': genres,\n 'reviewId': review_id\n })\n \n # clear json file first...\n open(path_to_json_output, 'w').close()\n \n # write new content...\n with open(path_to_json_output, 'w') as f:\n json.dump(letterboxd_list, f, indent=4)\n \n print(f'JSON export finished! 
-> {path_to_json_output}')", "def csv_write (data):\n \n csv_data=data[0:]\n csv1_data = open('backup.csv', 'a')\n csvwriter = csv.writer(csv1_data)\n\n count = 0\n\n for i in csv_data:\n if count == 0:\n header = i.keys()\n csvwriter.writerow(header)\n count += 1\n csvwriter.writerow(i.values())\n\n csv1_data.close()\n\n #http://blog.appliedinformaticsinc.com/how-to-parse-and-convert-json-to-csv-using-python/", "def test_read_objects(self):\n\n date1 = \"2021-09-17\"\n date2 = \"2021-09-16\"\n key1 = f\"{date1}.csv\"\n key2 = f\"{date2}.csv\"\n csv_content1 = \"\"\"col1,col2\n valA,valB\n \"\"\"\n csv_content2 = \"\"\"col1,col2\n valC,valD\n \"\"\"\n # the expected dataframe\n df1 = pd.read_csv(StringIO(csv_content1))\n df2 = pd.read_csv(StringIO(csv_content2))\n csv_expected = (pd.concat([df2, df1])).to_csv(index=False)\n # mock upload csv to s3\n self.bucket.put_object(Body=csv_content1, Key=key1)\n self.bucket.put_object(Body=csv_content2, Key=key2)\n csv_result = (self.src_bucket_connector.read_objects(\n \"2021-09-17\", \"all\")).to_csv(index=False)\n self.assertEqual(csv_expected, csv_result)", "def combine_source_target_files(source_dir, target_dir, out_dir, file_matcher, original_ids=None):\n source_files = get_all_files(source_dir, file_matcher)\n target_files = get_all_files(target_dir, file_matcher)\n target_file_bases = np.array(list(map(lambda x: os.path.basename(x).lower(), target_files)))\n id_var = 'id'\n dedup_vars = [id_var]\n all_txt_vars = ['text', 'user_description', 'user_location']\n RETURN_CHAR_MATCHER = re.compile('[\\n\\r\\t]')\n if(not os.path.exists(out_dir)):\n os.mkdir(out_dir)\n for source_file in source_files:\n # find matching target file\n source_file_base = os.path.basename(source_file).lower()\n target_file_base_idx = np.where(target_file_bases == source_file_base)[0]\n combined_data_file_name = os.path.join(out_dir, source_file_base)\n# if(not os.path.exists(combined_data_file_name)):\n # if target file exists, then combine source/target\n if(len(target_file_base_idx) > 0):\n target_file_base_idx = target_file_base_idx[0]\n target_file = target_files[target_file_base_idx]\n try:\n source_data = pd.read_csv(source_file, sep='\\t', compression='gzip')\n if('Unnamed: 0' in source_data.columns):\n source_data.drop('Unnamed: 0', axis=1, inplace=True)\n # fix column name mismatches\n source_data.rename(columns={'user_screen_name' : 'screen_name', 'user_id' : 'author_id'}, inplace=True)\n target_data = pd.read_csv(target_file, sep='\\t', compression='gzip')\n # combine!\n logging.info(f'combining files for {source_file_base}')\n combined_data = pd.concat([source_data, target_data], axis=0)\n # deduplicate!\n combined_data.drop_duplicates(dedup_vars, inplace=True)\n # clean\n combined_data.fillna('', inplace=True)\n # filter original IDs\n if(original_ids is not None):\n combined_data = combined_data[~combined_data.loc[:, id_var].isin(original_ids)]\n # remove return characters\n for txt_var_i in all_txt_vars:\n combined_data = combined_data.assign(**{\n txt_var_i : combined_data.loc[:, txt_var_i].apply(lambda x: RETURN_CHAR_MATCHER.sub('', str(x)))\n })\n logging.info('%d/%d source/target'%(source_data.shape[0], target_data.shape[0]))\n logging.info('combined data has %d/%d data'%(combined_data.shape[0], source_data.shape[0]+target_data.shape[0]))\n # write to file\n combined_data.to_csv(combined_data_file_name, sep='\\t', compression='gzip', index=False)\n except Exception as e:\n logging.info(f'going to skip file {source_file_base} because error {e}')\n # 
if target file does not exist, copy the source data\n else:\n logging.info(f'copying {source_file} without combining')\n source_data = pd.read_csv(source_file, sep='\\t', compression='gzip')\n if('Unnamed: 0' in source_data.columns):\n source_data.drop('Unnamed: 0', axis=1, inplace=True)\n # fix column name mismatches\n source_data.rename(columns={'user_screen_name' : 'screen_name', 'user_id' : 'author_id'}, inplace=True)\n source_data.to_csv(combined_data_file_name, sep='\\t', compression='gzip', index=False)", "def write_csv(invocations, job_information, out_file, null_string =\"NA\"):\n\n\t# assume every invocation of a task of a certain type takes the same number of input files\n\tnum_input_files = len(job_information[invocations[0]]['input_files'])\n\t#file_attributes = [\"input_file_%s_kb\"%i for i in range(1, num_input_files + 1)]\n\tfile_attributes = [\"host_name\", \"input_file_sum_kb\"]\n\tusage_attributes = ['utime', 'stime', 'maxrss', 'nvcsw', 'nivcsw', 'nswap', 'minflt', ] # 'majflt', 'inblock', 'outblock', 'nsignals', 'msgsnd', 'msgrcv', 'nswap'\n\tload_attributes = [\"min1\", \"min5\", \"min15\"]\n\tprocs_attributes = [\"total\", \"running\", \"sleeping\", \"waiting\", \"vmsize\", \"rss\"]\n\ttask_attributes = [\"total\", \"running\", \"sleeping\", \"waiting\",]\n\tram_attributes = [\"total\", \"free\", \"shared\", \"buffer\",]\n\tswap_attributes = [\"total\", \"free\",]\n\tmachine_attributes_headers = load_attributes + list(map(lambda a: \"procs_\"+a, procs_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"task_\"+a, task_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"ram_\"+a, ram_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"swap_\"+a, swap_attributes))\n\n\t# the csv column labels\n\theader = ['run_goup', 'run', 'transformation', 'mainjob_started', \"duration\"] + file_attributes + usage_attributes + machine_attributes_headers + ['out_size_kb', 'total_time_s', 'peak_memory_kb']\n#\theader = ['workflow','transformation', 'mainjob_started'] + file_attributes + usage_attributes + machine_attributes_headers + ['out_size_kb', 'total_time_s', 'peak_memory_kb']\n\n\twith open(out_file, 'w', newline='') as csvfile:\n\n\t\tspamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tspamwriter.writerow(header)\n\n\t\tfor job_info in [job_information[job_id] for job_id in invocations]:\n\n\t\t\tfile_sizes = [float(file['size']) for file in job_info['input_files']]\n\t\t\tusage_values = [float(job_info['usage'][attr]) for attr in usage_attributes]\n#\n\t\t\ttry:\n\t\t\t\tout_size = sum([float(file['size']) for file in job_info['output_files']])\n\t\t\texcept KeyError as k:\n\t\t\t\tout_size = null_string\n#\n\t\t\tpeak_mem = float(job_info['usage']['maxrss'])\n\t\t\tmachine_values = []\n\n\t\t\tfor machine_attrs, attrs in [(\"load\", load_attributes), (\"procs\", procs_attributes), (\"task\", task_attributes), (\"ram\", ram_attributes), (\"swap\", swap_attributes)]:\n\t\t\t\tfor attr in attrs:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmachine_values.append(job_info[machine_attrs][attr])\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tmachine_values.append(null_string)\n\n#\t\t\tdata = [job_info[\"workflow\"], job_info[\"transformation\"], job_info['mainjob_started_ts']] + file_sizes + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n\t\t\tdata = [job_information[\"run_group\"], job_information[\"run\"], job_info[\"transformation\"], job_info['mainjob_started_ts'], 
job_info[\"mainjob_duration\"]] + [job_info['host_name']] + [sum(file_sizes)] + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n#\t\t\tdata = [job_info[\"transformation\"], job_info['mainjob_started_ts']] + file_sizes + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n\t\t\tspamwriter.writerow(data)", "def _in_place(self, path, dialect, encoding):\n tmpfd, tmpfname = tempfile.mkstemp(prefix=\"clevercsv_\", suffix=\".csv\")\n tmpid = os.fdopen(tmpfd, \"w\", newline=\"\", encoding=encoding)\n self._write_to_stream(path, tmpid, dialect, encoding)\n tmpid.close()\n\n previous_sha1 = sha1sum(path)\n new_sha1 = sha1sum(tmpfname)\n if previous_sha1 == new_sha1:\n os.unlink(tmpfname)\n return 0\n\n shutil.move(tmpfname, path)\n return 2", "def append_files(in_file1, character, in_file2, out_file):\n return_data = 0\n\n write_data = ''\n\n i = 0\n try:\n with open(in_file1, 'rt') as fi1:\n lines1 = fi1.readlines() # Read all the lines in fi1 as a tuple\n \n with open(in_file2, 'rt') as fi2:\n lines2 = fi2.readlines() # Read all the lines in fi2 as a tuple\n \n with open(out_file, 'at') as fo:\n fo.seek(0,2)\n while i < len(lines1):\n lines1[i] = lines1[i].rstrip('\\n')\n #lines1[i] = lines1[i].rstrip('\\r')\n fo.write(lines1[i] + character + lines2[i])\n i = i + 1\n print(write_data)\n except IOError:\n print(\"Error in reading/writing file.\")\n return_data = 2\n else:\n print('Operation completed successfully.')\n return_data = 1\n finally:\n fi2.close()\n fi1.close()\n fo.close()\n print(\"done\")\n return return_data", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def _add_output_files(self):\n self._output_files = []\n base = os.path.join(os.path.dirname(self.in_fpath),\n os.path.splitext(os.path.basename(self.in_fpath))[0])\n\n output_path = f'{base}_out.csv'\n\n suffix = 2\n while os.path.exists(output_path):\n self._output_files.append(output_path)\n if os.path.getsize(output_path) < self.limit_fsize:\n return\n output_path = f'{base}_out_{suffix}.csv'\n suffix += 1\n\n open(output_path, 'a').close()\n self._output_files.append(output_path)", "def cat_json(output_filename, input_filenames):\n\twith open(output_filename, \"w\") as outfile:\n\t\tfirst = True\n\t\tcounter = -1\n\t\tfor infile_name in input_filenames:\n\t\t\twith open(infile_name) as infile:\n\t\t\t\tif first:\n\t\t\t\t\toutfile.write('{')\n\t\t\t\t\tfirst = False\n\t\t\t\telse:\n\t\t\t\t\toutfile.write(',')\n\t\t\t\toutfile.write(mangle(infile.read(), counter))\n\t\t\t\tcounter -= 1\n\t\toutfile.write('}')", "def post_process_output_file():\n parsed_data = []\n unparseable_data = []\n\n with open('../output/part-00000', 'r') as input_file:\n for line in input_file:\n line = line.strip()\n try:\n csv_splits = line.split(',')\n csv_splits[0] = int(csv_splits[0])\n # parsed_data is a list of lists\n parsed_data.append(csv_splits)\n except ValueError:\n unparseable_data.append(line)\n parsed_data.sort()\n\n with open('../output/titanic_test_data.csv', 'w') as output_file:\n # start with lines that couldn't be parsed\n # hopefully this will only be the original header\n for line in unparseable_data:\n output_file.write(\"%s\\n\" % line)\n for line in parsed_data:\n output_file.write(\"%d,%s\\n\" % (line[0], line[1]))", "def mani2():\r\n #open the original file\r\n with 
open('csvfile1.csv', 'r') as csvfile1:\r\n #read the csv file\r\n csv_reader = csv.reader(csvfile1)\r\n\r\n #open the new csv file \r\n with open('csvfile2.csv', 'w') as csvfile2:\r\n #write to it with another delimeter\r\n csv_writer = csv.writer(csvfile2, delimiter='-')\r\n #loop through the line\r\n for line in csv_reader:\r\n csv_writer.writerow(line)", "def write_csv_data(csv_path, data_iterator, target_attrs):\n\n if not os.path.isdir(csv_path.split('/')[0]):\n os.makedirs('./csv')\n\n if os.path.exists(csv_path):\n os.remove(csv_path)\n\n f = open(csv_path, 'w+')\n w = csv.writer(f, delimiter=',')\n\n w.writerow(target_attrs)\n\n num_written = 0\n ids = set()\n\n print('Staged to write data to %s' % csv_path)\n\n for d in data_iterator:\n d['id'] = int(d['id']) #TODO: alter data so all ids are already int\n\n if d['id'] in ids:\n continue # if already seen id\n\n ids.add(d['id'])\n\n save_data = [value for (key, value) in d.items() if key in target_attrs]\n\n if len(save_data) == 1: # if from connections TODO: clean up logic\n save_data = save_data[0]\n\n w.writerow(save_data)\n num_written += 1\n\n print('Wrote %i rows to %s' % (num_written, csv_path))\n\n f.close()\n print('Done\\n')", "def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)", "def update_csv():\n return os.listdir('./data')", "def test_36_bulk_csv_import_dup_header(self, Mock, mock):\r\n empty_file = FakeRequest('Foo,Bar,Foo\\n1,2,3', 200,\r\n {'content-type': 'text/plain'})\r\n Mock.return_value = empty_file\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',\r\n 'formtype': 'csv'},\r\n follow_redirects=True)\r\n msg = \"The file you uploaded has two headers with the same name\"\r\n assert msg in res.data", "def country_lookup(input_file, output_file, field_index):\n # Load existing output into memory\n # This is so if script crashes half way through it picks up where it left off.\n # (Touch output file before starting)\n existingOutput = []\n with open(output_file) as read_out_fp:\n csvreader = csv.reader(read_out_fp, delimiter=CSV_DELIMITER, quotechar=CSV_QUOTECHAR)\n header = next(csvreader)\n for row in csvreader:\n existingOutput.append(row)\n\n # Main loop over input\n with open(input_file) as in_fp:\n with open(output_file, \"w\", newline=\"\") as out_fp:\n csvreader = csv.reader(in_fp, delimiter=CSV_DELIMITER, quotechar=CSV_QUOTECHAR)\n csvwriter = csv.writer(out_fp, delimiter=CSV_DELIMITER, quotechar=CSV_QUOTECHAR, quoting=csv.QUOTE_MINIMAL)\n header = next(csvreader)\n header.append('COUNTRY-FROM-LAT-LNG')\n csvwriter.writerow(header)\n\n for row in csvreader:\n\n # If there is data for this already in output, just use that and don't look up again\n if len(existingOutput) > 0:\n\n existing_row = existingOutput.pop(0)\n\n # The existing row we got from output should match the input row we are trying to process\n # Let's check that!\n if existing_row[0] != row[0] or \\\n existing_row[1] != row[1] or \\\n existing_row[2] != row[2] or \\\n existing_row[3] != row[3] or \\\n existing_row[4] != row[4] or \\\n existing_row[5] != row[5]:\n print(\"EXISTING ROW 
PROBLEM\")\n print(', '.join(existing_row))\n print(', '.join(row))\n exit(-1)\n\n csvwriter.writerow(existing_row)\n\n # No data already - we need to look up ourselves\n else:\n\n # Some rows are empty latlng\n if row[field_index].strip():\n\n latlngfielddata = row[field_index].strip()\n # Some data entries have multiple spaces between lat lng, some have 3,\n # So this line is done twice to always bring it back to 1 space.\n latlngfielddata = latlngfielddata.replace(LATLNG_FIELD_SPLIT_CHAR+LATLNG_FIELD_SPLIT_CHAR, LATLNG_FIELD_SPLIT_CHAR)\n latlngfielddata = latlngfielddata.replace(LATLNG_FIELD_SPLIT_CHAR+LATLNG_FIELD_SPLIT_CHAR, LATLNG_FIELD_SPLIT_CHAR)\n lat, lng = latlngfielddata.split(LATLNG_FIELD_SPLIT_CHAR)\n url = 'http://api.geonames.org/countryCode?lat={}&lng={}&username={}'.format(lat.strip(), lng.strip(), GEONAMES_USERNAME)\n r = requests.get(url)\n if r.status_code != 200:\n print(\"API PROBLEM\")\n print(', '.join(row))\n print(r.status_code)\n print(r.text)\n exit(-1)\n\n if r.text.strip() == 'ERR:15:no country code found':\n row.append('')\n elif r.text.startswith(\"ERR\"):\n print(\"API PROBLEM\")\n print(', '.join(row))\n print(r.status_code)\n print(r.text)\n exit(-1)\n else:\n row.append(r.text.strip())\n else:\n row.append('')\n\n csvwriter.writerow(row)\n\n time.sleep(SLEEP)\n\n #exit()", "def create_metadata_shell_for_csv(csv_file_path: str) -> str:\n metadata_file = f\"{csv_file_path}-metadata.json\"\n if path.exists(metadata_file):\n raise Exception(f\"Metadata file {metadata_file} already exists.\")\n if not path.exists(csv_file_path):\n raise Exception(f\"CSV file {csv_file_path} does not exist.\")\n\n label = map_file_path_to_label(csv_file_path)\n concept_scheme_uri = generate_concept_scheme_root_uri(label)\n\n # Just inserting basic structure at this point as already exists in standard files. Additional metadata will be\n # added as the script continues to run.\n metadata = {\n \"@context\": \"http://www.w3.org/ns/csvw\",\n \"@id\": concept_scheme_uri,\n \"url\": csv_file_path,\n \"rdfs:label\": label,\n \"dc:title\": label,\n \"tableSchema\": {\n \"columns\": [],\n },\n \"prov:hadDerivation\": {\n \"@id\": concept_scheme_uri,\n \"@type\": [\n \"skos:ConceptScheme\",\n f\"{pmdcat_base_uri}DatasetContents\"\n ]\n }\n }\n\n table_schema: Dict = metadata[\"tableSchema\"]\n columns: List[Dict] = table_schema[\"columns\"]\n\n with open(csv_file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\", quotechar=\"\\\"\")\n column_names: List[str] = next(reader)\n\n for column_name in column_names:\n column = generate_schema_for_column(column_name, concept_scheme_uri)\n columns.append(column)\n\n columns.append({\n \"virtual\": True,\n \"propertyUrl\": \"rdf:type\",\n \"valueUrl\": \"skos:Concept\"\n })\n columns.append({\n \"virtual\": True,\n \"propertyUrl\": \"skos:inScheme\",\n \"valueUrl\": concept_scheme_uri\n })\n\n if \"notation\" in [c.lower() for c in column_names]:\n override(table_schema, {\n \"primaryKey\": \"notation\",\n \"aboutUrl\": concept_scheme_uri + \"/{notation}\"\n })\n else:\n print(\"WARNING: could not determine primary key. As a result, `aboutUrl` property is not specified and \" +\n \"so each row will not have a true URI. This is basically required. 
Manual configuration required.\")\n\n with open(metadata_file, 'w+') as file:\n file.write(json.dumps(metadata, indent=4))\n\n return str(metadata_file)", "def add_users_from_file(self, input_file, out_file):\n\n csv_file_read = open(input_file, 'r')\n rows_dict = csv.DictReader(csv_file_read)\n\n # Process file entries, appending to the file one at a time\n for row in rows_dict:\n csv_file_write = open(out_file, 'a', newline='')\n writer = csv.DictWriter(csv_file_write, rows_dict.fieldnames)\n print('---\\nProcessing: ' + row['firstName'] + ' ' + row['surname'])\n if not self.password_validates(row['newPassword']):\n comment = \"ICE won't accept this password even if i try it!\"\n else:\n comment = self.add_user(row)\n print(comment)\n # i = datetime.now()\n # row['Status'] = comment + ' (%s/%s/%s %s:%s)' % (i.day, i.month, i.year, i.hour, i.minute)\n row['Status'] = comment + ' (' + datetime.now().strftime('%d %b %Y %H:%M') + ')' # 01 Jan 1900 19:00\n writer.writerow(row)\n csv_file_write.close()\n\n csv_file_read.close()\n self.driver.quit()", "def process_csv_data(file_for_processing: FileForProcessing):\n \n if file_for_processing.file_to_process.os_type == ANDROID_API:\n # Do fixes for Android\n if file_for_processing.data_type == ANDROID_LOG_FILE:\n file_for_processing.file_contents = fix_app_log_file(\n file_for_processing.file_contents, file_for_processing.file_to_process.s3_file_path\n )\n \n header, csv_rows_list = csv_to_list(file_for_processing.file_contents)\n if file_for_processing.data_type != ACCELEROMETER:\n # If the data is not accelerometer data, convert the generator to a list.\n # For accelerometer data, the data is massive and so we don't want it all\n # in memory at once.\n csv_rows_list = list(csv_rows_list)\n \n if file_for_processing.data_type == CALL_LOG:\n header = fix_call_log_csv(header, csv_rows_list)\n if file_for_processing.data_type == WIFI:\n header = fix_wifi_csv(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n else:\n # Do fixes for iOS\n header, csv_rows_list = csv_to_list(file_for_processing.file_contents)\n \n if file_for_processing.data_type != ACCELEROMETER:\n csv_rows_list = list(csv_rows_list)\n \n # Memory saving measure: this data is now stored in its entirety in csv_rows_list\n file_for_processing.clear_file_content()\n \n # Do these fixes for data whether from Android or iOS\n if file_for_processing.data_type == IDENTIFIERS:\n header = fix_identifier_csv(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n if file_for_processing.data_type == SURVEY_TIMINGS:\n header = fix_survey_timings(header, csv_rows_list, file_for_processing.file_to_process.s3_file_path)\n \n header = b\",\".join([column_name.strip() for column_name in header.split(b\",\")])\n if csv_rows_list:\n return (\n # return item 1: the data as a defaultdict\n binify_csv_rows(\n csv_rows_list,\n file_for_processing.file_to_process.study.object_id,\n file_for_processing.file_to_process.participant.patient_id,\n file_for_processing.data_type,\n header\n ),\n # return item 2: the tuple that we use as a key for the defaultdict\n (\n file_for_processing.file_to_process.study.object_id,\n file_for_processing.file_to_process.participant.patient_id,\n file_for_processing.data_type,\n header\n )\n )\n else:\n return None, None", "def merge_duplicate_psm_rows(\n csv_file_path=None,\n psm_counter=None,\n psm_defining_colnames=None,\n psm_colnames_to_merge_multiple_values={},\n joinchar=\"<|>\",\n overwrite_file=True,\n):\n rows_to_merge_dict = 
defaultdict(list)\n\n if overwrite_file:\n tmp_file = csv_file_path + \".tmp\"\n os.rename(csv_file_path, tmp_file)\n out_file = csv_file_path\n else:\n tmp_file = csv_file_path\n out_file = csv_file_path.strip(\".csv\") + \"_merged_duplicates.csv\"\n UNode.print_info(\"Merging rows of the same PSM...\", caller=\"postflight\")\n # print('Merging rows of the same PSM...')\n csv_kwargs = {}\n if sys.platform == \"win32\":\n csv_kwargs[\"lineterminator\"] = \"\\n\"\n else:\n csv_kwargs[\"lineterminator\"] = \"\\r\\n\"\n with open(tmp_file, \"r\") as tmp, open(out_file, \"w\", newline=\"\") as out:\n tmp_reader = csv.DictReader(tmp)\n writer = csv.DictWriter(out, fieldnames=tmp_reader.fieldnames, **csv_kwargs)\n writer.writeheader()\n for row in tmp_reader:\n psm = tuple([row[x] for x in psm_defining_colnames if x in row.keys()])\n # each unique combination of these should only have ONE row!\n # i.e. combination of seq+spec+score\n if psm_counter[psm] == 1:\n # no duplicate = no problem, we can just write the row again\n writer.writerow(row)\n elif psm_counter[psm] > 1:\n # we have to collect all rows of this psm, and merge + write\n # them later!\n rows_to_merge_dict[psm].append(row)\n else:\n raise Exception(\"This should never happen.\")\n # finished parsing the old unmerged unified csv\n for rows_to_merge in rows_to_merge_dict.values():\n writer.writerow(\n merge_rowdicts(\n rows_to_merge,\n psm_colnames_to_merge_multiple_values,\n joinchar=joinchar,\n )\n )\n # remove the old unified csv that contains duplicate rows\n if overwrite_file:\n os.remove(tmp_file)\n UNode.print_info(\"Done.\", caller=\"postflight\")\n return out_file", "def screenshot_csv(csv_in_name, csv_out_name, pics_out_path, screenshot_method, timeout_duration, lazy, be_lazy):\n\n with open(csv_in_name, 'r') as csv_file_in:\n csv_reader = csv.reader(csv_file_in)\n with open(csv_out_name, 'w+') as csv_file_out:\n csv_writer = csv.writer(csv_file_out, delimiter=',', quoting=csv.QUOTE_ALL)\n csv_writer.writerow([\"archive_id\", \"url_id\", \"date\", \"succeed_code\", \"archive_url\"])\n\n count = 0\n compare = '0'\n for line in csv_reader:\n if count == 0: # skip the header\n count += 1\n continue\n\n archive_id = str(line[0])\n url_id = line[1]\n date = line[2]\n url = line[3]\n\n if url == \"\":\n continue\n\n if be_lazy is True: # makes running faster by not doing hundreds of archive sites\n if url_id != compare:\n count = 0\n compare = url_id\n else:\n count += 1\n if count > lazy:\n continue\n\n print(\"\\nurl #{0} {1}\".format(url_id, url))\n logging.info(\"url #{0} {1}\".format(url_id, url))\n\n succeed = take_screenshot(archive_id, url_id, date, url, pics_out_path, screenshot_method,\n timeout_duration)\n\n csv_writer.writerow([archive_id, url_id, date, succeed, url])", "def build_csv(block_hash_or_file, filename_csv, local=False, append=False):\n\tif append == True:\n\t\t# open CSV file in \"a\" mode\n\t\tfile = open(filename_csv, \"a\")\n\telse:\n\t\t# open a fresh CSV file and write headers\n\t\tfile = open(filename_csv, \"w\")\n\t\tfile.write(\"InputAddress,OutputAddress,TransactionIndex,ExpectedValue\\r\\n\")\n\tif local == True:\n\t\t# load block JSON from local file\n\t\twith open(block_hash_or_file, \"rb\") as fp:\n\t\t\tblock = json.load(fp)\n\telse:\n\t\t# get block JSON from blockchain.info API\n\t\tr = req.get(\"https://blockchain.info/rawblock/\"+block_hash_or_file)\n\t\tblock = json.loads(r.content)\n\t# for each transaction in block\n\tfor tx in block[\"tx\"]:\n\t\t# get addresses and 
values\n\t\tin_addr, in_value, out_addr, out_value = get_addr_value(tx)\n\t\t# if no input addresses\n\t\tif len(in_addr) == 0:\n\t\t\t# for each output address\n\t\t\tfor j in range(len(out_addr)):\n\t\t\t\t# record as coin base transaction\n\t\t\t\tfile.write(\"base,\"+out_addr[j]+\",\"+str(tx[\"tx_index\"])+\",\"+str(out_value[j])+\"\\r\\n\")\n\t\t# if input addresses exist\n\t\telse:\n\t\t\t# get total input value\n\t\t\ttotal_input = sum(in_value)\n\t\t\t# for each input address and each output address\n\t\t\tfor i in range(len(in_addr)):\n\t\t\t\tfor j in range(len(out_addr)):\n\t\t\t\t\t# record transaction with _expected_ value\n\t\t\t\t\tfile.write(in_addr[i]+\",\"+out_addr[j]+\",\"+str(tx[\"tx_index\"])+\",\"+str(out_value[j]*in_value[i]/total_input)+\"\\r\\n\")\n\tfile.close()", "def edit_csv(input_path, config_file):\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file):\n confirm = click.confirm(\n \"\\n%s doesn't exist. Do you want to create it\" % csv_file, default=True)\n if confirm:\n csvHandler(csv_file).resetCSV(config_file=config_file)\n else:\n return\n\n GUI().csvEditor(config_file=config_file, csv_file=csv_file)", "def process_input_data(input_data_path):\n if os.path.isdir(input_data_path):\n input_data_glob = glob.glob(input_data_path + \"/*.csv\")\n else:\n if is_gcs_path(input_data_path):\n # Download the input to a local\n with tempfile.NamedTemporaryFile() as hf:\n input_data = hf.name\n\n logging.info(\"Copying %s to %s\", input_data_path, input_data)\n input_data_gcs_bucket, input_data_gcs_path = split_gcs_uri(\n input_data_path)\n\n logging.info(\"Download bucket %s object %s.\", input_data_gcs_bucket,\n input_data_gcs_path)\n bucket = storage.Bucket(storage.Client(), input_data_gcs_bucket)\n storage.Blob(input_data_gcs_path, bucket).download_to_filename(\n input_data)\n else:\n input_data = input_data_path\n\n ext = os.path.splitext(input_data)[-1]\n if ext.lower() == '.zip':\n zip_ref = zipfile.ZipFile(input_data, 'r')\n zip_ref.extractall('.')\n zip_ref.close()\n # TODO: Hardcoding the file in the Archive to use is brittle.\n # We should probably just require the input to be a CSV file.:\n csv_file = 'stackoverflow-questions.csv'\n else:\n csv_file = input_data\n\n input_data_glob = glob.glob(csv_file)\n\n return input_data_glob", "def rename(self, csv_path):\n # Load from disk #\n header = csv_path.first\n header = header.split(',')\n # Modify #\n header = map(self.mapping.get, header, header)\n # Write to disk #\n header = ','.join(header)\n csv_path.remove_first_line()\n csv_path.prepend(header)", "def test_to_csv_with_no_rows_returns_none(self):\n output = row_handling.to_csv(rows=[], csv_path=self.csv_path)\n assert output is None", "def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)", "def main(args):\n \n args_are_valid, input_filepath, output_filepath, base_url, message = handle_arguments(args)\n if not args_are_valid:\n return print(message)\n \n with open(input_filepath, newline=\"\") as input_csv:\n csvreader = csv.reader(input_csv, delimiter=\",\",)\n\n needed_input_columns = [\"Account ID\",\"First Name\", \"Created On\"]\n needed_output_columns = [\"Account ID\",\"First 
Name\", \"Created On\", \"Status\", \"Status Set On\"]\n headers = next(csvreader) #grab first row as headers\n if not set(needed_input_columns).issubset(headers):\n print('ERROR - input csv must contain columns [\"Account ID\",\"First Name\", \"Created On\"] as headers')\n\n with open(output_filepath, mode = \"w\", newline = \"\") as output_csv:\n csvwriter = csv.DictWriter(output_csv, fieldnames = needed_output_columns)\n csvwriter.writeheader()\n\n index_of = {}\n for index,header in enumerate(headers):\n index_of[header] = index\n write_dict = {}\n\n #Loop through inputfile\n for row in csvreader:\n still_valid = True\n if len(row) != len(headers):\n message = \"ERROR - csv row has incomplete data\"\n still_valid = False\n if still_valid:\n # extract data from row, columns can be in any order\n for column in needed_input_columns:\n write_dict[column] = row[index_of[column]]\n still_valid, write_dict, message = verify_and_clean_input(write_dict)\n if still_valid:\n write_dict, message = extend(write_dict, query(write_dict[\"Account ID\"], base_url))\n #only write to csv if all input data valid, query data nulled out if invalid\n csvwriter.writerow(write_dict) \n print(message)\n\n output_csv.close() \n input_csv.close()" ]
[ "0.66875005", "0.65311", "0.64833546", "0.6277665", "0.6257637", "0.5997841", "0.5968701", "0.58972067", "0.5896129", "0.5890339", "0.58322793", "0.5827144", "0.580506", "0.57693833", "0.5767864", "0.5731467", "0.57120144", "0.56666887", "0.56376743", "0.5627514", "0.5622011", "0.55655795", "0.5558454", "0.55536425", "0.5543435", "0.5540802", "0.55283374", "0.5519627", "0.5484722", "0.54805857", "0.54736435", "0.54683733", "0.546728", "0.5459843", "0.5451182", "0.5427439", "0.5416519", "0.5401572", "0.5396415", "0.5380205", "0.53686243", "0.5357779", "0.53525037", "0.53493094", "0.5340407", "0.532943", "0.53237325", "0.53202313", "0.53191924", "0.5319142", "0.5318206", "0.5317704", "0.53100264", "0.53036606", "0.52951187", "0.5292122", "0.52804625", "0.527709", "0.5272894", "0.525811", "0.5253722", "0.52493525", "0.52442986", "0.52408224", "0.5238414", "0.52354735", "0.5231664", "0.52291757", "0.52252746", "0.5224602", "0.5220511", "0.52202374", "0.52129245", "0.5202057", "0.520123", "0.519624", "0.5192204", "0.51879096", "0.518607", "0.5181876", "0.517168", "0.5153341", "0.51411426", "0.5138515", "0.5132591", "0.512923", "0.512739", "0.5126654", "0.5126456", "0.5125947", "0.5123197", "0.5116913", "0.5113852", "0.51116437", "0.5111561", "0.5105923", "0.50949603", "0.5077662", "0.5076743", "0.5075819" ]
0.69841707
0
Description: When given a csv_filepath and an output_filepath and one of the columns has a blank character, Expected Result: creates a json file ignoring the blank column
def test_blank_column(self): # Create a temporary directory for test files temp_dir = "test_files/observed" os.makedirs(temp_dir, exist_ok=True) # Create a test CSV file csv_filepath = os.path.join(temp_dir, "Abadia-BA_-11.56_-37.52.csv") with open(csv_filepath, "w", newline="") as csv_file: writer = csv.writer(csv_file, delimiter=";") writer.writerow(["periods", "precipitation", "temperature", ""]) writer.writerow(["2023-01-01", "5", "25", ""]) writer.writerow(["2023-01-02", "10", "23", ""]) # Define the expected output JSON file path expected_output_filepath = os.path.join(temp_dir, "BA_Abadia.json") # Call the function under test extractor.csv_to_json(csv_filepath, temp_dir) # Verify that the output JSON file exists assert os.path.exists(expected_output_filepath) # Load the output JSON file with open(expected_output_filepath, "r") as json_file: json_data = json.load(json_file) # Verify the contents of the JSON file expected_data = { "city": "Abadia", "state": "BA", "coordinates": ["-11.56", "-37.52"], "observed": { "periods": ["2023-01-01", "2023-01-02"], "precipitation": ["5", "10"], "temperature": ["25", "23"] } } assert json_data == expected_data # Clean up the temporary directory and files os.remove(csv_filepath) os.remove(expected_output_filepath) os.rmdir(temp_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_to_csv_with_no_rows_returns_none(self):\n output = row_handling.to_csv(rows=[], csv_path=self.csv_path)\n assert output is None", "def write_csv_file(csv_output_file, full_data):\n j = 0\n csv_file_path = make_dir(csv_output_file)\n\n # csv_file_path = os.path.join(csv_file_path, csv_output_file)\n try:\n with open(csv_file_path, 'w', newline='') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',')\n csvwriter.writerow(['tripId', 'agency_tripId', 'itinerary_nb', 'modes', 'actual_time', 'perceived_time',\n 'start_time', 'end_time', 'walk_time', 'walk_distance','transit_time', 'waiting_time',\n 'boardings', 'bus_lines_numbers', 'boarding_stop_ids', 'debarquer_stop_ids'])\n print(\"======================================\")\n print(\"= Creating CSV file from JSON files =\")\n print(\"======================================\")\n for id in full_data.keys(): # just so we can get all the ids\n data = full_data[id]\n j += 1\n\n printrp('( ' + str(j) + ' / ' + str(len(full_data) - 1) + ' )') if found_CmdPrinter else print(j)\n\n if 'error' in data:\n # if no itineraries were find (ie. there was an error), write the error id and error message\n # note : msg is the short message (eg. PATH_NOT_FOUND), message is the long description\n csvwriter.writerow([id] + ['error'] + [str(data['error']['id'])] +\n [str(data['error']['message'])] + [str(data['error']['msg'])])\n else:\n for itinerary_nb in range(len(data['plan']['itineraries'])):\n\n boarding = 0\n busNbs = \"\"\n boarding_stop_ids = \"\"\n debarquer_stop_ids = \"\"\n agency_trip_ids = \"\"\n modes = \"\"\n for leg in data['plan']['itineraries'][itinerary_nb]['legs']:\n modes += leg['mode'] + ';'\n if leg['mode'] == 'BUS':\n # every time a BUS step is included in the itineraries :\n # add 1 to the boarding counter\n # add the bus line number to busNbs\n # add the stop_ids to boarding_stop_ids and debarquer_stop_ids\n boarding += 1\n busNbs += leg['route'] + \";\"\n\n boarding_stop_ids += str(leg['from']['stopCode']) + ';'\n debarquer_stop_ids += str(leg['to']['stopCode']) + ';'\n agency_trip_ids += str(leg['tripId'].split(':')[1]) + ';'\n # we need to .split that line because tripId is given as agencyId:tripId\n\n\n busNbs = busNbs[:-1] # removing the trailing semi-colon\n boarding_stop_ids = boarding_stop_ids[:-1]\n debarquer_stop_ids = debarquer_stop_ids[:-1]\n agency_trip_ids = agency_trip_ids[:-1]\n modes = modes[:-1]\n startTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['startTime']/1000))\n endTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(data['plan']['itineraries'][itinerary_nb]['endTime']/1000))\n # those are /1000 because OTP gives Epoch time in milliseconds\n\n walkTime = data['plan']['itineraries'][itinerary_nb]['walkTime']\n transitTime = data['plan']['itineraries'][itinerary_nb]['transitTime']\n waitingTime = data['plan']['itineraries'][itinerary_nb]['waitingTime']\n\n # Write all the information inside a csv file\n csvwriter.writerow([id,\n str(agency_trip_ids),\n str(itinerary_nb+1),\n str(modes),\n str(data['plan']['itineraries'][itinerary_nb]['duration']),\n str(get_perceived_time(walkTime, transitTime, waitingTime)),\n str(startTime),\n str(endTime),\n str(walkTime),\n str(data['plan']['itineraries'][itinerary_nb]['walkDistance']),\n str(transitTime),\n str(waitingTime),\n str(boarding),\n str(busNbs),\n str(boarding_stop_ids),\n str(debarquer_stop_ids)])\n except PermissionError:\n print('ERROR - Cannot write to CSV file. 
The file might be used by another app.')\n exit()\n except OSError:\n print(\"ERROR - Couldn't open file \" + csv_file_path + \". Please verify the file's permissions.\")\n print('( ' + str(j-1) + ' / ' + str(len(full_data) - 1) + ' )')", "def create_json_from_csv(csv_file, delimiter, cols_delimiter, keep, dic_types, infer_types, max_docs, json_file, per_line):\n\n # Get header of csv\n header_csv = get_header_csv(csv_file, cols_delimiter)\n\n # Create structure of json\n print(' [INFO] Creating json\\'s structure')\n jstruct = create_json_structure(header_csv, delimiter)\n print(jstruct)\n # Read csv line by line and create list of json\n print(' [INFO] Filling json') \n js_content = []\n with open(csv_file, 'r') as f:\n reader = csv.DictReader(f, delimiter=cols_delimiter)\n i = 0\n beg = True\n end = True\n # Prepare output file if dump in one file\n if max_docs == -1 and not per_line:\n beg = False\n end = False\n with open(json_file, 'w') as jsf:\n jsf.write('[\\n')\n for row in reader:\n if infer_types:\n row = {x: infer_type(row[x]) for x in row}\n jexample = copy.deepcopy(jstruct)\n js_content.append(create_json_example(row, header_csv, jexample, delimiter, keep, dic_types))\n\n i += 1\n # Dump json in streaming\n if (max_docs == -1) and ((i % 10000) == 0):\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n elif (max_docs != -1) and (i % max_docs) == 0:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, end)\n js_content = []\n\n # Dump last jsons\n if js_content:\n dump(json_file, js_content, max_docs, per_line, i // max_docs, beg, True)\n\n print(' [INFO] Json{} successfully created and dumped'.format('s' if (max_docs != -1) else ''))\n\n return", "def csv_to_json(csv_filename):\n csv_trimmed = csv_filename[:-3]\n json_added = csv_trimmed + 'json'\n return json_added", "def test_csv_to_json():\r\n json_dict = {\r\n \"covariates\":{ \r\n \"value\":{\r\n \"subject0\": {\r\n \"attribute0\": 3.0,\r\n \"attribute1\": 12.0\r\n },\r\n \"subject1\": {\r\n \"attribute0\": 1.2,\r\n \"attribute1\": 10.9\r\n }\r\n }\r\n },\r\n \"data\":{\r\n \"fulfilled\": True,\r\n \"value\": {\r\n \"type\": [\"float\"],\r\n \"value\": [\r\n \"attribute0\",\r\n \"attribute1\"\r\n ]\r\n }\r\n },\r\n \"lambda\":{\r\n \"fulfilled\": True,\r\n \"value\": 0\r\n }\r\n }\r\n json_string = \"[\" + json.dumps(json_dict).replace(' ', '').replace('\\n', '') + \"]\"\r\n directory = os.path.join(os.getcwd(), \"test/\")\r\n lambda_ = \"0\"\r\n data_type = [\"float\"]\r\n data_vars = [\"attribute0\", \"attribute1\"]\r\n assert csv_to_json_(directory, lambda_, data_type, data_vars).replace(' ', '').replace('\\n', '') == json_string", "async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))", "def initial_csv_wrangling(csv_file):\n df = pd.read_csv(csv_file)\n df = df.fillna('')\n columns = list(df.columns)\n\n # check that 
\"url\" column exists (required)\n if 'url' not in columns:\n raise Exception('Input csv file requires a \"url\" column, which does not seem to exist. Exiting.')\n\n # check if \"pos_concepts\" column exists and parse accordingly (not required)\n if 'pos_concepts' in columns:\n print('Found \"pos_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['pos_concepts'] = df['pos_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"neg_concepts\" column exists and parse accordingly (not required)\n if \"neg_concepts\" in columns:\n print('Found \"neg_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['neg_concepts'] = df['neg_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"metadata\" column exists and load accordingly (not required)\n if \"metadata\" in columns:\n print('Found \"metadata\" column. Attempting to ingest.')\n try:\n df['metadata'] = df['metadata'].replace('','{}').map(json.loads)\n except:\n raise Exception('Value in \"metadata\" column does not seem to be a properly JSON formatted str.')\n\n return df", "def create_dataset(input_file_path, output_file_path):\n col_index_map = {'user_id': 0, 'session_id': 1, 'timestamp': 2, 'step': 3, 'action_type': 4, 'reference': 5,\n 'platform': 6, 'city': 7, 'device': 8,\n 'current_filters': 9, 'impressions': 10, 'prices': 11}\n flat_dict = dict()\n with open(input_file_path, 'r') as csvFile:\n reader = csv.reader(csvFile)\n header = next(reader)\n col_names = [col_name for col_name in col_index_map.keys()]\n col_names.pop(0)\n index = 0\n for row in tqdm(reader):\n if len(flat_dict) > 40000:\n index += 1\n with open(output_file_path + \"_\" + str(index) + \".json\", \"w\") as file:\n json.dump(flat_dict, file)\n print(\" JSON : \", index)\n flat_dict = dict()\n col_values = [row[col_index_map[c_n]] for c_n in col_names]\n dict_for_each_row = dict(zip(col_names, col_values))\n to_list = dict_for_each_row['impressions']\n dict_for_each_row['impressions'] = to_list.split('|')\n to_list = dict_for_each_row['prices']\n dict_for_each_row['prices'] = to_list.split('|')\n user_id = row[col_index_map['user_id']]\n if user_id in flat_dict:\n flat_dict[user_id].append(dict_for_each_row)\n else:\n flat_dict[user_id] = [dict_for_each_row]\n\n print(\"Output is Saved\")", "def init_csv_file(csv_path):\n with open(csv_path, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n header = ['file_name', 'chart_in_file',\n 'year', 'month', 'row_no', 'bird_species']\n header += list(range(1, 32))\n writer.writerow(header)", "def check_for_csv(inp_dict):\n if inp_dict[\".csv\"]:\n for path in inp_dict[\".csv\"]:\n csv_path = convert_csv(path)\n inp_dict[\".xls\"].append(csv_path)\n inp_dict[\"del\"].append(csv_path)\n inp_dict[\"out\"].append(csv_path)\n inp_dict[\".csv\"] = []\n return inp_dict", "def obs_csv2json(input_file,output_file,example_path,instrument):\r\n\r\n obs_path = Path(cfg.obs_path)\r\n \r\n with open(example_path,'r') as e:\r\n example = js.load(e)\r\n \r\n #deleting unused categories\r\n del(example['sep_forecast_submission']['forecasts'])\r\n del(example['sep_forecast_submission']['triggers'][2])\r\n del(example['sep_forecast_submission']['triggers'][1])\r\n del(example['sep_forecast_submission']['triggers'][0])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['instrument'])\r\n del(example['sep_forecast_submission']['triggers'][0]['particle_intensity']['last_data_time'])\r\n 
del(example['sep_forecast_submission']['contacts'])\r\n del(example['sep_forecast_submission']['model'])\r\n del(example['sep_forecast_submission']['issue_time'])\r\n \r\n example['sep_forecast_submission']['mode'] = 'observation'\r\n\r\n #json template for observations\r\n obs_json = example\r\n\r\n fieldnames = ('energy_threshold','flux_threshold','start_time','intensity',\r\n 'peak_time','rise_time','end_time','duration','fluence>10',\r\n 'fluence>100')\r\n\r\n #extracting data from csv file\r\n with open(input_file,'r') as f:\r\n reader = csv.DictReader(f, fieldnames)\r\n out = js.dumps( [ row for row in reader ] )\r\n\r\n obs_data = js.loads(out)\r\n\r\n data={}\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['observatory']) = instrument\r\n\r\n #creating data for all energy levels forecast\r\n for j in range(1,len(obs_data)):\r\n data[j-1]=obs_data[j]\r\n\r\n #recording start and end times for all events\r\n for i in range(len(data)):\r\n data[i]['start_time'] = datetime.strptime(data[i]['start_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['start_time'] = data[i]['start_time'].isoformat()\r\n data[i]['end_time'] = datetime.strptime(data[i]['end_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['end_time'] = data[i]['end_time'].isoformat()\r\n data[i]['peak_time'] = datetime.strptime(data[i]['peak_time'],'%Y-%m-%d %H:%M:%S')\r\n data[i]['peak_time'] = data[i]['peak_time'].isoformat()\r\n \r\n #recording observed values for all events\r\n if i > 0:\r\n (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events']).append({})\r\n\r\n event = (obs_json['sep_forecast_submission']['triggers'][0]['particle_intensity']\r\n ['ongoing_events'][i])\r\n \r\n #start and end times\r\n event['start_time']=data[i]['start_time']\r\n event['threshold'] = data[i]['flux_threshold']\r\n event['energy_min'] = float(data[i]['energy_threshold'][1:])\r\n event['energy_max'] = -1\r\n event['end_time']=data[i]['end_time']\r\n\r\n #peak values\r\n event['peak_intensity']=data[i]['intensity']\r\n event['peak_time'] = data[i]['peak_time']\r\n event['intensity_units']='pfu'\r\n \r\n #fluence values\r\n event['fluence'] = [{'energy_min' : '10','fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'},\r\n {'energy_min' : '100', 'fluence_value' : 'fluence_value',\r\n 'units' : 'MeV [cm^-2]'}]\r\n event['fluence'][0]['fluence']=data[i]['fluence>10']\r\n event['fluence'][1]['fluence']=data[i]['fluence>100']\r\n\r\n\r\n if float(event['peak_intensity']) >= cfg.pfu_threshold[cfg.energy_threshold.index\r\n (int(event['energy_min']))]:\r\n event['all_clear_boolean'] = 'false'\r\n\r\n else:\r\n event['all_clear_boolean'] = 'true'\r\n\r\n\r\n #building json file\r\n with open(obs_path / output_file, 'w') as s:\r\n js.dump(obs_json,s,indent=1)\r\n print('json file %s created' %output_file)\r\n \r\n return", "def test_first_time_reading_csv_file(self):\n\n # Create a temporary directory for test files\n temp_dir = \"test_files/observed\"\n os.makedirs(temp_dir, exist_ok=True)\n\n # Create a test CSV file\n csv_filepath = os.path.join(temp_dir, \"Abadia-BA_-11.56_-37.52.csv\")\n with open(csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = 
os.path.join(temp_dir, \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(csv_filepath, temp_dir)\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n }\n }\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(csv_filepath)\n os.remove(expected_output_filepath)\n os.rmdir(temp_dir)", "def csv_to_json(csv_file_path: str, json_file_path: str):\n fieldnames = ('last_name', 'first_name', 'second_name')\n\n # read csv file\n try:\n with open(Path(csv_file_path)) as csv_file:\n csv_reader = csv.DictReader(csv_file, fieldnames)\n csv_data = {num: row for num, row in enumerate(csv_reader, start=1)}\n except FileNotFoundError as err:\n raise CustomException() from err\n\n # generate json\n try:\n with open(Path(json_file_path), 'w') as json_file:\n json.dump(csv_data, json_file, indent=2)\n except OSError as err:\n raise CustomException() from err", "def formatJSON(csvpath, jsonfilepath):\n\n data = {}\n my_list = []\n with open(path) as file:\n csvReader = csv.DictReader(file)\n for csvRow in csvReader:\n\n data = csvRow\n my_list.append(data)\n\n \"\"\"\n\n Write retrieved data into a json file\n NOTE: json file is automatically created when code is run from terminal\n and updates each time it run again.\n \"\"\"\n\n\n with open(jsonfilepath,\"w\") as jsonfile:\n\n jsonfile.write(json.dumps(my_list,indent=4))", "def test_csv_no_callback(self):\n csvfile = testdata.create_csv({\n \"foo\": testdata.get_int(),\n \"bar\": testdata.get_words(),\n })\n self.assertEqual(1, len(csvfile))", "def csv_write (data):\n \n csv_data=data[0:]\n csv1_data = open('backup.csv', 'a')\n csvwriter = csv.writer(csv1_data)\n\n count = 0\n\n for i in csv_data:\n if count == 0:\n header = i.keys()\n csvwriter.writerow(header)\n count += 1\n csvwriter.writerow(i.values())\n\n csv1_data.close()\n\n #http://blog.appliedinformaticsinc.com/how-to-parse-and-convert-json-to-csv-using-python/", "def main(input_filepath, output_filepath, data_type):\n a = jsonCSV(input_filepath, os.path.join(output_filepath, data_type+'.csv'))\n column_names = a.get_superset_column_names()\n a.read_write(column_names)\n\n logger = logging.getLogger(__name__)\n logger.info('transform log files into csv')", "def create_json_example(row, header_csv, jstruct, delimiter, keep, dic_types):\n\n for key in header_csv:\n key_struct = key.split(delimiter)\n if key in dic_types.keys():\n # if no value indicated set to default\n if row[key] == '' and 'default' in dic_types[key].keys():\n row[key] = dic_types[key]['default']\n else:\n try:\n # Cast to indicated type\n row[key] = dic_types[key]['type'](row[key]) \n except:\n print(\" [WARN] Can not parse \", row[key] , \"to type\", dic_types[key]['type'])\n jstruct.update(update_jstruct(jstruct, key_struct, row[key], keep))\n \n return jstruct", "def write_to_csv(path,data_dict):\n\n\n schema = [\"file_name\",\"family\",\"genus\",\"genus_confidence\",\n \"species_1\",\"confidence_1\",\"hall_1\",\n \"species_2\",\"confidence_2\",\"hall_2\",\n 
\"species_3\",\"confidence_3\",\"hall_3\",\n \"species_4\",\"confidence_4\",\"hall_4\",\"peaks\"]\n\n # if no file exists create a one and inform the user\n if not os.path.exists(path):\n print(\"creating new output file {}\".format(path))\n with open(path, \"w\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(schema)\n\n row = []\n\n row.append(data_dict[\"file_name\"])\n row.append(data_dict[\"family\"])\n \n row.append(data_dict[\"genus_1\"])\n row.append(data_dict[\"genus_confidence_1\"][:5])\n \n row.append(data_dict[\"species_1\"])\n row.append(data_dict[\"confidence_1\"][:5])\n row.append(data_dict[\"hall_1\"])\n \n row.append(data_dict[\"species_2\"])\n row.append(data_dict[\"confidence_2\"][:5])\n row.append(data_dict[\"hall_2\"])\n\n row.append(data_dict[\"species_3\"])\n row.append(data_dict[\"confidence_3\"][:5])\n row.append(data_dict[\"hall_3\"])\n\n row.append(data_dict[\"species_4\"])\n row.append(data_dict[\"confidence_4\"][:5])\n row.append(data_dict[\"hall_4\"])\n\n row.append(data_dict[\"peaks\"])\n \n with open(path, \"a\") as csv_file:\n filewriter = csv.writer(csv_file, delimiter=\",\")\n filewriter.writerow(row)", "def test_when_file_already_exist(self):\n\n # Create a temporary directory for test files\n temp_dir = [\"test_files/observed\", \"test_files/forecast\", \"test_files/output\"]\n for dir in temp_dir:\n os.makedirs(dir, exist_ok=True)\n\n # Create the 1st csv file\n first_csv_filepath = os.path.join(temp_dir[0], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(first_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n \n # Creating the 2nd csv file in different directory\n second_csv_filepath = os.path.join(temp_dir[1], \"Abadia-BA_-11.56_-37.52.csv\")\n with open(second_csv_filepath, \"w\", newline=\"\") as csv_file:\n writer = csv.writer(csv_file, delimiter=\";\")\n writer.writerow([\"periods\", \"precipitation\", \"temperature\", \"max_temperature\"])\n writer.writerow([\"2023-01-01\", \"5\", \"25\", \"30\"])\n writer.writerow([\"2023-01-02\", \"10\", \"23\", \"28\"])\n\n # Define the expected output JSON file path\n expected_output_filepath = os.path.join(temp_dir[2], \"BA_Abadia.json\")\n\n # Call the function under test\n extractor.csv_to_json(first_csv_filepath, temp_dir[2])\n extractor.csv_to_json(second_csv_filepath, temp_dir[2])\n\n # Verify that the output JSON file exists\n assert os.path.exists(expected_output_filepath)\n\n # Load the output JSON file\n with open(expected_output_filepath, \"r\") as json_file:\n json_data = json.load(json_file)\n\n # Verify the contents of the JSON file\n expected_data = {\n \"city\": \"Abadia\",\n \"state\": \"BA\",\n \"coordinates\": [\"-11.56\", \"-37.52\"],\n \"observed\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n \"forecast\": {\n \"periods\": [\"2023-01-01\", \"2023-01-02\"],\n \"precipitation\": [\"5\", \"10\"],\n \"temperature\": [\"25\", \"23\"],\n \"max_temperature\": [\"30\", \"28\"]\n },\n }\n\n # Assertion\n assert json_data == expected_data\n\n # Clean up the temporary directory and files\n os.remove(first_csv_filepath)\n os.remove(second_csv_filepath)\n 
os.remove(expected_output_filepath)\n for dir in temp_dir:\n os.rmdir(dir)", "def clean_file(csv_file):\n my_list = []\n with open(csv_file, newline='') as csvfile:\n file_reader = csv.reader(csvfile, delimiter=',', quotechar=\" \")\n for row in file_reader:\n my_list.append(row)\n\n \"\"\"\n > Part Two\n Input: Nested list csv_table and a string file_name\n Action: Write fields in csv_table into a comma-separated CSV file with the name file_name\n Mutates output: Yes\n \"\"\"\n with open(csv_file, 'w', newline='') as csvfile:\n my_csv_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_NONE)\n for row in my_list:\n row2 = []\n for item in row:\n a = item.lstrip('\"')\n b = a.rstrip('\"')\n row2.append(b)\n my_csv_writer.writerow(row2)", "def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename", "def prepare_out_csv(output_dir, filename):\n out_columns_pi = ['fasta_file', 'acc.code',\n 'organism', 'EC.code', 'species',\n 'note', 'pi', 'modification', 'category']\n string = ''\n for i in out_columns_pi:\n if i == out_columns_pi[-1]:\n string += i\n else:\n string += i+','\n string += '\\n'\n with open(output_dir+filename, 'w') as f:\n f.write(string)", "def _setup_output_file(self):\n\n columns = [\"Hero file\",\n \"Test type\",\n \"Name of tested entry\",\n \"Misc dice sum input\",\n \"Value of tested entry\",\n \"Modifier\",\n \"Values of related attributes\",\n \"Rolls\",\n \"Result\",\n \"Description\",\n \"Timestamp\",\n \"Type of dice input\"]\n\n # if file does not exist, add first row of column names\n if not os.path.isfile(self._result_csv):\n with open(self._result_csv, \"w\", encoding=\"utf-8\") as csv_file:\n file_writer = csv.writer(csv_file, delimiter=',',\n quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n file_writer.writerow(columns)\n return True\n return False", "def test_write_csv_file(self, tmpdir):\n filename = tmpdir.join(\"output.csv\").strpath\n\n csv_formatter = CSVFormatter(fmt_str=\"${aaa},${bbb},${ccc}\", header=\"# Custom header line\")\n csv_formatter.to_csv(self.records, path_or_buf=filename)\n\n csv = open(filename).read()\n csv_expected = 
textwrap.dedent(\"\"\"\\\n # Custom header line\n foobar_01,8,4898FE19\n foobar_02,160,5825D187\n foobar_03,99,3648A436\n \"\"\")\n\n assert csv == csv_expected", "def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]", "def test_37_bulk_csv_import_no_column_names(self, Mock, mock):\r\n empty_file = FakeRequest('Foo,Bar,Baz\\n1,2,3', 200,\r\n {'content-type': 'text/plain'})\r\n Mock.return_value = empty_file\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',\r\n 'formtype': 'csv'},\r\n follow_redirects=True)\r\n task = db.session.query(Task).first()\r\n assert {u'Bar': u'2', u'Foo': u'1', u'Baz': u'3'} == task.info\r\n assert \"1 Task imported successfully!\" in res.data", "def read_csv(self, csv_input):\n # https://stackoverflow.com/a/45063514\n dtypes = {\n 'lat': 'U',\n 'long': 'U'\n }\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''], dtype=dtypes)\n\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)\n #print('Data read from CSV %s' % csv_input)", "def test_csvfile_different_types(fs: FakeFilesystem) -> None:\n contents = '''\"a\"\n1\n2.0\n\"test\"'''\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }", "def write_csv(invocations, job_information, out_file, null_string =\"NA\"):\n\n\t# assume every invocation of a task of a certain type takes the same number of input files\n\tnum_input_files = len(job_information[invocations[0]]['input_files'])\n\t#file_attributes = [\"input_file_%s_kb\"%i for i in range(1, num_input_files + 1)]\n\tfile_attributes = [\"host_name\", \"input_file_sum_kb\"]\n\tusage_attributes = ['utime', 'stime', 'maxrss', 'nvcsw', 'nivcsw', 'nswap', 'minflt', ] # 'majflt', 'inblock', 'outblock', 'nsignals', 'msgsnd', 'msgrcv', 'nswap'\n\tload_attributes = [\"min1\", \"min5\", \"min15\"]\n\tprocs_attributes = [\"total\", \"running\", \"sleeping\", \"waiting\", \"vmsize\", \"rss\"]\n\ttask_attributes = [\"total\", \"running\", \"sleeping\", \"waiting\",]\n\tram_attributes = [\"total\", \"free\", \"shared\", \"buffer\",]\n\tswap_attributes = [\"total\", \"free\",]\n\tmachine_attributes_headers = load_attributes + list(map(lambda a: \"procs_\"+a, procs_attributes)) \\\n\t 
\t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"task_\"+a, task_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"ram_\"+a, ram_attributes)) \\\n\t \t\t\t\t\t\t\t\t\t\t\t + list(map(lambda a: \"swap_\"+a, swap_attributes))\n\n\t# the csv column labels\n\theader = ['run_goup', 'run', 'transformation', 'mainjob_started', \"duration\"] + file_attributes + usage_attributes + machine_attributes_headers + ['out_size_kb', 'total_time_s', 'peak_memory_kb']\n#\theader = ['workflow','transformation', 'mainjob_started'] + file_attributes + usage_attributes + machine_attributes_headers + ['out_size_kb', 'total_time_s', 'peak_memory_kb']\n\n\twith open(out_file, 'w', newline='') as csvfile:\n\n\t\tspamwriter = csv.writer(csvfile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\t\tspamwriter.writerow(header)\n\n\t\tfor job_info in [job_information[job_id] for job_id in invocations]:\n\n\t\t\tfile_sizes = [float(file['size']) for file in job_info['input_files']]\n\t\t\tusage_values = [float(job_info['usage'][attr]) for attr in usage_attributes]\n#\n\t\t\ttry:\n\t\t\t\tout_size = sum([float(file['size']) for file in job_info['output_files']])\n\t\t\texcept KeyError as k:\n\t\t\t\tout_size = null_string\n#\n\t\t\tpeak_mem = float(job_info['usage']['maxrss'])\n\t\t\tmachine_values = []\n\n\t\t\tfor machine_attrs, attrs in [(\"load\", load_attributes), (\"procs\", procs_attributes), (\"task\", task_attributes), (\"ram\", ram_attributes), (\"swap\", swap_attributes)]:\n\t\t\t\tfor attr in attrs:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tmachine_values.append(job_info[machine_attrs][attr])\n\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\tmachine_values.append(null_string)\n\n#\t\t\tdata = [job_info[\"workflow\"], job_info[\"transformation\"], job_info['mainjob_started_ts']] + file_sizes + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n\t\t\tdata = [job_information[\"run_group\"], job_information[\"run\"], job_info[\"transformation\"], job_info['mainjob_started_ts'], job_info[\"mainjob_duration\"]] + [job_info['host_name']] + [sum(file_sizes)] + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n#\t\t\tdata = [job_info[\"transformation\"], job_info['mainjob_started_ts']] + file_sizes + usage_values + machine_values + [out_size, job_info['total_time'], peak_mem]\n\t\t\tspamwriter.writerow(data)", "def generate_csv(type, json_list, columns_list):\n with open(\"data/\" + type + \"_\" + time.strftime(\"%Y-%m-%d_%H:%M:%S\") +\n \".csv\", 'a+') as f:\n csv_file = csv.DictWriter(f, fieldnames=columns_list,\n extrasaction=\"ignore\")\n csv_file.writeheader()\n for item in json_list:\n csv_file.writerow(item)\n print(\"\\nCSV file saved as data/\" + type + \"_\" +\n time.strftime(\"%Y-%m-%d_%H:%M:%S\") + \".csv\")", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n df = create_data_frame(input_filepath)\n process_columns(df)\n logger.info(df.head())\n df.to_csv(output_filepath, index=False)", "def test_empty_file(self):\n\t\tmain.Main(['input/empty.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/empty.csv'))", "def __init__(self, in_csvfile, out_csvfile, col_name, cell_filler):\r\n self.in_csvfile = in_csvfile\r\n self.out_csvfile = out_csvfile\r\n self.col_name = col_name\r\n self.cell_filler = cell_filler", "def loadCSV(input_file):", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n 
os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))", "def test_csv(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(answer_file_path), 'r') as answer_file:\n csv_file = open(attach_path(input_file_path))\n assert str(read_csv(csv_file)) == answer_file.read().strip()", "def test_create_csv(self):\n\n # absolute path to xml file to parse\n xml_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.xml\")\n\n # absolute path to the csv file to create\n csv_file = os.path.join(self.csvfile, \"DLTINS_20210117_01of01.csv\")\n\n # Test for correct data\n self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)\n\n # Test for incorrect input xml file\n self.assertEqual(create_csv(\"somerandomfile\", self.csvfile), None)\n\n # Test for incorrect path to write csv to\n self.assertEqual(create_csv(xml_file, r\"D:\\kqcA CK j \"), None)", "def test_csv_simple_input(self):\n\n # Mix of integer and string data. 
Ensure that commas and\n # quotes are escaped properly.\n data = [\n {\n 'name': 'Normal string',\n 'item_num': 1,\n },\n {\n 'name': 'String, with, commas',\n 'item_num': 2,\n },\n {\n 'name': 'String with \" quote',\n 'item_num': 3,\n },\n ]\n\n table = TableReportForTesting(data)\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Expect cells containing commas to be escaped with quotes.\n content = response.content\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n self.assertEqual(\n content,\n 'Name,Item Num\\r\\n'\n 'Normal string,1\\r\\n'\n '\"String, with, commas\",2\\r\\n'\n '\"String with \"\" quote\",3\\r\\n')", "def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None", "def create_manifest_file(csv_file, manifest_file, s3_path):\n logger.info(\"Processing CSV file %s.\", csv_file)\n\n image_count = 0\n anomalous_count = 0\n\n with open(csv_file, newline='', encoding=\"UTF-8\") as csvfile,\\\n open(manifest_file, \"w\", encoding=\"UTF-8\") as output_file:\n\n image_classifications = csv.reader(\n csvfile, delimiter=',', quotechar='|')\n\n # Process each row (image) in the CSV file.\n for row in image_classifications:\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n source_ref = str(s3_path) + row[0]\n classification = 0\n\n if row[1].lower() == 'anomaly':\n classification = 1\n anomalous_count += 1\n\n # Create the JSON line.\n json_line = {}\n json_line['source-ref'] = source_ref\n json_line['anomaly-label'] = str(classification)\n\n metadata = {}\n metadata['confidence'] = 1\n metadata['job-name'] = \"labeling-job/anomaly-classification\"\n metadata['class-name'] = row[1]\n metadata['human-annotated'] = \"yes\"\n metadata['creation-date'] = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')\n metadata['type'] = \"groundtruth/image-classification\"\n\n json_line['anomaly-label-metadata'] = metadata\n\n output_file.write(json.dumps(json_line))\n output_file.write('\\n')\n image_count += 1\n\n logger.info(\"Finished creating manifest file %s.\\n\"\n \"Images: %s\\nAnomalous: %s\",\n manifest_file,\n image_count,\n anomalous_count)\n return image_count, anomalous_count", "def pre_process_books(csv, outputname):\n df = pd.read_csv(csv, error_bad_lines=False, encoding = \"ISO-8859-1\")\n df.to_csv(outputname, index=False)", "def get_concatenated_csv_data(concatenated_filepath, concatenated_filename, device_id, output_create_files_filepath, output_create_files_filename):\n\n # Create the full file name of the concatenated filename.\n concatenated_file = concatenated_filepath + \"/\" + concatenated_filename + \"_concatenated.csv\"\n print(\"Looking for concatenated file name: \", concatenated_file)\n\n # Test if the concatenated file exists and if it does, return it.\n if os.path.isfile(concatenated_file):\n print(\"Concatenated file exists: \", concatenated_file)\n return concatenated_file\n\n # If it does not exist, test if the individual files exist.\n elif not os.path.isfile(concatenated_file):\n print(\"Concatenated file does not exist. Create file: \", concatenated_file)\n file_list = get_data_from_files(concatenated_filepath, concatenated_filename)\n # print(\"File list:\", file_list)\n\n # If the individual files exist, create the concatenated file.\n if len(file_list) > 0:\n print(\"Individual csv files exist. 
Creating the concatenated file.\")\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file\n\n # If the individual files do not exist, get the data from the database, create the files then concatenate them.\n else:\n database_query = \"select * from ship_data_gpggagpsfix where device_id=\" + int(\n device_id) + \" order by date_time;\"\n # print(database_query)\n password = input()\n\n db_connection = MySQLdb.connect(host='localhost', user='ace', passwd=password, db='ace2016', port=3306);\n\n track_df = get_data_from_database(database_query, db_connection)\n track_df = string_to_datetime(track_df)\n\n # Output the data into daily files (as they do not already exist).\n output_daily_files(track_df, output_create_files_filepath, output_create_files_filename)\n\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file", "def _parse_csv(\n path: Path,\n *,\n settings: Settings = DEFAULT_SETTINGS,\n encoding: Optional[str],\n delimiter: Optional[str],\n has_header: bool,\n autoconvert_text_to_numbers: bool,\n) -> ParseCsvResult:\n warnings = []\n\n with contextlib.ExitStack() as ctx:\n n_bytes = path.stat().st_size\n if n_bytes > settings.MAX_CSV_BYTES:\n # We can't simply os.truncate() the input file, because sandboxed code\n # can't modify input files.\n truncated_path = ctx.enter_context(tempfile_context(prefix=\"truncated-\"))\n with path.open(\"rb\") as src, truncated_path.open(\"wb\") as dest:\n os.sendfile(dest.fileno(), src.fileno(), 0, settings.MAX_CSV_BYTES)\n path = truncated_path\n warnings.append(\n _trans_cjwparse(\n \"csv.truncated_file\",\n \"{n_bytes_truncated, one{Truncated # byte} other{Truncated # bytes}} from file (maximum is {max_n_bytes} bytes)\",\n dict(\n n_bytes_truncated=(n_bytes - settings.MAX_CSV_BYTES),\n max_n_bytes=settings.MAX_CSV_BYTES,\n ),\n )\n )\n\n utf8_path = ctx.enter_context(tempfile_context(prefix=\"utf8-\", suffix=\".txt\"))\n # raises LookupError, UnicodeError\n warnings.extend(\n transcode_to_utf8_and_warn(path, utf8_path, encoding, settings=settings)\n )\n\n # Sniff delimiter\n if not delimiter:\n delimiter = detect_delimiter(utf8_path, settings)\n\n with tempfile_context(suffix=\".arrow\") as arrow_path:\n # raise subprocess.CalledProcessError on error ... 
but there is no\n # error csv-to-arrow will throw that we can recover from.\n child = subprocess.run(\n [\n \"/usr/bin/csv-to-arrow\",\n \"--delimiter\",\n delimiter,\n \"--max-rows\",\n str(settings.MAX_ROWS_PER_TABLE),\n \"--max-columns\",\n str(settings.MAX_COLUMNS_PER_TABLE),\n \"--max-bytes-per-value\",\n str(settings.MAX_BYTES_PER_VALUE),\n utf8_path.as_posix(),\n arrow_path.as_posix(),\n ],\n capture_output=True,\n check=True,\n )\n warnings.extend(_parse_csv_to_arrow_warnings(child.stdout.decode(\"utf-8\")))\n\n reader = pyarrow.ipc.open_file(arrow_path.as_posix())\n raw_table = reader.read_all() # efficient -- RAM is mmapped\n\n table, more_warnings = _postprocess_table(\n raw_table, has_header, autoconvert_text_to_numbers, settings\n )\n return ParseCsvResult(table, warnings + more_warnings)", "def test_csvfile_unordered(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\"\n1\n2\n1\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }", "def test_csv_writes(self):\n counter = testdata.get_counter()\n csvfile = testdata.create_csv({\n \"foo\": counter,\n \"bar\": testdata.get_words,\n })\n\n for row in csvfile:\n for k in [\"foo\", \"bar\"]:\n self.assertTrue(k in row)\n self.assertTrue(row[k])", "def write_to_csv(list_of_rows, file_name):\n with open(file_name, 'w') as f:\n writer = csv.writer(f)\n for row in list_of_rows:\n if None in row:\n continue\n writer.writerow(row)\n \n f.close()", "def initCSV(self, makeFile, overWrite):\n self.initialized = True\n\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n if os.path.exists(str(self.fileName)):\n\n f = open(str(self.fileName), \"r\")\n\n if not f.read():\n f.close()\n\n f = open(str(self.fileName), \"w\")\n outString = \"\"\n for varName in self.variableDescriptions:\n outString += varName\n outString += \",\"\n\n f.write(outString[0:-1])\n\n f.write('\\n')\n else:\n if overWrite == True:\n f.close()\n\n f = open(str(self.fileName), \"w\")\n outString = \"\"\n for varName in self.variableDescriptions:\n outString += varName\n outString += \",\"\n\n f.write(outString[0:-1])\n\n f.write('\\n')\n if overWrite == False:\n raise OSError(\"csv file is not empty!\")\n\n else:\n if makeFile == True:\n f = open(str(self.fileName), \"w\")\n \n f.close()\n else:\n raise OSError(\"csv file not found!\")", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out", "def import_csv(item):\n (f_csv, f_csv_out, target_column, merge_columns) = item\n has_checked_keys = False\n\n if not merge_columns:\n raise ValueError(\"merge_columns must not be empty\")\n\n with open(f_csv_out, \"w\") as FOUT:\n CSV_HANDLE = None\n total_rows = 0\n\n for row in csv_iterator(f_csv):\n\n output = {\"_ref\": next(_ref_counter)}\n\n if not has_checked_keys:\n for key in merge_columns:\n if key not in row.keys():\n msg = \"Column **{}** not in csv file {}\"\n raise KeyError(msg.format(key, f_csv))\n has_checked_keys = True\n\n if target_column in row.keys():\n msg = \"Generated column **{}** already in csv file {}\"\n raise KeyError(msg.format(target_column, f_csv))\n\n text = []\n for key in merge_columns:\n val = row[key].strip()\n if not val:\n continue\n if val[-1] not in \".?!,\":\n val += \".\"\n text.append(val)\n\n output[target_column] = 
\"\\n\".join(text).strip()\n\n if CSV_HANDLE is None:\n CSV_HANDLE = csv.DictWriter(FOUT, sorted(output.keys()))\n CSV_HANDLE.writeheader()\n\n CSV_HANDLE.writerow(output)\n total_rows += 1\n\n logger.info(\"Imported {}, {} entries\".format(f_csv, total_rows))", "def test_noInputSpecified(self,\n filename=None,\n input_folder='../../input/raw-data/'):\n csv_file = retrive_csv_file(filename, input_folder)\n expected_output = \"../../input/raw-data/page-views.csv\"\n self.assertEqual(csv_file, expected_output)", "def post_process_output_file():\n parsed_data = []\n unparseable_data = []\n\n with open('../output/part-00000', 'r') as input_file:\n for line in input_file:\n line = line.strip()\n try:\n csv_splits = line.split(',')\n csv_splits[0] = int(csv_splits[0])\n # parsed_data is a list of lists\n parsed_data.append(csv_splits)\n except ValueError:\n unparseable_data.append(line)\n parsed_data.sort()\n\n with open('../output/titanic_test_data.csv', 'w') as output_file:\n # start with lines that couldn't be parsed\n # hopefully this will only be the original header\n for line in unparseable_data:\n output_file.write(\"%s\\n\" % line)\n for line in parsed_data:\n output_file.write(\"%d,%s\\n\" % (line[0], line[1]))", "def test_to_csv_with_valid_rows(self, mock_open):\n row_handling.to_csv(rows=self.rows, csv_path=self.csv_path)\n open.assert_called_with(self.csv_path, 'w')", "def create_metadata_shell_for_csv(csv_file_path: str) -> str:\n metadata_file = f\"{csv_file_path}-metadata.json\"\n if path.exists(metadata_file):\n raise Exception(f\"Metadata file {metadata_file} already exists.\")\n if not path.exists(csv_file_path):\n raise Exception(f\"CSV file {csv_file_path} does not exist.\")\n\n label = map_file_path_to_label(csv_file_path)\n concept_scheme_uri = generate_concept_scheme_root_uri(label)\n\n # Just inserting basic structure at this point as already exists in standard files. Additional metadata will be\n # added as the script continues to run.\n metadata = {\n \"@context\": \"http://www.w3.org/ns/csvw\",\n \"@id\": concept_scheme_uri,\n \"url\": csv_file_path,\n \"rdfs:label\": label,\n \"dc:title\": label,\n \"tableSchema\": {\n \"columns\": [],\n },\n \"prov:hadDerivation\": {\n \"@id\": concept_scheme_uri,\n \"@type\": [\n \"skos:ConceptScheme\",\n f\"{pmdcat_base_uri}DatasetContents\"\n ]\n }\n }\n\n table_schema: Dict = metadata[\"tableSchema\"]\n columns: List[Dict] = table_schema[\"columns\"]\n\n with open(csv_file_path, newline=\"\") as csv_file:\n reader = csv.reader(csv_file, delimiter=\",\", quotechar=\"\\\"\")\n column_names: List[str] = next(reader)\n\n for column_name in column_names:\n column = generate_schema_for_column(column_name, concept_scheme_uri)\n columns.append(column)\n\n columns.append({\n \"virtual\": True,\n \"propertyUrl\": \"rdf:type\",\n \"valueUrl\": \"skos:Concept\"\n })\n columns.append({\n \"virtual\": True,\n \"propertyUrl\": \"skos:inScheme\",\n \"valueUrl\": concept_scheme_uri\n })\n\n if \"notation\" in [c.lower() for c in column_names]:\n override(table_schema, {\n \"primaryKey\": \"notation\",\n \"aboutUrl\": concept_scheme_uri + \"/{notation}\"\n })\n else:\n print(\"WARNING: could not determine primary key. As a result, `aboutUrl` property is not specified and \" +\n \"so each row will not have a true URI. This is basically required. 
Manual configuration required.\")\n\n with open(metadata_file, 'w+') as file:\n file.write(json.dumps(metadata, indent=4))\n\n return str(metadata_file)", "def test_csvfile_single_row_of_data(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\",\"b\"\n1,2\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"b\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }\n assert list(adapter.get_data({}, [])) == [{\"a\": 1.0, \"b\": 2.0, \"rowid\": 0}]", "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def file_setup(outfile):\n\n extant_objids = []\n\n if os.path.exists(outfile):\n print('This file exists.')\n try:\n extant_objids = np.array(pd.read_csv(outfile)['objid']).tolist()\n except:\n print('And nonstandard!')\n # Raise an exception?\n return False\n else:\n # Initialize the file with a header\n with open(outfile, 'wb') as csvfile:\n cols = ['objid', 'flat_counts', 'mcat_bg', 'bg_counts',\n 'flux_bgsub_err', 'cps_mcatbgsub', 'counts',\n 'mag_mcatbgsub', 'cps_err', 'mag_bgsub', 'cps_bgsub',\n 'detys', 'flux_bgsub', 'flux_err', 'mag_err_1',\n 'cps_bgsub_err', 't1_data', 'bg', 'responses', 't_mean',\n 'cps_mcatbgsub_err', 'mag_bgsub_err_1', 'mag_err_2',\n 't0_data', 'racent', 'deccent', 'mag', 'exptime',\n 'bg_flat_counts', 'detxs', 't0', 't1',\n 'mag_mcatbgsub_err_2', 'flux', 'mag_mcatbgsub_err_1',\n 'flags', 'mag_bgsub_err_2', 'detrad', 'cps',\n 'flux_mcatbgsub_err', 'flux_mcatbgsub', 'mcat_expt', 'ra',\n 'dec', 'aper4', 'aper4_err', 'mcat_bg',\n 'aper7', 'aper7_err']\n\n spreadsheet = csv.writer(csvfile, delimiter=',', quotechar='|',\n quoting=csv.QUOTE_MINIMAL)\n spreadsheet.writerow(cols)\n\n return extant_objids", "def write_csv(row_list,out_name,*header_strings : str):\n with open(out_name,'w',newline='') as result_file:\n wr = csv.writer(result_file, delimiter='\\t')\n if header_strings:\n wr.writerow([name for name in header_strings])\n if type(row_list[0]) is list:\n wr.writerows(row_list)\n else:\n for row in row_list:\n wr.writerow([row])", "def get_args():\n\n parser = argparse.ArgumentParser(\n \"Create json file from csv by infering json'structure using a delimiter inside csv's columns.\"\n )\n parser.add_argument(\"--csv\", type=str, help='Set path to csv file as input')\n parser.add_argument(\"--json\", type=str, help='Set path to json file as output')\n parser.add_argument(\"--delimiter\", type=str, default='_', help='Set delimiter used to infer json\\'s structure (default=\\'_\\')')\n parser.add_argument(\"--config\", type=str, default=None, help='Set path to json file containing data type information and or default value(default=\\'None\\', optional and precise column type)') \n parser.add_argument(\"--cols_delimiter\", type=str, default=',', help='Set delimiter of the csv (default=\\',\\')')\n parser.add_argument(\"--max_docs\", type=int, default=-1, help='Set max number of documents in a json file, several will be created if necessary (default=\\'-1\\' means single output file)') \n parser.add_argument(\"--per_line\", action='store_true', default=False, help='Dump a file containing one json per line. 
Careful the output is not a correct json (default=\\'False\\')')\n parser.add_argument(\"--infer_types\", action='store_true', default=False, help='Infer data type based on its value: float, list and date are supported. Carefull, \\'config\\' will override it if specified. (default=\\'False\\')') \n parser.add_argument(\"--keep\", action='store_true', default=False, help='Keep fields with empty values replaced by null instead of ignoring them (default=\\'True\\')') \n args = parser.parse_args()\n return args", "def test_delimiter_none(self):\n with self.assertRaisesRegexp(Exception, \"delimiter\"):\n self.context.frame.import_csv(self.dataset,\n self.schema, delimiter=None)", "def csv(filepath, header=True, mode=DataSaver.MODE_OVERWRITE, sep=',',\n na_rep='', float_format=None, columns=None, encoding=None,\n quoting=None, quotechar='\"', date_format=None, doublequote=True,\n escapechar=None, decimal='.'):\n\n format_file = DataSaver.FORMAT_CSV\n kwargs = locals()\n _apply_datasaver(format_file, kwargs, last_uuid)\n return None", "def test_convert_csv_to_kml_missing_coordinate_fields(self):\n import tempfile\n from pykml.util import convert_csv_to_kml\n\n # create a CSV file for testing\n csvfile = tempfile.TemporaryFile(mode='w+')\n csvfile.write('name,snippet,y,x\\n')\n csvfile.write('first,The first one,45.0,-90.0\\n')\n csvfile.write('second,The second one,46.0,-89.0\\n')\n csvfile.seek(0)\n\n try:\n convert_csv_to_kml(csvfile)\n except KeyError:\n self.assertTrue(True)\n except:\n raise\n finally:\n csvfile.close()", "def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)", "def test_36_bulk_csv_import_dup_header(self, Mock, mock):\r\n empty_file = FakeRequest('Foo,Bar,Foo\\n1,2,3', 200,\r\n {'content-type': 'text/plain'})\r\n Mock.return_value = empty_file\r\n self.register()\r\n self.new_application()\r\n app = db.session.query(App).first()\r\n url = '/app/%s/tasks/import?template=csv' % (app.short_name)\r\n res = self.app.post(url, data={'csv_url': 'http://myfakecsvurl.com',\r\n 'formtype': 'csv'},\r\n follow_redirects=True)\r\n msg = \"The file you uploaded has two headers with the same name\"\r\n assert msg in res.data", "def _json_export(self, exppath):\n # TODO: Settle on JSON format for colortable\n pass", "def test_csvfile_empty(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=\"\")\n\n with pytest.raises(ProgrammingError) as excinfo:\n CSVFile(\"test.csv\")\n assert str(excinfo.value) == \"The file has no rows\"", "def test_delimiter_empty(self):\n with self.assertRaisesRegexp(Exception, \"delimiter\"):\n self.context.frame.import_csv(self.dataset,\n self.schema, delimiter=\"\")", "def _configure_csv_file(self, file_handle, schema):\n csv_writer = csv.writer(file_handle, delimiter=self.field_delimiter)\n csv_writer.writerow(schema)\n return csv_writer", "def place_types_read_csv(self, csv_input):\n csv_data = pd.read_csv(csv_input, encoding='UTF-8', sep=',', na_values=[''])\n self.table = csv_data.fillna('').applymap(lambda x: x.strip() if type(x) == str else x)\n self.log.info('Data read from CSV %s' % csv_input)", "def parse_isys_output(path_to_csv,directory_details):\n isys_results=open(path_to_csv).readlines()\n partial_paths_list=[]\n #below we are starting with the second row because the first row has the column\n # headings \n start=1\n for item in isys_results[start:]:\n 
partial_path=item.split(',')[0]\n partial_paths_list.append(partial_path)\n filing_details=[]\n for partial_path in partial_paths_list:\n temp_dict={}\n split_partial_path=partial_path.split('\\\\')\n temp_dict['cik']=split_partial_path[1]\n temp_dict['date_details']=split_partial_path[2]\n temp_dict['file_type']=split_partial_path[3].split('.')[-1]\n temp_dict['file_path']=directory_details+partial_path\n filing_details.append(temp_dict)\n return filing_details", "def apple_search_csv_writer(file: IO, data: str) -> callable:\n file_writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n file_writer.writerows(data)\n return None", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def create_file_output(self, results):\n for key, value in results.table_output.items():\n name_timestamp = key.split('&')\n _name = name_timestamp[0]\n timestamp = name_timestamp[1]\n file_name = output_file_prefix + \"-\" + _name + \".csv\"\n if file_name not in self.file_creation_set:\n self._header_written = False\n self.file_creation_set.update([file_name])\n for row in value:\n with open(file_name, 'a+') as file_to_write:\n row.update({'Timestamp': timestamp})\n _keys = row.keys()\n file_output = csv.DictWriter(file_to_write, _keys)\n if not self._header_written:\n file_output.writeheader()\n self._header_written = True\n file_output.writerow(row)\n file_to_write.close()\n return results", "def write_csv(self, _dict, filename):\n with open(filename, 'w') as f:\n f.write('\"'+'\";\"'.join(_dict.keys())+'\"\\n')\n for i in np.arange(len(_dict[list(_dict.keys())[0]])):\n values = []\n for col in _dict.keys():\n try:\n values.append(str(_dict[col][i]))\n except IndexError as e:\n # LSTM don't have first times available because of lacking history\n pass\n f.write(';'.join(values)+'\\n')\n\n logging.info('Wrote {}'.format(filename))\n self._upload_to_bucket(filename, filename)", "def write_csv_file (metadata_list, csv_file, append) :\n try :\n with open (csv_file, 'a' if append else 'w' , newline='') as file :\n writer = csv.DictWriter(file, fieldnames=MetadataEntity.get_fieldnames())\n if not append: writer.writeheader()\n for e in metadata_list :\n writer.writerow(e.get_values())\n file.close()\n except :\n print ('ERROR: writing csv file: ' + csv_file)\n return False\n return True", "def init_csv(input_path, config_file, quiet):\n\n if not config_file:\n config_file = getConfigPath(input_path)\n\n if not os.path.exists(config_file) or quiet:\n configHandler(config_file).resetConfig()\n click.secho('\\n{} didn\\'t exist and has been created'.format(\n config_file), fg='green')\n\n csv_file = getCsvPath(input_path)\n if not os.path.exists(csv_file) or quiet:\n confirm_overwrite = True\n 
else:\n confirm_overwrite = click.confirm(\n '{} already exists. Do you want to overwrite it?'.format(csv_file))\n\n if confirm_overwrite:\n if not os.path.exists(config_file):\n configHandler(config_file).resetConfig()\n csvHandler(csv_file).resetCSV(config_file=config_file)\n click.secho('{} created'.format(csv_file), fg='green')", "def writeCSV(csvPath, usedmpicommands, first_table_values,second_table_values,third_table_values, df):\n\n print(\"Saving CSV files in directory '\" + os.path.realpath(csvPath) +\"'\")\n\n #routine Summary by rank metrics table\n metric_csv_table = df.to_csv(sep=';')\n with open(os.path.join(csvPath,'routineSummaryByRank_metric_table.csv'), 'w') as outfileMetricTable:\n outfileMetricTable.write(metric_csv_table)\n outfileMetricTable.close()\n\n #routine Summary by rank data table (just the data from the instrumenation file in csv format)\n with open(os.path.join(csvPath,'routineSummaryByRank_summary.csv'), 'w') as outfileMPICommands:\n wr = csv.writer(outfileMPICommands, delimiter=';')\n wr.writerows(usedmpicommands)\n outfileMPICommands.close()\n\n #application Summary by rank data (first table)\n #Columns: \"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_1st_table.csv'), 'w') as outfile_first_table:\n wr = csv.writer(outfile_first_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc CPU Time\",\"User Portion\", \"User Portion in Percent\", \"System Portion\", \"System Portion in Percent\"])\n wr.writerows(first_table_values)\n outfile_first_table.close()\n \n #application Summary by rank data (second table) \n #Columns: \"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_2st_table.csv'), 'w') as outfile_second_table:\n wr = csv.writer(outfile_second_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc Wall Time\",\"User\" , \"User in Percent\",\"MPI\", \"MPI in Percent\"])\n wr.writerows(second_table_values)\n outfile_second_table.close()\n\n #application Summary by rank data (third table)\n #Columns: \"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"\n with open(os.path.join(csvPath,'applicationSummaryByRank_3rd_table.csv'), 'w') as outfile_third_table:\n wr = csv.writer(outfile_third_table, delimiter=';')\n wr.writerow([\"Rank\",\"Proc MPI Time\",\"Overhead\", \"Overhead in Percent\",\"Blocking\", \"Blocking in Percent\"])\n wr.writerows(third_table_values)\n outfile_third_table.close()\n\n #In case, you are wondering, where the last part of the instrumentation file is (message Summary by rank),\n #it is currently not saved as a csv file. 
This is because:\n #\n #1st: In the platform_mpi instrumentation file, the data is somehow visualized beautifully\n #2nd: It is very hard to save the data in a 2-dimensional csv file format\n #Therefore we decided, not to export this data in a csv file format", "def exportCSV(self, log, csvFile):\n return 0", "def write_csv(data, output_csv):\n with open(output_csv, 'w') as csvfile:\n fieldnames = ['minute_start',\n 'total_requests',\n 'success_count',\n 'error_count',\n 'mean_respone_time',\n 'data_sent_mb']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n writer.writeheader()\n for row in data:\n row['minute_start'] = row['minute_start'].isoformat()\n writer.writerow(row)", "def convert_reviews_json_file_to_first_last_csv(json_file_path, csv_file_path):\n write_dir = os.path.dirname(csv_file_path)\n if not os.path.isdir(write_dir):\n os.mkdir(write_dir)\n\n def get_last_review():\n rev_dict = {}\n with open(json_file_path) as f:\n for line in f:\n cur_rev = json.loads(line)\n biz_id = cur_rev['business_id']\n cur_rev_date = dt.datetime.strptime(cur_rev['date'], '%Y-%m-%d').date()\n if biz_id in rev_dict:\n if rev_dict[biz_id][0] > cur_rev_date:\n rev_dict[biz_id][0] = cur_rev_date\n elif rev_dict[biz_id][1] < cur_rev_date:\n rev_dict[biz_id][1] = cur_rev_date\n else:\n rev_dict[biz_id] = [cur_rev_date, cur_rev_date]\n return rev_dict\n\n with open(str(csv_file_path), 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n reviews = get_last_review()\n for biz_id, time_tuple in reviews.iteritems():\n writer.writerow([str(biz_id), str(time_tuple[0]), str(time_tuple[1])])", "def csv2json(csvfile, jsonfile=None):\n if not hasattr(csvfile, \"read\"):\n csvfile = open(csvfile, \"r\")\n if (jsonfile is not None) and (not hasattr(jsonfile, \"write\")):\n jsonfile = open(jsonfile, \"w\")\n csvdata = list(csv.reader(csvfile))\n fieldnames = csvdata[0]\n # use 'OrderedDict' to keep fields order\n jsondata = [ OrderedDict(zip(fieldnames, row)) for row in csvdata[1:] ]\n csvfile.close()\n if jsonfile is None:\n return jsondata\n else:\n # 'ensure_ascii=False' to support UTF-8\n json.dump(jsondata, jsonfile, ensure_ascii=False, indent=4)\n jsonfile.close()", "def test_simple_export(self):\n\n self.import_file(\"assessment_full_no_warnings.csv\")\n data = [{\n \"object_name\": \"Assessment\",\n \"filters\": {\n \"expression\": {}\n },\n \"fields\": \"all\",\n }]\n response = self.export_csv(data)\n self.assertIn(u\"\\u5555\", response.data.decode(\"utf8\"))", "def tokenizeFormCsv( input_file, columns,save_file_path):\n # 写文件\n output = open(save_file_path, \"w+\", encoding=\"utf-8\")\n\n\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.DictReader(f)\n i = 0\n for row in reader:\n content_line = ''\n i += 1\n for column in columns:\n content_line += cut(row[column])\n\n output.write(content_line + \"\\n\")\n print(\"line {} is finished : {}\".format(i, content_line))\n\n\n output.close()\n print(\"csv finished\")", "def MaterializeData(self, output_path):\n output_file_name = os.path.join(output_path, self.file_name)\n\n if self.verbose:\n print 'Writing file: %s' % output_file_name\n\n csv_output_file = open(output_file_name, 'wb')\n csv_writer = csv.writer(csv_output_file)\n\n for row in self.table_data:\n csv_writer.writerow(row)\n\n csv_output_file.close()", "def process_header_data(spark, input_dir, output):\n\theader = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) 
\\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__header_*__*.csv\") \\\n\t\t.select(*header_cols) \\\n\t\t.where(col('identifier').isNotNull())\n\n\tbill = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__billgen_*__*.csv\") \\\n\t\t.select(*bill_cols)\n\n\theader_full = header.join(bill, ['identifier'], how='left')\n\n\theader_full.repartition(1).write.mode('overwrite').format(\"csv\") \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.save(f\"{output}/header/\")", "def write_csv(df, output_fn=None):\n if not output_fn:\n output_fn = '{}.csv'.format(generate_unique_id('abc')[0:15])\n for column in df.columns:\n for idx in df[column].index:\n x = df.get_value(idx, column)\n try:\n x = unicode(x.encode('utf-8', 'ignore'),\n errors='ignore') if type(x) == unicode else unicode(str(x), errors='ignore')\n df.set_value(idx, column, x)\n except Exception as e:\n print ('encoding error: {0} {1}'.format(idx, column))\n print ('Err Msg: \\\"{}\\\".'.format(e))\n df.set_value(idx, column, '')\n continue\n try:\n df.to_csv(output_fn, index=False)\n return output_fn\n except Exception as e:\n print ('Ocurrio un fallo al intentar grabar el archivo {}'.format(output_fn))\n print ('Err Msg: \\\"{}\\\".'.format(e))", "def test_add_documents_csv_with_delimiter(empty_index, songs_csv_custom_separator):\n index = empty_index(\"csv-delimiter\")\n response = index.add_documents_csv(songs_csv_custom_separator, csv_delimiter=\";\")\n assert isinstance(response, TaskInfo)\n assert response.task_uid is not None\n task = index.wait_for_task(response.task_uid)\n assert task.status == \"succeeded\"\n assert task.details[\"receivedDocuments\"] == 20\n documents = index.get_documents().results\n assert documents[1].country == \"Europe\"\n assert documents[4].artist == \"Elton John\"", "def encode_to_raw_json(self, feature_collection, csv_f):\n clean_name = str(path.splitext(csv_f)[0]) + \".json\"\n with open(path.join(self.uk_postcodes, clean_name), \"wb\") as json_outfile:\n dump(feature_collection, json_outfile)", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n 
shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def csv_to_json(file_path: Path) -> dict:\n\n output = {}\n\n with open(file_path, \"r\", ) as f:\n columns = get_columns_from_csv(file_path)\n reader = csv.DictReader(f, fieldnames=columns)\n next(reader, None)\n for row in reader:\n powerplan = row[\"DESCRIPTION\"]\n\n if powerplan not in output and powerplan:\n output[powerplan] = {\n k: v\n for k, v in row.items()\n if not k.startswith(\"PHASE\") and not k.startswith(\"DOT\")\n }\n\n output[powerplan][\"phases\"] = {}\n\n phase = row[\"PHASE_DESCRIPTION\"]\n\n if phase not in output[powerplan][\"phases\"] and phase:\n output[powerplan][\"phases\"][phase] = {\n k: v for k, v in row.items() if k.startswith(\"PHASE\")\n }\n\n output[powerplan][\"phases\"][phase][\"dots\"] = {}\n\n dot = row[\"DOT_DESCRIPTION\"]\n\n if phase:\n if dot not in output[powerplan][\"phases\"][phase][\"dots\"] and dot:\n output[powerplan][\"phases\"][phase][\"dots\"][dot] = {\n k: v for k, v in row.items() if k.startswith(\"DOT\")\n }\n return output", "def json_formatter(components):\n columns = cfg['columns']\n\n newList = [] # New list of only dictionaries with column attributes to marshall\n\n for component in components:\n newComp = {}\n\n for column in columns:\n try:\n newComp[column] = component[column]\n except:\n newComp[column] = cfg['emptyValue']\n\n newList.append(newComp)\n\n result = json.dumps(newList)\n\n # Save the json file\n save_path = args.output_file\n try:\n with open(save_path, \"w\") as file:\n file.write(result)\n\n Logger.Debug(\"Output saved to\", save_path)\n\n return save_path\n\n except:\n Logger.Error(\"Could not save output to\", save_path)", "def outputapidata_csv(filename, data, headers=None):\n with open(filename,'w',encoding='utf-8',newline = \"\", ) as f:\n if headers:\n writer = csv.DictWriter(f,fieldnames = headers)\n writer.writeheader()\n else:\n writer = csv.DictWriter(f)\n writer.writerows(out)", "def test_add_csv_data_00(self, mocker):\n fake_fields = self.fake.pylist(10, True, str)\n fake_data = []\n for _ in range(self.fake.random_digit()):\n fake_entry = {}\n for field in fake_fields:\n 
fake_entry[field] = self.fake.word()\n fake_data.append(fake_entry)\n\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), [])\n g.spreadsheet = Spreadsheet(None, None)\n g.worksheet = mocker.MagicMock()\n g.worksheet.append_row = mocker.MagicMock()\n g.add_csv_data(fake_fields, fake_data)\n\n assert not g.worksheet.append_row.call_count == len(fake_data)", "def create_csv(output_file, y, tx, ids, header, is_test):\n print('\\nCreate new csv file named ' + str(output_file) + '...')\n with open(output_file, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, delimiter = ',', fieldnames = header)\n writer.writeheader()\n for idx, y_row, tx_row in zip(ids, y, tx):\n if is_test:\n prediction = '?'\n else:\n prediction = 'b' if y_row == -1 else 's'\n dictionary = {'Id': int(idx),'Prediction': prediction}\n for index in range(len(tx_row)):\n dictionary[header[index + 2]] = float(tx_row[index])\n writer.writerow(dictionary)\n print('\\n... finished.')", "def read_csv():\n global csvdata\n global CONFIG\n if type(csvdata) == type(None):\n if not os.path.exists(CONFIG[\"csvfile\"]):\n csvdata = pandas.read_csv(CONFIG[\"csvrepo\"],\n na_values=[\"-999999\",\"NOT AVAILABLE\"])\n os.makedirs(CONFIG[\"cachedir\"],exist_ok=True)\n csvdata.to_csv(CONFIG[\"csvfile\"])\n else:\n csvdata = pandas.read_csv(CONFIG[\"csvfile\"])\n return csvdata", "def write_csv(outputfile, delimiter, newline, qchar, encoding, header, rows):\n with open(outputfile, 'w', newline=newline, encoding=encoding) as csvfile:\n writer = csv.writer(csvfile, delimiter=delimiter,\n quotechar=qchar, quoting=csv.QUOTE_MINIMAL)\n writer.writerow(header)\n for row in rows:\n writer.writerow(row)", "def read_csv():", "def convert(csv_filepath, output_format):\n valid_data, invalid_data = [], []\n\n logger.info('Started processing the csv.')\n\n with csv_filepath.open(newline='', encoding='utf-8') as f:\n csv_data = (row for row in csv.reader(f) if row)\n next(csv_data) # skip the header rows\n\n for row in csv_data:\n hotel = Hotel.from_row(row)\n if hotel.is_valid():\n valid_data.append(hotel.as_dict())\n else:\n invalid_data.append(hotel)\n\n processor = FORMAT_PROCESSORS[output_format]['processor']\n ext = FORMAT_PROCESSORS[output_format]['ext']\n\n output_filepath = csv_filepath.parent / 'output.{}'.format(ext)\n processor(valid_data, output_filepath)\n\n # TODO implement a way to report errors and invalid data\n logger.info('Finish processing the csv. {} hotels converted.'.format(\n len(valid_data)))", "def write_csv(data_frame, file_name):\n data_frame.coalesce(1).write \\\n .option('header', True).mode('overwrite') \\\n .save(f'outputs/{file_name}', format('csv'))" ]
[ "0.6778147", "0.66215456", "0.6587748", "0.6419415", "0.6071375", "0.60661834", "0.6063572", "0.6040226", "0.60079324", "0.59419805", "0.5918246", "0.5916295", "0.5871361", "0.58706796", "0.58664465", "0.5850558", "0.5849682", "0.5848059", "0.5840836", "0.58217335", "0.57717675", "0.57646155", "0.57580245", "0.5730074", "0.5689477", "0.5650869", "0.5631725", "0.562486", "0.5623761", "0.5615128", "0.5586573", "0.55844843", "0.5580747", "0.5579415", "0.55755424", "0.55713165", "0.5508459", "0.5505283", "0.5499032", "0.5494863", "0.54898083", "0.54600924", "0.5460047", "0.545683", "0.54538333", "0.5451269", "0.5438503", "0.5436928", "0.5430933", "0.542601", "0.5424612", "0.5418763", "0.5414143", "0.5412372", "0.54048705", "0.5404849", "0.54031086", "0.5385575", "0.5381299", "0.5379358", "0.5373085", "0.5344728", "0.5339316", "0.5338685", "0.53366935", "0.5328114", "0.53253776", "0.5323222", "0.5321711", "0.53094685", "0.5304084", "0.5295313", "0.5292597", "0.52893955", "0.5285918", "0.5283589", "0.5280924", "0.5280799", "0.52707094", "0.5267607", "0.5248892", "0.5247855", "0.52454716", "0.52445894", "0.52383786", "0.52247554", "0.52235675", "0.5218785", "0.5216153", "0.5202383", "0.5201475", "0.5199337", "0.5198316", "0.5197626", "0.5196398", "0.51931137", "0.51927656", "0.5192312", "0.5191557", "0.519124" ]
0.6656623
1
builds the test suite.
def test_suite(): suite = unittest.TestSuite() suite.addTests(unittest.makeSuite(PrimesTests)) suite.addTests(unittest.makeSuite(OtherTests)) return suite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setUpSuite():\n global _output_dir\n global _suite_configured\n\n if _suite_configured:\n return\n\n def remove_output_dir():\n global _output_dir\n if _output_dir != '':\n try:\n shutil.rmtree(_output_dir)\n except FileNotFoundError:\n pass\n\n atexit.register(remove_output_dir)\n _output_dir = tempfile.mkdtemp(dir=TESTS_DIR)\n\n os.environ['VOC_BUILD_DIR'] = os.path.join(_output_dir, 'build')\n os.environ['VOC_DIST_DIR'] = os.path.join(_output_dir, 'dist')\n\n # If the code has been precompiled, we don't have to\n # compile it as part of the test suite setup.\n precompile = os.environ.get('PRECOMPILE', 'true').lower() == 'true'\n if not precompile:\n _suite_configured = True\n return\n\n proc = subprocess.Popen(\n \"ant java\",\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n\n try:\n out, err = proc.communicate(timeout=30)\n except subprocess.TimeoutExpired:\n proc.kill()\n out, err = proc.communicate()\n raise\n\n if proc.returncode != 0:\n raise Exception(\"Error compiling java sources: \" + out.decode('ascii'))\n\n _suite_configured = True", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_polarization.test_suite())\n testSuite.addTest(test_xray.test_suite())\n testSuite.addTest(test_emspectrum.test_suite())\n return testSuite", "def test_quick_build(self):\n pass", "def test_build(self):\n self.app.build()", "def test_generate_all_testing(self):\n pass", "def make_suite():\n suite = unittest.TestSuite()\n return suite", "def suite():\n test_suite = unittest.TestSuite()\n test_suite.addTest(unittest.makeSuite(TestReversi))\n test_suite.addTest(unittest.makeSuite(TestGuessNumberGame))\n test_suite.addTest(unittest.makeSuite(TestConnectFourGame))\n test_suite.addTest(unittest.makeSuite(TestBuscamina))\n test_suite.addTest(unittest.makeSuite(TestGame))\n test_suite.addTest(unittest.makeSuite(TestDamaGame))\n test_suite.addTest(unittest.makeSuite(TestTateti))\n test_suite.addTest(unittest.makeSuite(TestGameBase))\n test_suite.addTest(unittest.makeSuite(TestFourNumber))\n test_suite.addTest(unittest.makeSuite(test_game_generala))\n test_suite.addTest(unittest.makeSuite(test_categories))\n test_suite.addTest(unittest.makeSuite(test_player))\n test_suite.addTest(unittest.makeSuite(test_throw_class))\n test_suite.addTest(unittest.makeSuite(test_throw_dice))\n test_suite.addTest(unittest.makeSuite(TestBets))\n test_suite.addTest(unittest.makeSuite(TestDeck))\n test_suite.addTest(unittest.makeSuite(TestBlackjackGame))\n test_suite.addTest(unittest.makeSuite(TestHands))\n test_suite.addTest(unittest.makeSuite(PokerTest))\n test_suite.addTest(unittest.makeSuite(PokerGameTest))\n test_suite.addTest(unittest.makeSuite(TestBattleship))\n test_suite.addTest(unittest.makeSuite(TestBoard))\n test_suite.addTest(craps_suite())\n test_suite.addTest(sudoku_suite())\n test_suite.addTest(roulette_suite())\n test_suite.addTest(dungeon_suite())\n test_suite.addTest(unittest.makeSuite(TestSenku))\n test_suite.addTest(unittest.makeSuite(TestAhorcado))\n test_suite.addTest(unittest.makeSuite(TestHanoiTower))\n return 
test_suite", "def suite():\n test_suite = unittest.TestSuite()\n test_suite.addTest(unittest.makeSuite(globalOptimizerTest))\n test_suite.addTest(unittest.makeSuite(recursiveStepTest))\n return test_suite", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_spec(\"test_cmd_parser\"))\n return testSuite", "def suite():\n\n testSuite = common.unittest.TestSuite()\n\n cdatafuncs = [niclassdata] # non-indexing data tests\n cdatafuncs.append(iclassdata) # indexing data tests\n\n heavy = common.heavy\n # Choose which tests to run in classes with autogenerated tests.\n if heavy:\n autoprefix = 'test' # all tests\n else:\n autoprefix = 'test_l' # only light tests\n\n niter = 1\n for i in range(niter):\n # Tests on query data.\n for cdatafunc in cdatafuncs:\n for cdata in cdatafunc():\n class_ = eval(cdata[0])\n if heavy or not class_.heavy:\n suite_ = common.unittest.makeSuite(class_,\n prefix=autoprefix)\n testSuite.addTest(suite_)\n # Tests on query usage.\n testSuite.addTest(common.unittest.makeSuite(ScalarTableUsageTestCase))\n testSuite.addTest(common.unittest.makeSuite(MDTableUsageTestCase))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage1))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage2))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage3))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage4))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage5))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage6))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage7))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage8))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage9))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage10))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage11))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage12))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage13))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage14))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage15))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage16))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage17))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage18))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage19))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage20))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage21))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage22))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage23))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage24))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage25))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage26))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage27))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage28))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage29))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage30))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage31))\n testSuite.addTest(common.unittest.makeSuite(IndexedTableUsage32))\n\n return testSuite", "def test_quick_build1(self):\n pass", "def main():\n run_test_all()", "def suite():\n\tsuite1 = unittest.makeSuite(TestCrop, 'test')\n\tsuite2 = unittest.makeSuite(TestDiag, 'test')\n\tsuite3 = unittest.makeSuite(TestEye, 
'test')\n\tsuite4 = unittest.makeSuite(TestMinDim, 'test') \n\tsuite5 = unittest.makeSuite(TestNnz, 'test')\n\tsuite6 = unittest.makeSuite(TestOnes, 'test')\n\tsuite7 = unittest.makeSuite(TestRand, 'test')\n\tsuite8 = unittest.makeSuite(TestRandSym, 'test')\n\tsuite9 = unittest.makeSuite(TestReplace, 'test')\n\tsuite10 = unittest.makeSuite(TestTriu, 'test')\n\tsuite11 = unittest.makeSuite(TestTril, 'test')\n\treturn unittest.TestSuite((suite1, suite2, suite3, suite4, suite5, suite6, suite7, suite8, suite9, suite10, suite11))", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' + modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def make_suite():\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes():\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n return suite", "def make_suite():\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes():\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n return suite", "def testsuite():\n \n tests = unittest.TestSuite()\n\n parse_tests = unittest.makeSuite(ParseTestCase, 'test')\n tests = unittest.TestSuite( (tests, parse_tests) )\n\n return tests", "def build_test_suite(loader, tests, pattern, test_case_factory):\n suite = unittest.TestSuite()\n data_root = os.path.join(os.path.dirname(__file__), '..', '..', '..', 'spec')\n assert os.path.exists(data_root)\n test_files = []\n absroot = os.path.abspath(data_root)\n for (dirpath, dirnames, filenames) in os.walk(absroot):\n for filename in filenames:\n if filename.endswith(\".txt\"):\n test_file = os.path.join(dirpath, filename)\n test_files.append(test_file)\n test_files.sort()\n for test_file in test_files:\n test_name = test_file[len(absroot)+1:]\n spec_test = _read_spec_test(test_file)\n test_class = test_case_factory(test_file, test_name, spec_test)\n if test_class:\n suite.addTests(loader.loadTestsFromTestCase(test_class))\n return suite", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(SourceHeavyFootprintTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def build_suite(self, test_case_list):\n if not test_case_list:\n raise ValueError('No test cases provided.')\n\n loader = unittest.TestLoader()\n\n # TODO(ewiseblatt): 20150521\n # This doesnt seem to take effect. The intent here is to not sort the order\n # of tests. But it still is. So I've renamed the tests to lexographically\n # sort in place. 
Leaving this around anyway in hopes to eventually figure\n # out why it doesnt work.\n loader.sortTestMethodsUsing = None\n\n suite = unittest.TestSuite()\n for test in test_case_list:\n suite.addTests(loader.loadTestsFromTestCase(test))\n return suite", "def run(cls): \n tests_to_run = cls.config.TESTS # A list of 5-tuple elements specifying the tests to run. See the\n # 'Test Setup' section in config.py.template for more info.\n test_group_name = \"Alchemist Tests\" # A short string identifier for this test run.\n output_dir = cls.config.OUTPUT_DIR # The output file where we write results.\n \n try:\n os.makedirs(output_dir,0o777)\n except:\n pass\n num_tests_to_run = len(tests_to_run)\n\n print(OUTPUT_DIVIDER_STRING)\n if num_tests_to_run == 1:\n print(\"Running %d test in %s\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Running %d tests in %s\" % (num_tests_to_run, test_group_name))\n failed_tests = []\n\n cls.before_run_tests()\n \n spark_settings = []\n for i in cls.config.SPARK_SETTINGS:\n spark_settings.append(i.to_array()[0])\n \n output_settings = []\n for i in cls.config.OUTPUT_SETTINGS:\n output_settings.append(i.to_array()[0])\n \n main_class = \"altest.AlTest\"\n\n for meta_data, opt_sets in tests_to_run:\n print(OUTPUT_DIVIDER_STRING + '\\n')\n# print(\"Running test command: '%s' ... \" % main_class)\n \n meta = {}\n meta_pairs = [i.to_tuple() for i in meta_data]\n for mp in meta_pairs:\n meta[mp[0].replace('-', '_')] = mp[1].replace('0x20', ' ')\n \n meta_settings = []\n for i in meta_data:\n meta_settings.append(i.to_array()[0])\n \n# stdout_filename = \"%s/%s.out\" % (output_dir, meta['short_name'])\n# stderr_filename = \"%s/%s.err\" % (output_dir, meta['short_name'])\n# \n# out_file = open(output_dir + \"/\" + meta['short_name'] + \".out\", 'w')\n\n # Run a test for all combinations of the OptionSets given, then capture\n # and print the output.\n opt_set_arrays = [i.to_array() for i in opt_sets]\n for opt_list in itertools.product(*opt_set_arrays):\n\n cmd = cls.get_spark_submit_cmd(spark_settings, main_class, output_settings, meta_settings, opt_list)\n# print(\"\\nSetting env var SPARK_SUBMIT_OPTS: %s\" % java_opts_str)\n# test_env[\"SPARK_SUBMIT_OPTS\"] = java_opts_str\n print(\"Running command:\")\n print(\"%s\\n\" % cmd)\n Popen(cmd, shell=True, env=test_env).wait()\n\n try:\n src = output_dir + meta['short_name'] + '_latest/'\n src_files = os.listdir(src)\n src_file = src_files[0][:-4]\n new_dir = output_dir + src_file\n os.makedirs(new_dir)\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if (os.path.isfile(full_file_name)):\n shutil.copy(full_file_name, new_dir)\n except:\n pass\n \n# result_string = cls.process_output(config, meta['short_name'], opt_list,\n# stdout_filename, stderr_filename)\n# print(OUTPUT_DIVIDER_STRING)\n# print(\"\\nResult: \" + result_string)\n# print(OUTPUT_DIVIDER_STRING)\n# if \"FAILED\" in result_string:\n# failed_tests.append(meta['short_name'])\n# \n# \n# out_file.write(result_string + \"\\n\")\n# out_file.flush()\n\n if num_tests_to_run == 1:\n print(\"Finished running %d test in %s.\" % (num_tests_to_run, test_group_name))\n else:\n print(\"Finished running %d tests in %s.\" % (num_tests_to_run, test_group_name))\n# print(\"\\nNumber of failed tests: %d, failed tests: %s\" %\n# (len(failed_tests), \",\".join(failed_tests)))\n print(OUTPUT_DIVIDER_STRING)", "def suite():\n\tts = unittest.TestSuite()\n\tfor test_module in __all__:\n\t\tm = importlib.import_module(\"pyroclast.test.\" + 
test_module)\n\t\tfor n in dir(m):\n\t\t\tc = getattr(m, n)\n\t\t\tif is_test_case(c):\n\t\t\t\ts = unittest.TestLoader().loadTestsFromTestCase(c)\n\t\t\t\tts.addTests(s)\n\treturn ts", "def tests():", "def suite():\n \n suite = unittest.TestSuite()\n suite.addTest(FibonacciTestCase)\n suite.addTest(IntervalTestCase)\n suite.addTest(GameTestCase)\n return suite", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_h5fs(\"test_mode\"))\n testSuite.addTest(test_h5fs(\"test_path_splitting\"))\n testSuite.addTest(test_h5fs(\"test_link_mixing\"))\n return testSuite", "def getTestSuite():\n test_suite = unittest.TestSuite([])\n\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistReaders))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPySnpTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistributedBed))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFileCache))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestUtilTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestIntRangeSet))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKrDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpGen))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGenerate))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestExampleFile))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstMemMap))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpMemMap))\n test_suite.addTests(NaNCNCTestCases.factory_iterator())\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstReader))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKernelReader))\n\n return test_suite", "def test_suite():\n testSuite = unittest.TestSuite()\n testSuite.addTest(test_regulargrid(\"test_indexing\"))\n testSuite.addTest(test_regulargrid(\"test_interpolate\"))\n return testSuite", "def test_buildAll(self):\n builds = []\n builder = NewsBuilder()\n builder.build = lambda path, output, header: builds.append((\n path, output, header))\n builder.blacklist = ['vfs']\n builder._today = lambda: '2009-12-01'\n\n project = self.createFakeTwistedProject()\n builder.buildAll(project)\n\n coreTopfiles = project.child(\"topfiles\")\n coreNews = coreTopfiles.child(\"NEWS\")\n coreHeader = \"Twisted Core 1.2.3 (2009-12-01)\"\n\n conchTopfiles = project.child(\"conch\").child(\"topfiles\")\n conchNews = conchTopfiles.child(\"NEWS\")\n conchHeader = \"Twisted Conch 3.4.5 (2009-12-01)\"\n\n aggregateNews = project.child(\"NEWS\")\n\n self.assertEquals(\n builds,\n [(conchTopfiles, conchNews, conchHeader),\n (coreTopfiles, coreNews, coreHeader),\n (conchTopfiles, aggregateNews, conchHeader),\n (coreTopfiles, aggregateNews, coreHeader)])", "def test_suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(TestIntegration))\n suite.addTest(unittest.makeSuite(TestSection))\n return suite", "def suite():\n tests.init()\n\n suites = []\n suites += unittest.makeSuite(DeconvolvedPsfPhotometryTestCase)\n suites += unittest.makeSuite(tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def test_suite():\n\tsuite = 
unittest.TestSuite()\n\tsuite.addTest(unittest.makeSuite(TestPloneDbFormsManager))\n\treturn suite", "def runTests(self):\n \n pass", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(FitExponentialTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def main_test():\n full = unittest.TestSuite()\n full.addTest(unittest.makeSuite(TestToolOptions))\n full.addTest(unittest.makeSuite(TestBadConfiguration))\n full.addTest(unittest.makeSuite(TestBasicEndpoints))\n full.addTest(unittest.makeSuite(TestMultipleEPG))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpoints))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpointsAddPolicyLater))\n full.addTest(unittest.makeSuite(TestExportPolicyRemoval))\n full.addTest(unittest.makeSuite(TestBasicEndpointsWithContract))\n full.addTest(unittest.makeSuite(TestBasicEndpointMove))\n full.addTest(unittest.makeSuite(TestPolicyChangeProvidedContract))\n full.addTest(unittest.makeSuite(TestChangeL3Out))\n full.addTest(unittest.makeSuite(TestDuplicates))\n full.addTest(unittest.makeSuite(TestDuplicatesTwoL3Outs))\n full.addTest(unittest.makeSuite(TestDeletions))\n\n unittest.main()", "def suite():\n # Get a list of all files.\n files = glob.glob(os.path.join(os.path.dirname(os.path.abspath(\n inspect.getfile(inspect.currentframe()))), \"test*.py\"))\n files = [os.path.splitext(os.path.basename(_i))[0] for _i in files]\n\n modules = []\n # try to import all files.\n for module in files:\n try:\n module = __import__(module, globals(), locals())\n except:\n warnings.warn(\"Module %s could not be imported\" % module)\n continue\n modules.append(module)\n\n suite = unittest.TestSuite()\n for module in modules:\n for attrib in dir(module):\n value = getattr(module, attrib)\n try:\n if issubclass(value, unittest.TestCase):\n suite.addTest(unittest.makeSuite(value, \"test\"))\n except:\n pass\n return suite", "def makeTestSuiteV201109():\n suite = unittest.TestSuite()\n suite.addTests(unittest.makeSuite(TrafficEstimatorServiceTestV201109))\n return suite", "def suite():\n tests.init()\n\n suites = []\n suites += unittest.makeSuite(MeasureSourcesTestCase)\n suites += unittest.makeSuite(ForcedMeasureSourcesTestCase)\n suites += unittest.makeSuite(tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n utilsTests.init()\n\n suites = [\n unittest.makeSuite(RingsTestCase),\n unittest.makeSuite(utilsTests.MemoryTestCase),\n ]\n\n return unittest.TestSuite(suites)", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def main():\n fix_sys_path()\n result = unittest.TextTestRunner(verbosity=2).run(createTestSuite())\n\n if result.testsRun != EXPECTED_TEST_COUNT:\n raise Exception(\n 'Expected %s tests to be run, not %s.' 
% (EXPECTED_TEST_COUNT, result.testsRun))\n\n if len(result.errors) != 0 or len(result.failures) != 0:\n raise Exception(\n \"Functional test suite failed: %s errors, %s failures of %s tests run.\" % (\n len(result.errors), len(result.failures), result.testsRun))", "def testsuite():\n loader = unittest.TestLoader()\n ts = unittest.TestSuite()\n ts.addTests(loader.loadTestsFromTestCase(api_server_test.ApiServerTestCase))\n ts.addTests(loader.loadTestsFromTestCase(codec_test.CodecTestCase))\n return ts", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(HscDistortionTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(CreateV1TestCase))\n suite.addTest(unittest.makeSuite(CreateV2TestCase))\n return suite", "def suite():\n return unittest.makeSuite(OpenedTestCase)", "def suite():\n tsuite = unittest.TestSuite()\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__]))\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(commandtests))\n tsuite.addTest(unittest.defaultTestLoader.loadTestsFromModule(locktests))\n return tsuite", "def setup_build_tests(self):\n # Now copy the relative files\n self.cache_extra_test_sources(self.build_relpath)\n\n # Ensure the path exists since relying on a relative path at the\n # same level as the normal stage source path.\n mkdirp(self.install_test_root)", "def suite():\n\n utilsTests.init()\n\n suites = []\n suites += unittest.makeSuite(StatisticsTestCase)\n suites += unittest.makeSuite(utilsTests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def __main() :\n launchTests()", "def suite():\n tests.init()\n\n suites = []\n suites += unittest.makeSuite(AngleTestCase)\n suites += unittest.makeSuite(tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def runtest(self):", "def runTest(self):\n self.setUp()\n self.test_STLModelBuilder1()", "def build():", "def test_suite():\n testSuite = unittest.TestSuite()\n\n testSuite.addTest(test_classfactory(\"test_inheritance\"))\n return testSuite", "def test_testutils():\n build()\n sh(\"%s psutil\\\\tests\\\\test_testutils.py\" % PYTHON)", "def test_suite():\n suite = unittest.TestSuite()\n suite.addTests(\n [\n layered(\n doctest.DocFileSuite(\"behaviors.rst\"),\n layer=testing.TEXT_INTEXER_INTEGRATION_TESTING,\n ),\n ]\n )\n return suite", "def suite():\n test_suite = unittest.TestSuite()\n test_suite.addTest(unittest.makeSuite(TestFunctionalSPF, \"test\"))\n return test_suite", "def gen_suite(tests):\n cases = [gen_case(test) for test in tests]\n return {\n 'cases': cases,\n 'scored': True,\n 'setup': '',\n 'teardown': '',\n 'type': 'doctest'\n }", "def suite():\n utilsTests.init()\n suites = []\n suites += unittest.makeSuite(TestTrackingDb)\n return unittest.TestSuite(suites)", "def test_buildTarballsScript(self):\n builds = []\n def myBuilder(checkout, destination):\n builds.append((checkout, destination))\n tarballBuilder = BuildTarballsScript()\n tarballBuilder.buildAllTarballs = myBuilder\n\n tarballBuilder.main([\"checkoutDir\", \"destinationDir\"])\n self.assertEquals(\n builds,\n [(FilePath(\"checkoutDir\"), FilePath(\"destinationDir\"))])", "def suite():\n\n lsst_tests.init()\n\n suites = []\n suites += unittest.makeSuite(DipoleFitAlgorithmTest)\n suites += 
unittest.makeSuite(DipoleFitTaskTest)\n suites += unittest.makeSuite(DipoleFitTaskEdgeTest)\n suites += unittest.makeSuite(lsst_tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)", "def createTestSuite():\n import tests.functional.tests as functional\n return unittest.TestLoader().loadTestsFromModule(functional)", "def _suite(self):\n import mpi.test_application\n import mpi.test_communicator\n import mpi.test_launcher\n\n test_cases = []\n for mod in [\n mpi.test_application,\n mpi.test_communicator,\n mpi.test_launcher,\n ]:\n test_cases += mod.test_classes()\n \n suite = unittest.TestSuite()\n for test_case in test_cases:\n suite.addTest(unittest.makeSuite(test_case))\n\n return suite", "def run_test(self):\n\n # populate *_ps sets\n self.enter_project_file()\n\n # populate *_dir sets\n self.enter_directories()\n\n # The files in the directories makes up the largest possible set of files\n self.result_files = self.result_files_dir\n self.design_files = self.design_files_dir\n self.design_space_files = self.design_space_files_dir\n self.test_bench_files = self.test_bench_files_dir\n\n # populate *_ms sets\n self.enter_meta_results_file()\n\n # populate *_OK sets\n self.check_analysis_status()\n\n df = {'design_files_dir' : list(self.design_files_dir),'design_files_pr' : list(self.design_files_pr),\n 'design_files_ms' : list(self.design_files_ms), 'design_files_OK' : list(self.design_files_OK)}\n\n ds = {'design_space_files_dir' : list(self.design_space_files_dir),\n 'design_space_files_pr' : list(self.design_space_files_pr)}\n\n rs = {'result_files_dir' : list(self.result_files_dir), 'result_files_ms' : list(self.result_files_ms),\n 'result_files_OK' : list(self.result_files_OK)}\n\n tb = {'test_bench_files_dir' : list(self.test_bench_files_dir),\n 'test_bench_files_ms' : list(self.test_bench_files_ms)}\n\n srl = SummaryReportsLinks(self.result_files_dir)\n\n lf = {'files_linked_from_sum_reps' : srl.get_files(),\n 'folders_linked_from_sum_reps' : srl.get_folders()}\n\n # 'test_bench_files_pr' : list(self.test_bench_files_pr),\n \n json_test = {'design_files' : df, 'design_space_files' : ds, 'result_files' : rs,\n 'test_bench_files' : tb, 'stat_files' : self.stat_files,\n 'files_linked_from_sum_reps' : lf}\n\n with open('test_run.json','wb') as f_out:\n json.dump(json_test, f_out, indent=4)", "def test_build_model(arguments):\n ...", "def suite():\n loader = unittest.TestLoader()\n mysuite = unittest.TestSuite()\n mysuite.addTest(loader.loadTestsFromTestCase(TestUtils))\n \n return mysuite", "def runTestSuites(self):\n \n self.testsuitesToXML()\n \n\n tss = []\n jobStatus = {}\n for t in self.testsuites:\n d = t.testsuitedir\n runner = os.path.join(self.basepath, 'testSuiteRunner.py')\n tdir = os.path.join(d, 'testsuite.out')\n cmd = 'python %s %s>& %s' % (runner, d,tdir)\n #print 'about to popen the cmd: %s' % cmd\n tss.append((t.name, popen2.Popen3(cmd)))\n jobStatus[t.name] = ('running', nowSecs())\n ntests = len(tss)\n printJobStatus(jobStatus)\n\n while tss:\n toRemove = [p for p in tss if p[1].poll() != -1]\n if toRemove:\n [tss.remove(p) for p in toRemove]\n for p in toRemove:\n jobStatus[p[0]] = ('completed', nowSecs())\n\n printJobStatus(jobStatus)\n time.sleep(10)\n\n print 'all %d tests have completed' 
% ntests", "def build_all():\n client = TestClient()\n client.save({\"conanfile.py\": GenConanfile().with_setting(\"build_type\")})\n client.run(\"export . --name=foo --version=1.0 --user=user --channel=testing\")\n client.save({\"conanfile.py\": GenConanfile().with_require(\"foo/1.0@user/testing\")\n .with_setting(\"build_type\")})\n client.run(\"export . --name=bar --version=1.0 --user=user --channel=testing\")\n client.save({\"conanfile.py\": GenConanfile().with_require(\"foo/1.0@user/testing\")\n .with_require(\"bar/1.0@user/testing\")\n .with_setting(\"build_type\")})\n client.run(\"export . --name=foobar --version=1.0 --user=user --channel=testing\")\n client.run(\"install --requires=foobar/1.0@user/testing --build='*'\")\n return client", "def starlib_test_suite():\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n \n suite.addTests(loader.loadTestsFromModule(test_star))\n suite.addTests(loader.loadTestsFromModule(test_camera))\n suite.addTests(loader.loadTestsFromModule(test_image))\n suite.addTests(loader.loadTestsFromModule(test_star_database))\n suite.addTests(loader.loadTestsFromModule(test_kdtree))\n\n return suite", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def suite():\n return unittest.makeSuite(ClientsTestCase)", "def makeSuite(self):\n expector = self._expector\n\n class TC(unittest.TestCase):\n def runTest(self):\n \"\"\"\n This method spams the logger from the expector from its base level\n up to CRITICAL with a message that is fairly easy to regex match in\n the routines from the methods in test_log_stream.py.\n \"\"\"\n import logging\n lg_name = expector.logger_name\n lg = logging.getLogger(lg_name)\n start_level = logging.getLevelName('DEBUG_9')\n end_level = logging.getLevelName('CRITICAL_0')\n for lvl in range(start_level, end_level):\n lg.log(lvl, 'MATCH-START %s %d(%s) MATCH-END',\n lg_name, lvl, logging.getLevelName(lvl))\n\n return [TC()]", "def master_test_suite( pkg_mod_iter ):\n master_suite= unittest.TestSuite()\n for package, module_iter in pkg_mod_iter:\n for filename, module in module_iter:\n print( package+\".\"+module )\n suite= doctest.DocTestSuite( package+\".\"+module )\n print( \" \", suite )\n master_suite.addTests( suite )\n runner= unittest.TextTestRunner( verbosity=1 )\n runner.run( master_suite )", "def suite():\n\n lsst.utils.tests.init()\n\n suites = []\n suites += unittest.makeSuite(SchemaTestCase)\n suites += unittest.makeSuite(lsst.utils.tests.MemoryTestCase)\n return unittest.TestSuite(suites)", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(GetDailyReportV1TestCase))\n suite.addTest(unittest.makeSuite(GetDailyReportV2TestCase))\n return suite", "def build_all(self):\n self.android_build()\n self.generate_patch_build('')\n self.generate_specs_build()\n self.generate_interfaces()", "def build(root):", "def test_create_build_for_all_namespaces(self):\n pass", "def gen_junit(self):\n\n test_attrs = [\n \"polarion-project-id\", \"polarion-custom-description\",\n \"polarion-custom-plannedin\", \"polarion-custom-isautomated\",\n \"polarion-custom-tags\"\n ]\n\n test_attrs_values = [\n self.args.ts, self.args.desc,\n self.args.rel, True, self.args.tags\n ]\n\n # This block allows for a dynamic dictionary to be created\n # depending on arguments passed.\n props = {\n key: value for key, value in zip(test_attrs,\n test_attrs_values)\n if value is not None\n }\n\n 
self._gen_polarion_property_file(test_attrs, test_attrs_values,\n self.args.tr, self.args.tc,\n property_file=self.args.pf)\n\n test_case = [TestCase(self.args.tc.pop(0), '', self.args.et)]\n\n if len(self.args.tc) >= 1:\n for cases in self.args.tc:\n test_case.append(TestCase(cases, '', self.args.et))\n\n testsuite = [TestSuite(self.args.project, test_case, properties=props)]\n\n with open(self.args.output_f, 'w') as results:\n TestSuite.to_file(results, testsuite)\n if self.args.ur:\n self._upload(self.polarion_url, self.args.output_f,\n self.username, self.password)", "def build_test_cmake(self, test, opts=\"\", outfile=None):\n\n env = {\"AMReX_ROOT\":self.amrex_install_dir}\n\n # super-builds always need a configure now, all other builds might\n # add additional CMake config options and re-configure on existing configured\n # build directory, if additional build cmakeSetupOpts are set\n if self.isSuperbuild or test.cmakeSetupOpts != \"\":\n builddir, installdir = self.cmake_config(\n name=test.name,\n path=self.source_dir,\n configOpts=self.amrex_cmake_opts + \" \" +\n self.source_cmake_opts + \" \" +\n test.cmakeSetupOpts)\n self.source_build_dir = builddir\n\n # compile\n rc, comp_string = self.cmake_build( name = test.name,\n target = test.target,\n path = self.source_build_dir,\n opts = opts,\n env = env,\n outfile = outfile)\n\n # make returns 0 if everything was good\n if rc != 0:\n self.log.fail(\"Failed to build test \" + test.name)\n\n # if we built a binary executable, we need to rename it into a\n # GNUmake-like naming scheme so that the rest of the test logic can\n # pick it up\n elif not test.run_as_script:\n # Find location of executable\n path_to_exe = None\n\n # search by target name\n for root, dirnames, filenames in os.walk(self.source_build_dir):\n if test.target in filenames:\n path_to_exe = os.path.join(root, test.target)\n break\n\n # fallback: pick first executable in CMake output directory\n if path_to_exe is None:\n path_to_bin = None\n cmake_output_dir = \"CMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=\"\n cmake_cache = os.path.join(self.source_build_dir, \"CMakeCache.txt\")\n with open(cmake_cache, \"r\") as cc:\n for ln in cc.readlines():\n if ln.startswith(cmake_output_dir):\n path_to_bin = ln[len(cmake_output_dir):].strip()\n break\n\n if path_to_bin is None:\n if not test.customRunCmd:\n self.log.warn(\"build successful but binary directory not found\")\n rc = 1\n else:\n # Find location of executable\n for root, dirnames, filenames in os.walk(path_to_bin):\n for f in filenames:\n f_path = os.path.join(root, f)\n if os.access(f_path, os.X_OK):\n if not Path(f_path).is_symlink():\n path_to_exe = f_path\n break\n if path_to_exe is not None:\n break\n\n if path_to_exe is None:\n if not test.customRunCmd:\n self.log.warn(\"build successful but executable not found\")\n rc = 1\n else:\n # Copy and rename executable to test dir\n shutil.move(f\"{path_to_exe}\",\n f\"{self.source_dir}/{test.buildDir}/{test.name}.ex\")\n\n return comp_string, rc", "def test_cases():\n CasesTestCase.generate_tests()\n yield CasesTestCase\n yield DocTestsTestCase", "def main():\n # Disable *.pyc files\n sys.dont_write_bytecode = True\n\n # Add \"..\" to module search path\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n top_dir = os.path.abspath(os.path.join(cur_dir, os.pardir))\n sys.path.append(top_dir)\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0,\n 
help=\"verbosity level, use: [-v | -vv | -vvv]\")\n parser.add_argument(\"-s\", \"--start-directory\", default=None,\n help=\"directory to start discovery\")\n parser.add_argument(\"-p\", \"--pattern\", default=\"test*.py\",\n help=\"pattern to match test files ('test*.py' default)\")\n parser.add_argument(\"test\", nargs=\"*\",\n help=\"test specs (e.g. module.TestCase.test_func)\")\n args = parser.parse_args()\n\n if not args.start_directory:\n args.start_directory = cur_dir\n\n if args.verbose > 2:\n logging.basicConfig(level=logging.DEBUG, format=\"DEBUG: %(message)s\")\n\n loader = unittest.TestLoader()\n if args.test:\n # Add particular tests\n for test in args.test:\n suite = unittest.TestSuite()\n suite.addTests(loader.loadTestsFromName(test))\n else:\n # Find all tests\n suite = loader.discover(args.start_directory, args.pattern)\n\n runner = unittest.TextTestRunner(verbosity=args.verbose)\n result = runner.run(suite)\n return result.wasSuccessful()", "def make_testsuite(testsuite: Dict) -> NoReturn:\n # validate testsuite format\n load_testsuite(testsuite)\n\n testsuite_config = testsuite[\"config\"]\n testsuite_path = testsuite_config[\"path\"]\n testsuite_variables = convert_variables(\n testsuite_config.get(\"variables\", {}), testsuite_path\n )\n\n logger.info(f\"start to make testsuite: {testsuite_path}\")\n\n # create directory with testsuite file name, put its testcases under this directory\n testsuite_path = ensure_file_abs_path_valid(testsuite_path)\n testsuite_dir, file_suffix = os.path.splitext(testsuite_path)\n # demo_testsuite.yml => demo_testsuite_yml\n testsuite_dir = f\"{testsuite_dir}_{file_suffix.lstrip('.')}\"\n\n for testcase in testsuite[\"testcases\"]:\n # get referenced testcase content\n testcase_file = testcase[\"testcase\"]\n testcase_path = __ensure_absolute(testcase_file)\n testcase_dict = load_test_file(testcase_path)\n testcase_dict.setdefault(\"config\", {})\n testcase_dict[\"config\"][\"path\"] = testcase_path\n\n # override testcase name\n testcase_dict[\"config\"][\"name\"] = testcase[\"name\"]\n # override base_url\n base_url = testsuite_config.get(\"base_url\") or testcase.get(\"base_url\")\n if base_url:\n testcase_dict[\"config\"][\"base_url\"] = base_url\n # override verify\n if \"verify\" in testsuite_config:\n testcase_dict[\"config\"][\"verify\"] = testsuite_config[\"verify\"]\n # override variables\n # testsuite testcase variables > testsuite config variables\n testcase_variables = convert_variables(\n testcase.get(\"variables\", {}), testcase_path\n )\n testcase_variables = merge_variables(testcase_variables, testsuite_variables)\n # testsuite testcase variables > testcase config variables\n testcase_dict[\"config\"][\"variables\"] = convert_variables(\n testcase_dict[\"config\"].get(\"variables\", {}), testcase_path\n )\n testcase_dict[\"config\"][\"variables\"].update(testcase_variables)\n\n # override weight\n if \"weight\" in testcase:\n testcase_dict[\"config\"][\"weight\"] = testcase[\"weight\"]\n\n # make testcase\n testcase_pytest_path = make_testcase(testcase_dict, testsuite_dir)\n pytest_files_run_set.add(testcase_pytest_path)", "def test_main(self):\n self.createFakeSphinxProject()\n self.builder.main([self.sphinxDir.parent().path])\n self.verifyBuilt()", "def create_task(testset_path):\n task_suite = unittest.TestSuite() # 测试套件\n testsets = load_testcases_by_path(testset_path)\n print('testsets ----> %s\\n' % testsets)\n for testset in testsets:\n print('testset ----> %s\\n' % testset)\n suite = create_suite(testset)", "def 
suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(UpdateV1TestCase))\n return suite", "def suite():\n suite_obj = unittest.TestSuite()\n suite_obj.addTest(TestEssentials())\n return suite_obj", "def setup_package():\n\n global TEST_WORKSPACE\n TEST_WORKSPACE = env.get_workspace('authentication')\n\n os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE\n\n test_config = {}\n\n # Setup environment variables for the test cases.\n host_port_cfg = {'viewer_host': 'localhost',\n 'viewer_port': env.get_free_port(),\n 'viewer_product': 'authentication'}\n\n test_env = env.test_env(TEST_WORKSPACE)\n\n codechecker_cfg = {\n 'check_env': test_env,\n 'workspace': TEST_WORKSPACE,\n 'checkers': []\n }\n\n codechecker_cfg.update(host_port_cfg)\n\n codechecker_cfg['run_names'] = []\n\n test_config['codechecker_cfg'] = codechecker_cfg\n\n # Export configuration for the tests.\n env.export_test_cfg(TEST_WORKSPACE, test_config)\n\n # Enable authentication and start the CodeChecker server.\n env.enable_auth(TEST_WORKSPACE)\n print(\"Starting server to get results\")\n _start_server(codechecker_cfg, test_config, False)", "def build_testsets(base_url, test_structure, test_files = set() ):\n\n tests_out = list()\n test_config = TestConfig()\n testsets = list()\n benchmarks = list()\n #returns a testconfig and collection of tests\n for node in test_structure: #Iterate through lists of test and configuration elements\n if isinstance(node,dict): #Each config element is a miniature key-value dictionary\n node = lowercase_keys(node)\n for key in node:\n if key == u'import':\n importfile = node[key] #import another file\n if importfile not in test_files:\n logging.debug(\"Importing test sets: \" + importfile)\n test_files.add(importfile)\n import_test_structure = read_test_file(importfile)\n with cd(os.path.dirname(os.path.realpath(importfile))):\n import_testsets = build_testsets(base_url, import_test_structure, test_files)\n testsets.extend(import_testsets)\n elif key == u'url': #Simple test, just a GET to a URL\n mytest = Test()\n val = node[key]\n assert isinstance(val,str) or isinstance(val,unicode)\n mytest.url = base_url + val\n tests_out.append(mytest)\n elif key == u'test': #Complex test with additional parameters\n child = node[key]\n mytest = build_test(base_url, child)\n tests_out.append(mytest)\n elif key == u'benchmark':\n benchmark = build_benchmark(base_url, node[key])\n benchmarks.append(benchmark)\n elif key == u'config' or key == u'configuration':\n test_config = make_configuration(node[key])\n testset = TestSet()\n testset.tests = tests_out\n testset.config = test_config\n testset.benchmarks = benchmarks\n testsets.append(testset)\n return testsets", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def setUp(self):\n self.builder = NewsBuilder()\n self.project = FilePath(self.mktemp())\n self.project.createDirectory()\n self.existingText = 'Here is stuff which was present previously.\\n'\n self.createStructure(self.project, {\n 'NEWS': self.existingText,\n '5.feature': 'We now support the web.\\n',\n '12.feature': 'The widget is more robust.\\n',\n '15.feature': (\n 'A very long feature which takes many words to '\n 'describe with any accuracy was introduced so that '\n 'the line wrapping behavior of the news generating '\n 'code could be verified.\\n'),\n '16.feature': (\n 'A simpler feature\\ndescribed on multiple lines\\n'\n 'was added.\\n'),\n '23.bugfix': 'Broken stuff was fixed.\\n',\n '25.removal': 'Stupid stuff was deprecated.\\n',\n '30.misc': '',\n 
'35.misc': '',\n '40.doc': 'foo.bar.Baz.quux',\n '41.doc': 'writing Foo servers'})", "def generate_build_files(ctx):\n\n project_dir = Path(__file__).parent\n\n directory_of_the_tests = project_dir / \"tests/plugins\"\n directory_to_build_tests = project_dir / \"build/build_directory_for_tests\"\n\n # Clean UP\n if directory_to_build_tests.exists():\n shutil.rmtree(directory_to_build_tests)\n os.makedirs(directory_to_build_tests)\n\n # Finding hook_specs.py, each hook_specs represent a different project with different hooks\n hook_spec_paths = [\n path for path in directory_of_the_tests.glob(\"**/hook_specs.py\") if \"tmp\" not in path.parts\n ]\n\n # CMakeList.txt that includes all sub_directory with tests to be compiled\n root_cmake_list = directory_to_build_tests / \"CMakeLists.txt\"\n cmake_file_of_test_build_dir = [\n f\"add_subdirectory({i.parent.name })\\n\" for i in hook_spec_paths\n ]\n root_cmake_list.write_text(\"\".join(cmake_file_of_test_build_dir))\n\n # For each hook_specs, create a directory for the compilation and generate the files\n for project_hook_spec_path in hook_spec_paths:\n project_dir_for_build = directory_to_build_tests / project_hook_spec_path.parent.name\n project_dir_for_build.mkdir(parents=True)\n\n hm_generator = HookManGenerator(hook_spec_file_path=project_hook_spec_path)\n hm_generator.generate_project_files(dst_path=project_dir_for_build)\n\n # Find folder with Plugins\n plugins_dirs = [\n x\n for x in project_hook_spec_path.parent.iterdir()\n if x.is_dir() and (x / \"assets\").exists()\n ]\n\n # Copy all the plugins to the build dir\n for plugin in plugins_dirs:\n plugin_dir_build = project_dir_for_build / f\"plugin/{plugin.name}\"\n shutil.copytree(src=plugin, dst=plugin_dir_build)\n (plugin_dir_build / \"src/hook_specs.h\").write_text(\n hm_generator._hook_specs_header_content(plugin.stem)\n )\n\n # Create the CMakeFile on root of the project to include others CMake files.\n main_cmakelist = project_dir_for_build / \"CMakeLists.txt\"\n main_cmakelist_content = []\n main_cmakelist_content.append(\"add_subdirectory(cpp)\\nadd_subdirectory(binding)\\n\")\n main_cmakelist_content += [\n f\"add_subdirectory(plugin/{plugin.name}/src)\\n\" for plugin in plugins_dirs\n ]\n main_cmakelist.write_text(\"\".join(main_cmakelist_content))", "def test_buildNews(self):\n builds = []\n newsBuilder = NewsBuilder()\n newsBuilder.buildAll = builds.append\n newsBuilder.main([\"/foo/bar/baz\"])\n self.assertEquals(builds, [FilePath(\"/foo/bar/baz\")])", "def run_tests():\n os.environ['WORKDIR'] = CONFIG['workdir']\n os.environ['REPORTDIR'] = CONFIG['reportFolder']\n stdout = subprocess.DEVNULL\n if CONFIG['verbose']:\n stdout = None\n # cycle throught version\n total = 0\n valid = 0\n start = time.time()\n for version in utils.get_dirs(CONFIG['versionsFolder']):\n os.environ['VERSION'] = version\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version), CONFIG['workdir']\\\n , CONFIG['clearWorkdir'])\n # cycle throught use case\n for usecase in utils.get_dirs(CONFIG['testsFolder']):\n os.environ['TESTDIR'] = usecase\n if not CONFIG['quiet']:\n print('UseCase test: {}'.format(usecase))\n log_msg('info', 'UseCase test: {}'.format(usecase))\n try:\n folder = os.path.join(CONFIG['testsFolder'], usecase)\n with open(os.path.join(folder, CONFIG['useConfig'])) as usefp:\n jconfig = json.load(usefp)\n # clear workdir if desired\n if 'clearWorkdir' in jconfig.keys() and jconfig['clearWorkdir']:\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version)\\\n , 
CONFIG['workdir'], CONFIG['clearWorkdir'])\n # print('clearing')\n # raise\n cmd = ['py', os.path.join(folder, jconfig['entrypoint'])]\n total += 1\n if jconfig['runType'] == 'single':\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n else:\n for step in range(jconfig['numRuns']):\n if not CONFIG['quiet']:\n print('\\r >Step {}/{} '.format(step+1, jconfig['numRuns'])\\\n , end='', flush=True)\n log_msg('info', 'Step {}/{}'.format(step+1, jconfig['numRuns']))\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n if step+1 != jconfig['numRuns']:\n time.sleep(jconfig['interval'])\n except subprocess.CalledProcessError as excp:\n if not CONFIG['quiet']:\n print('Error msg:{}'\\\n .format(excp.stderr.decode().replace('\\r', '').replace('\\n', '|')))\n log_msg('error', excp.stderr.decode())\n else:\n valid += 1\n if not CONFIG['quiet']:\n print('{}.....Passed'.format(usecase))\n log_msg('info', '{} Passed'.format(usecase))\n\n elapse = time.time()-start\n log_msg('info', 'Ran {} tests in {:.3f}s with {} passed'.format(total, elapse, valid))\n print('-'*20)\n print('Ran {} tests in {:.3f}s with {} passed.'.format(total, elapse, valid))\n return total-valid", "def startTestRun(self):", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(ServicesMenuDropdownListTestCase(\"testServicesMenuDropdownListItems\"))\n return suite" ]
[ "0.72579163", "0.70755035", "0.69487077", "0.693024", "0.68874764", "0.6838446", "0.67889327", "0.6781857", "0.6769531", "0.67448074", "0.6737396", "0.67239064", "0.6713449", "0.67040795", "0.6690259", "0.6676821", "0.6676821", "0.66717535", "0.66633546", "0.66531706", "0.66531706", "0.6643563", "0.66417176", "0.66317904", "0.6626973", "0.661617", "0.66129327", "0.6583488", "0.6554864", "0.65544796", "0.65395844", "0.65235907", "0.6491563", "0.6487425", "0.6485035", "0.64836085", "0.64658564", "0.6462108", "0.6455468", "0.6448754", "0.64441246", "0.6436657", "0.6429698", "0.64109665", "0.6402471", "0.64002126", "0.63904274", "0.6389929", "0.638974", "0.63860965", "0.63844806", "0.63820004", "0.6369194", "0.6335866", "0.63357216", "0.63203496", "0.63029045", "0.6302091", "0.627542", "0.6262334", "0.6258591", "0.62570506", "0.6243647", "0.62373614", "0.62362325", "0.6231175", "0.62290126", "0.6227243", "0.62201923", "0.621864", "0.6216647", "0.62000173", "0.6177784", "0.6176995", "0.61589044", "0.61567026", "0.61543477", "0.6149206", "0.6137732", "0.6119163", "0.6113096", "0.61073434", "0.6103278", "0.610201", "0.6097668", "0.6096554", "0.60920095", "0.60868675", "0.60733134", "0.6071737", "0.60707057", "0.60504025", "0.60464895", "0.60297084", "0.6029379", "0.6018278", "0.60166097", "0.5999914", "0.59930086", "0.59918493" ]
0.6828355
6
Plots the path from start node to goal region as well as the graph (or tree) searched with the Sampling Based Algorithms.
def draw_results(algo_name, path, V, E, env, bounds, object_radius, resolution, start_pose, goal_region, elapsed_time):
    graph_size = len(V)
    path_size = len(path)
    # Calculate path length
    path_length = 0.0
    for i in xrange(len(path)-1):
        path_length += euclidian_dist(path[i], path[i+1])

    # Create title with descriptive information based on environment, path length, and elapsed_time
    title = algo_name + "\n" + str(graph_size) + " Nodes. " + str(len(env.obstacles)) + " Obstacles. Path Size: " + str(path_size) + "\n Path Length: " + str(path_length) + "\n Runtime(s)= " + str(elapsed_time)

    # Plot environment
    env_plot = plot_environment(env, bounds)
    # Add title
    env_plot.set_title(title)
    # Plot goal
    plot_poly(env_plot, goal_region, 'green')
    # Plot start
    buffered_start_vertex = Point(start_pose).buffer(object_radius, resolution)
    plot_poly(env_plot, buffered_start_vertex, 'red')

    # Plot Edges explored by ploting lines between each edge
    for edge in E:
        line = LineString([edge[0], edge[1]])
        plot_line(env_plot, line)

    # Plot path
    plot_path(env_plot, path, object_radius)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_global_path(start, goal, path, occupancy_grid):\n # Displaying the map\n fig_astar, ax_astar = display_map(occupancy_grid)\n\n # Plot the best path found and the list of visited nodes\n ax_astar.plot(path[0], path[1], marker=\"o\", color='blue');\n ax_astar.scatter(start[0], start[1], marker=\"o\", color='green', s=200);\n ax_astar.scatter(goal[0], goal[1], marker=\"o\", color='purple', s=200);\n # ax.set_ylim(ax.get_ylim()[::-1])", "def plot_start_and_goal(self, start_node, goal_node):\n start = Marker()\n #visualizations points and lines..\n start.header.frame_id = \"map\"\n start.header.stamp = rospy.get_rostime()\n start.ns = \"markers\"\n start.id = 1\n start.type = start.ARROW\n start.action = start.ADD\n \n start.scale.x = 10*self.rviz_tuning_plt\n start.scale.y = 2*self.rviz_tuning_plt\n start.scale.z = 2*self.rviz_tuning_plt\n start.color.r = 0.0\n start.color.g = 1.0\n start.color.b = 0.0\n start.color.a = 1.0\n # A value of ros::Duration() means never to auto-delete.\n start.lifetime = rospy.Duration()\n # Add x,y,z position to pose\n start.pose.position.x = start_node.x\n start.pose.position.y = start_node.y\n start.pose.position.z = 0\n # add quaternion to pose\n quat = self.euler_to_quaternion(0, 0, start_node.alpha)\n start.pose.orientation.x = quat[0]\n start.pose.orientation.y = quat[1]\n start.pose.orientation.z = quat[2]\n start.pose.orientation.w = quat[3]\n self.pub_start_goal.publish(start)\n \n goal = start\n goal.id = 2\n goal.color.b = 1\n # Add x,y,z position to pose\n goal.pose.position.x = goal_node.x\n goal.pose.position.y = goal_node.y\n goal.pose.position.z = 0\n # add quaternion to pose\n quat = self.euler_to_quaternion(0, 0, goal_node.alpha)\n goal.pose.orientation.x = quat[0]\n goal.pose.orientation.y = quat[1]\n goal.pose.orientation.z = quat[2]\n goal.pose.orientation.w = quat[3]\n\n self.pub_start_goal.publish(goal)", "def plot_path(self, current_path):\n full_path = current_path.copy()\n full_path.insert(0, self.root)\n\n path = Marker()\n id = 1\n\n # edge between nodes\n path = Marker()\n path.header.frame_id = \"map\"\n path.header.stamp = rospy.get_rostime()\n path.ns = \"markers\"\n path.id = 1\n path.type = path.LINE_STRIP\n path.action = path.ADD\n path.scale.x = self.rviz_tuning_plt\n path.color.a = 1.0\n path.color.r = 0.0\n path.color.g = 1.0\n path.color.b = 0.0\n\n path.lifetime = rospy.Duration()\n path.pose.orientation.w = 1.0\n\n for node in full_path:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.03\n path.points.append(p1)\n\n self.pub_path.publish(path)", "def plot_graph(self) -> None:", "def draw_graph(graph, start, goal, path=[], save_file=None):\n explored = graph.get_explored_nodes()\n node_pos = {n: graph.nodes[n]['pos'] for n in graph.nodes.keys()}\n edge_labels = {}\n for edge in graph.edges():\n edge_labels[edge] = graph[edge[0]][edge[1]]['weight']\n\n labels = {}\n for node in graph:\n labels[node] = node\n\n nx.draw_networkx_nodes(graph, node_pos, node_color='gray') #, nodelist=romania.nodes, node_color='w', node_size=500)\n nx.draw_networkx_edges(graph, node_pos, style='dashed')\n if len(explored) > 0:\n print(\"Explored = \"+str(explored))\n nx.draw_networkx_nodes(graph, node_pos, nodelist=explored, node_color='r')\n\n if len(path) > 0:\n nx.draw_networkx_nodes(graph, node_pos, nodelist= path, node_color='y')\n edgelist = []\n for i in range(1,len(path)):\n edgelist.append((path[i - 1], path[i]))\n nx.draw_networkx_edges(graph, node_pos, edgelist, edge_color='b', width=3)\n 
nx.draw_networkx_nodes(graph, node_pos, nodelist=[start, goal], node_color='g')\n\n\n\n nx.draw_networkx_labels(graph, node_pos, labels)\n nx.draw_networkx_edge_labels(graph, node_pos, edge_labels, font_size=8)\n\n plt.axis('off')\n plt.show() # display\n if save_file is not None:\n plt.savefig(save_file) # save as png", "def visualize_routes(self):\n visualize_tsp.plotTSP([self.best_solution], self.coords)", "def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()", "def show_grid(self):\n\n if not os.path.exists(self.path_to_results):\n os.mkdir(self.path_to_results)\n\n fig = plt.figure()\n\n if self.show_points == 1:\n plt.scatter(self.x_list_grid, self.y_list_grid, c='blue')\n\n plt.plot(self.x_list_main, self.y_list_main,\n 'green', label='straight path')\n plt.plot(self.x_list, self.y_list, 'red', label='first path')\n plt.plot(self.x_list_filtered, self.y_list_filtered,\n 'blue', label='filtered path')\n plt.title('Paths')\n plt.ylabel('Latitude')\n plt.xlabel('Longitude')\n # plt.legend()\n\n fig.savefig(os.path.join(self.path_to_results, 'Paths.png'))", "def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. 
:)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()", "def print_path(self):\n\n grid = tg.Graph.grid_graph(self.graph.rows,self.graph.cols)\n #tg.draw_grid(self.draw_edges_alt,self.graph.rows,self.graph.cols,grid)\n tg.draw_grid(self.edges,self.graph.rows,self.graph.cols,grid)", "def plot_graph():\n name = request.args.get('instance')\n name = str(name)\n distance = request.args.get('distance')\n path = request.args.get('path')\n if name == 'Custom':\n coords = request.args.get('coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n else:\n nodes = create_nodes(name)\n fig = Figure()\n axis = fig.add_subplot(1, 1, 1)\n\n axis.set_title(name + \" - Distance: \"+ str(distance))\n path = str(path).split(',')\n path = [int(i) for i in path]\n for i in range(len(path) - 1):\n\n start_node = nodes[path[i]]\n x1, y1 = start_node.x, start_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[i]))\n axis.text(x1,y1, str(path[i]))\n end_node = nodes[path[i+1]]\n x2, y2 = end_node.x, end_node.y\n axis.plot([x1,x2], [y1, y2])\n\n last_node = nodes[path[len(path)-1]]\n x1, y1 = last_node.x, last_node.y\n axis.text(x1,y1, str(path[len(path)-1]))\n\n begin_node = nodes[path[0]]\n x2, y2 = begin_node.x, begin_node.y\n axis.scatter(x1, y1, c = 'b', label = str(path[len(path)-1]))\n axis.plot([x1,x2], [y1, y2])\n\n output = io.BytesIO()\n FigureCanvas(fig).print_png(output)\n return Response(output.getvalue(), mimetype=\"image/png\")", "def plan_path(self, start_point, end_point, map_obj):\n # STUFF FOR TESTING \n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n self.vis_pub.publish(marker)\n \n exploration_bias = 1.0 - self.goal_bias\n final_node = None\n num_existing_path_points_added = 0\n \n self.rrt_star = RRTStar(Node(start_point))\n self.max_iterations = self.rrt_star.max_size\n while self.rrt_star.size <= self.max_iterations:\n p = np.random.uniform()\n if p < exploration_bias:\n \n x_rand = self.map.sample_free_space()\n else:\n if final_node is None:\n x_rand = end_point\n else:\n x_rand = self.branched_from_existing_path(\n final_node,\n depth_underestimate=num_existing_path_points_added\n )\n num_existing_path_points_added += 1\n\n x_nearest = self.rrt_star.nearest(x_rand) # Find the nearest node to x_rand\n\n path = self.map.generate_line_path(x_nearest.value, x_rand, eta=self.eta)\n if path is not None: # no obstacles between x_nearest and x_rand\n x_new = path[-1]\n X_nearby_connectable = self.find_nearby_connectable(x_nearest, x_new)\n\n cost_min, node_min = self.find_best_parent(X_nearby_connectable, x_new)\n\n X_nearby_connectable.remove(node_min) # Remove 
x_new's parent node from the list of nearby nodes so it is not considered for rewiring\n \n # Create the new node at x_new!\n node_new = self.rrt_star.add_config(node_min, x_new)\n \n if self.enable_vis:\n # FOR TESTING ONLY #\n # Code to publish marker for new node\n ###########################################################################################\n TEMP = Point()\n TEMP.x = x_new[0]\n TEMP.y = x_new[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n self.vis_pub.publish(marker)\n ###########################################################################################\n\n self.rewire(cost_min, node_new, X_nearby_connectable)\n \n if np.allclose(node_new.value, end_point, .05, 0) and (final_node is None):#np.array_equal(node_new.value, end_point):\n final_node = node_new\n # reduce exploration bias so that we reinforce the existing path\n exploration_bias = .5\n if VERBOSE:\n print(\"Path found!!!!\")\n print(final_node.cost)\n if rospy.get_time() - self.start_time > self.time_thresh:\n if VERBOSE:\n print(self.rrt_star.size)\n break\n\n \n if final_node is not None:\n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n marker.points = []\n marker.colors = []\n def recur(node):\n if self.enable_vis:\n TEMP = Point()\n TEMP.x = node.value[0]\n TEMP.y = node.value[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n \n self.trajectory.points.append([node.value[0], node.value[1]])\n parent = node.parent\n if parent is not None:\n recur(parent)\n recur(final_node)\n self.trajectory.points.reverse()\n if self.enable_vis:\n self.vis_pub.publish(marker)\n if VERBOSE:\n print (final_node.depth)\n else:\n if VERBOSE:\n print(\"No path found! 
Please try again.\")\n \n \n \n # publish trajectory\n self.traj_pub.publish(self.trajectory.toPoseArray())\n\n # visualize trajectory Markers\n self.trajectory.publish_viz()", "def show_path(self):\n\n node = self.goal\n\n while node.parent:\n node.parent.value = 1\n node = node.parent", "def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def quick_plot(solution):\n plt.suptitle('GNLSE solution')\n\n plt.subplot(1, 2, 1)\n plot_wavelength_vs_distance(solution)\n\n plt.subplot(1, 2, 2)\n plot_delay_vs_distance(solution)\n\n plt.show()", "def _plot(self):\n\n #self.best_canvas.Clear()\n self.current_canvas.Clear()\n\n if len(self.results) > 0:\n x_max = self.results[-1][2]\n #self.best_canvas.xSpec = (0, x_max)\n self.current_canvas.xSpec = (0, x_max)\n\n # best_points = [(r.time, r.best.distance) for r in self.results\n # if r.best is not None and\n # isinstance(r.best.distance, int)]\n # best_line = PolyLine(best_points)\n # best_plot = PlotGraphics([best_line],\n # title='Best path distance over time',\n # xLabel='Time [ns]', yLabel='Distance')\n\n current_points = [self.TopLevelParent.solver_view.tsp_view._points[x] for x in self.results[-1][0]] if len(self.results) > 0 else []\n # current_points = [(r[2], r[0]) for r in self.results]\n if len(current_points) > 0:\n current_line = PolyLine(current_points)\n current_plot = 
PlotGraphics([current_line],\n title='Current path distance over time',\n xLabel='Iter', yLabel='Score')\n\n #self.best_canvas.Draw(best_plot)\n self.current_canvas.Draw(current_plot)", "def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')", "def visualize_path(start, end, distance, path, time_dij, time_ida, ida_terminated):\n \n # get all cities to be plotted to figure\n neighbors = []\n cities_to_figure = set()\n for city in path:\n cities_to_figure.add(city)\n for neighbor_tuple in adjlist[city]:\n neighbor = neighbor_tuple[0]\n if neighbor not in path:\n neighbors.append(neighbor)\n cities_to_figure.add(neighbor)\n for neighbor_of_neighbor_tuple in adjlist[neighbor]:\n neighbor_of_neighbor = neighbor_of_neighbor_tuple[0]\n cities_to_figure.add(neighbor_of_neighbor)\n # get min_x, max_x, min_y, max_y\n min_x, max_x, min_y, max_y = 100, 0, 100, 0\n for city in list(cities_to_figure):\n min_x, max_x, min_y, max_y = min(min_x, coordinates[city][0]), max(max_x, coordinates[city][0]), min(min_y, coordinates[city][1]), max(max_y, coordinates[city][1])\n \n # set size of window to be popped up to user\n height = 9.5\n width = 0.5 * (max_x-min_x) / (max_y-min_y) * height\n fig = plt.figure(figsize=(width, height))\n \n # visited cities and visualization\n xs, ys = [], []\n for city in path:\n x, y = coordinates[city][0], coordinates[city][1]\n xs.append(x)\n ys.append(y)\n plt.annotate(city, (x,y), textcoords=\"offset points\", xytext=(0,5), ha=\"center\")\n plt.scatter(xs, ys, s=50, color=\"blue\")\n plt.plot(xs, ys, color=\"blue\")\n \n # neighboring cities\n xs2, ys2 = [], []\n for neighbor in neighbors:\n if neighbor not in path:\n x, y = coordinates[neighbor][0], coordinates[neighbor][1]\n xs2.append(x)\n ys2.append(y)\n plt.annotate(neighbor, (x,y), textcoords=\"offset points\", xytext=(0,5), ha=\"center\")\n # neighbors of neighbors and visualization\n for neighbor in neighbors:\n for neighbor_of_neighbor_tuple in adjlist[neighbor]:\n neighbor_of_neighbor = neighbor_of_neighbor_tuple[0]\n x, y = coordinates[neighbor_of_neighbor][0], coordinates[neighbor_of_neighbor][1]\n if neighbor_of_neighbor not in path and neighbor_of_neighbor not in neighbors:\n xs2.append(x)\n ys2.append(y)\n plt.annotate(neighbor_of_neighbor, (x,y), textcoords=\"offset points\", xytext=(0,5), ha=\"center\")\n x2, y2 = coordinates[neighbor][0], coordinates[neighbor][1]\n plt.plot([x, x2], [y, y2], color=\"grey\", linestyle=\"dashed\")\n plt.scatter(xs2, ys2, s=5, color=\"grey\")\n\n # duration\n hours = int(distance // 1)\n minutes = int(round(distance % 1 * 60, 0))\n duration_string = str(hours) + \" hours \" + str(minutes) 
+ \" minutes\"\n \n if ida_terminated:\n plt.title(\"Shortest path: \" + start + \" -> \" + end + \"\\n(\" + duration_string + \")\")\n else: # ida did not finish\n plt.title(\"Shortest path: \" + start + \" -> \" + end + \"\\n(\" + duration_string + \")\")\n \n plt.axis('off')\n plt.show()", "def visualize(self):\n\n # Tools that will be displayed on the plots\n tools = \"pan,wheel_zoom,reset,save\"\n\n # Plot displaying the optimized path\n result_plot = figure(\n plot_width=1000,\n plot_height=500,\n tools=tools,\n active_scroll='wheel_zoom')\n result_plot.title.text = \"Optimized Path\"\n\n # Plot displaying the non optimized path\n initial_plot = figure(\n plot_width=1000,\n plot_height=500,\n tools=tools,\n active_scroll='wheel_zoom')\n initial_plot.title.text = \"Initial Path\"\n\n # Add the data to the result plot\n result_plot = self.populate_plot(result_plot, self.result)\n result_plot.legend.location = \"bottom_right\"\n\n # Add the data to the initial plot\n initial_plot = self.populate_plot(initial_plot, self.initial)\n initial_plot.legend.location = \"bottom_right\"\n\n # Add cutting tool to plots\n # Generate the points on which the triangle should move on\n result_lines_x, result_lines_y = self.generate_tool_path(self.result, 1)\n initial_lines_x, initial_lines_y = self.generate_tool_path(self.initial, 1)\n\n # Add cutting tool triangle to optimized path\n result_triangle_position = ColumnDataSource(\n data=dict(\n x=[result_lines_x[0]],\n y=[result_lines_y[0]]\n ))\n result_triangle = Triangle(\n x='x', y='y', line_color=Category10_4[3], line_width=3,\n size=20, fill_alpha=0\n )\n result_plot.add_glyph(result_triangle_position, result_triangle)\n\n # Add cutting tool triangle to initial path\n initial_triangle_position = ColumnDataSource(\n data=dict(\n x=[initial_lines_x[0]],\n y=[initial_lines_y[0]]\n ))\n initial_triangle = Triangle(\n x='x', y='y', line_color=Category10_4[3], line_width=3,\n size=20, fill_alpha=0\n )\n initial_plot.add_glyph(initial_triangle_position, initial_triangle)\n\n # Add button to start moving the triangle\n button = Button(label='Start')\n result_num_steps = result_lines_x.shape[0]\n initial_num_steps = initial_lines_x.shape[0]\n num_steps = max(result_num_steps, initial_num_steps)\n\n # JavaScript callback which will be called once the button is pressed\n callback = CustomJS(args=dict(\n result_triangle_position=result_triangle_position,\n result_lines_x=result_lines_x,\n result_lines_y=result_lines_y,\n result_num_steps=result_num_steps,\n initial_triangle_position=initial_triangle_position,\n initial_lines_x=initial_lines_x,\n initial_lines_y=initial_lines_y,\n initial_num_steps=initial_num_steps,\n num_steps=num_steps\n ),\n code=\"\"\"\n // Animate optimal path plot\n for(let i = 0; i < num_steps; i += 50) {\n setTimeout(function() {\n if (i < result_num_steps) {\n result_triangle_position.data['x'][0] = result_lines_x[i]\n result_triangle_position.data['y'][0] = result_lines_y[i]\n }\n\n if (i < initial_num_steps) {\n initial_triangle_position.data['x'][0] = initial_lines_x[i]\n initial_triangle_position.data['y'][0] = initial_lines_y[i]\n }\n\n result_triangle_position.change.emit()\n initial_triangle_position.change.emit()\n\n }, i)\n }\n \"\"\")\n # Add callback function to button, which starts the whole animation\n button.js_on_click(callback)\n\n # Save the plot\n result_plot = row([result_plot, button])\n plot = column([result_plot, initial_plot])\n output_file(\"visualization.html\", title=\"CNC Path Optimization\")\n save(plot)", 
"def show_custom_graph(self):\n pass", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def plot(self):\n pass", "def draw_plan(self, end_nodes, colors):\n for agent, nodes in enumerate(end_nodes.values()):\n for node in nodes:\n # self.root.draw_path_from_node(node, color=colors[agent], label='Agent ' + str(agent)) TODO: add back to this\n self.root.draw_path_from_node(self, node, color=colors[agent], agent=agent)", "def plot_nodes(snode, rnode, rseg):\n\n # Prepare plot\n _, ax = plt.subplots()\n plt.title('Best servers by minimal road distance')\n\n # Plot road segments\n line = [[(item['x1'], item['y1']), (item['x2'], item['y2'])]\n for item in rseg]\n c = [cm.get_cmap(COLORMAP)(item['color_num'] / (NUM_COLORS - 1))\n for item in rseg]\n lc = coll.LineCollection(line, colors=c, linewidth=ROAD_WIDTH)\n ax.add_collection(lc)\n\n # Plot road nodes\n x = [item['x'] for item in rnode]\n y = [item['y'] for item in rnode]\n c = [item['color_num'] for item in rnode]\n ax.scatter(x, y, c=c, cmap=COLORMAP, edgecolors='face',\n vmin=0, vmax=(NUM_COLORS - 1), s=ROAD_NODE_SIZE)\n\n # Plot server nodes\n x = [item['x'] for item in snode]\n y = [item['y'] for item in snode]\n c = [item['color_num'] for item in snode]\n ax.scatter(x, y, c=c, cmap=COLORMAP, edgecolors='face',\n vmin=0, vmax=(NUM_COLORS - 1), s=SERVER_SYM_SIZE)\n\n # Display plot and save\n plt.savefig(os.path.join('Output Data', 'Best server.pdf'))\n plt.show()", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def print_path(window, source, dest):\n path = []\n curr_node = dest\n while curr_node.prev:\n path.append(curr_node)\n curr_node = curr_node.prev\n path.append(source)\n path = path[::-1] # reverse the path to display source->dest and not dest->source\n for node in path:\n if not node.is_colored:\n block = get_block_from_node(node)\n block.draw(window, PATH_COLOR)", "def geneticAlgorithmPlot(population, popSize, fittestSize, mutationRate, generations):\n pop = GA.initialPopulation(popSize, population)\n progress = []\n progress.append(1 / GA.rankRoutes(pop)[0][1])\n \n for i in range(0, generations):\n pop = GA.nextGeneration(pop, fittestSize, mutationRate)\n progress.append(1 / GA.rankRoutes(pop)[0][1])\n \n plt.plot(progress)\n plt.ylabel('Distance')\n plt.xlabel('Generation')\n plt.show()", "def plot_analysis(opt):\n LOG.debug(\"Plotting GetLLM analysis.\")\n mdl_analysis = opt.subnode in mdl_subnodes\n\n ps.set_style(\"standard\", MANUAL_STYLE)\n xmin = min(opt.xplot_xmin, opt.yplot_xmin)\n xmax = max(opt.xplot_xmax, opt.yplot_xmax)\n\n gs = gridspec.GridSpec(2, 1, height_ratios=[1, 1])\n ax_x = plt.subplot(gs[0])\n ax_y = None\n ir_pos = None\n\n paths = opt.path.split(',')\n\n if opt.label == 'None':\n if mdl_analysis:\n labels = [\"mo_\" + opt.path.rsplit('/', 1)[-1], \"me_\" + opt.path.rsplit('/', 1)[-1]]\n else:\n labels = paths\n else:\n labels = opt.label.split(',')\n\n for idx, path in enumerate(paths):\n data_x, data_y = get_data(path, opt.mainnode, opt.subnode)\n plot_data(ax_x, data_x, labels, idx, opt.change_marker)\n\n if ir_pos is None:\n ir_pos = get_irpos(data_x, opt.accel)\n\n if data_y is not None:\n if ax_y is None:\n ax_x.axes.get_xaxis().set_visible(False)\n ax_y = plt.subplot(gs[1])\n plot_data(ax_y, data_y, labels, idx, opt.change_marker)\n\n ax_x.set_xlim(xmin, xmax)\n ax_x.set_ylim(opt.xplot_ymin, opt.xplot_ymax)\n set_yaxis_label(ax_x, 'x', opt.subnode)\n\n if ax_y is not None:\n ax_y.set_xlim(xmin, xmax)\n ax_y.set_ylim(opt.yplot_ymin, opt.yplot_ymax)\n 
set_yaxis_label(ax_y, 'y', opt.subnode)\n ps.set_xaxis_label(ax_y)\n if ir_pos:\n ps.show_ir(ir_pos, ax_y, mode='outside')\n ps.show_ir(ir_pos, ax_x, mode='lines')\n else:\n ax_x.axes.get_xaxis().set_visible(True)\n ps.set_xaxis_label(ax_x)\n if ir_pos:\n ps.show_ir(ir_pos, ax_x, mode='outside')\n\n if int(opt.legendh) > 12:\n show_legend(ax_x, int(opt.legendx), int(opt.legendy))\n return gs", "def plot_sampling(fname, df, of=\"r_neighbor\", show=True):\n xlabel = r\"Neighborhood $r_{c}$\"\n logx = False\n\n if of == \"n_iter\":\n xlabel = \"#Cycles\"\n logx = True\n\n fig, ax = plt.subplots(figsize=(15, 5))\n\n gb = df.groupby([of])\n aggregation = {\"stress\": [np.mean, np.std], \"correlation\": [np.mean, np.std]}\n gb = gb.agg(aggregation)\n\n gb.stress[\"mean\"].plot(yerr=gb.stress[\"std\"], color=\"crimson\", logx=logx)\n\n ax2 = ax.twinx()\n\n gb.correlation[\"mean\"].plot(yerr=gb.correlation[\"std\"],\n color=\"dodgerblue\", logx=logx)\n\n ax.set_xlabel(xlabel, fontsize=20)\n ax.set_ylabel(\"Stress\", fontsize=20)\n ax.set_ylim(0, 0.2)\n\n ax2.set_ylabel(r\"Correlation $\\gamma$\", fontsize=20)\n ax2.set_ylim(0, 1)\n\n plt.savefig(fname, dpi=300, format=\"png\", bbox_inches=\"tight\")\n\n if show:\n plt.show()", "def display_results(start, came_from, goal, time, visited_nodes):\n if time == 0:\n print \"No solution found!\"\n solution = reconstruct_path(came_from, start, goal)\n visualize(solution)\n print 'Solution steps: {0}'.format(', '.join(solution_steps(solution))) + '.'\n print 'Time to find solution of the board: {0}'.format(time) + '.'\n print 'Visited nodes: {0}'.format(visited_nodes) + '.'", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='blue')", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def path_plotter(self, res):\n # define edgepoint of the plot\n x_start = np.min(self.trans_path_x) - self.p * res\n x_end = np.max(self.trans_path_x) + (self.f + self.p) * res\n y_start = np.min(self.trans_path_y) - self.p * res\n y_end = np.max(self.trans_path_y) + (self.f + self.p) * res\n\n # define length of arrays\n x_len = int((x_end - x_start) / res)\n y_len = int((y_end - y_start) / res)\n\n # define x- and y-axis\n self.x = np.arange(x_start, x_end, res)\n self.y = np.arange(y_start, y_end, res)\n\n # define matrix that will be plotted\n self.meas_path = np.ones((y_len, x_len))\n\n # fill the matrix with the measured frames\n for k, frame in enumerate(self.trans_frames):\n start = ((self.trans_frame_start[k][0] - x_start) / res,\n (self.trans_frame_start[k][1] - y_start) / res)\n end = ((self.trans_frame_start[k][0] + (2 * self.p + self.f) * res - x_start) / res,\n (self.trans_frame_start[k][1] + (2 * self.p + self.f) * res - y_start) / res)\n # start = (int((self.trans_path_x[k] - self.p * res - x_start) / res),\n # int((self.trans_path_y[k] - self.p * res - y_start) / res))\n # end = (int((self.trans_path_x[k] + (self.p + self.f) * res - x_start) / res),\n # int((self.trans_path_y[k] + (self.p + self.f) * res - y_start) / res))\n self.meas_path[start[1]:end[1], start[0]:end[0]] = frame\n\n # Plot the path\n fig, ax = plt.subplots(1)\n ax.pcolormesh(self.x, self.y, self.meas_path)\n ax.plot(self.trans_path_x, self.trans_path_y, color='red')\n for k in range(len(self.trans_frames)):\n width = self.f * res\n rect = patches.Rectangle((self.trans_path_x[k], self.trans_path_y[k]), width, 
width,\n linewidth=1, edgecolor='black', facecolor='none')\n ax.add_patch(rect)\n plt.show()", "def plot_nodes(self, node_list):\n points = Marker()\n #visualizations points and lines..\n points.header.frame_id = \"map\"\n points.header.stamp = rospy.get_rostime()\n points.ns = \"markers\"\n points.id = 0\n points.type = points.POINTS\n points.action = points.ADD\n points.pose.orientation.w = 1.0\n points.scale.x = 2*self.rviz_tuning_plt\n points.scale.y = 2*self.rviz_tuning_plt\n points.color.r = 0.0\n points.color.g = 1.0\n points.color.b = 0.0\n points.color.a = 1.0\n points.lifetime = rospy.Duration()\n\n for node in node_list:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.01\n points.points.append(p1)\n \n self.pub_nodes.publish(points)", "def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot", "def plot():\n pass", "def plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='green')", "def overview(self, minState=5):\n n = 600\n \n ### first plot: the RTOFFSETs and STATES\n plt.figure(10)\n plt.clf()\n plt.subplots_adjust(hspace=0.05, top=0.95, left=0.05,\n right=0.99, wspace=0.00, bottom=0.1)\n ax1 = plt.subplot(n+11)\n try:\n print self.insmode+' | pri:'+\\\n self.getKeyword('OCS PS ID')+' | sec:'+\\\n self.getKeyword('OCS SS ID')\n \n plt.title(self.filename+' | '+self.insmode+' | pri:'+\n self.getKeyword('OCS PS ID')+' | sec:'+\n self.getKeyword('OCS SS ID'))\n except:\n pass\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('FUOFFSET')*1e3,\n color=(1.0, 0.5, 0.0), label=self.DLtrack+' (FUOFFSET)',\n linewidth=3, alpha=0.5)\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+12, sharex=ax1) # == DDL movements\n \n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field(self.DDLtrack),\n color=(0.0, 0.5, 1.0), linewidth=3, alpha=0.5,\n label=self.DDLtrack)\n 
plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field('PSP'),\n color=(0.0, 0.5, 1.0), linewidth=1, alpha=0.9,\n label='PSP', linestyle='dashed')\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+13, sharex=ax1) # == states\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'),\n color=(1.0, 0.5, 0.0), label='OPDC')\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'),\n color=(0.0, 0.5, 1.0), label='DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('STATES')\n yl=plt.ylim()\n plt.ylim(yl[0]-1, yl[1]+1)\n plt.xlim(0)\n ### fluxes\n plt.subplot(n+14, sharex=ax1)\n try:\n fsua_dark = self.fsu_calib[('FSUA', 'DARK')][0,0]\n fsub_dark = self.fsu_calib[('FSUB', 'DARK')][0,0]\n fsua_alldark = self.fsu_calib[('FSUA', 'DARK')].sum(axis=1)[0]\n fsub_alldark = self.fsu_calib[('FSUB', 'DARK')].sum(axis=1)[0]\n except:\n print 'WARNING: there are no FSUs calibrations in the header'\n fsua_dark = 0.0\n fsub_dark = 0.0\n fsua_alldark = 0.0\n fsub_alldark = 0.0\n\n M0 = 17.5\n fluxa = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n fsua_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU1 DIT'))\n print 'FLUX FSUA (avg, rms):', round(fluxa.mean(), 0), 'ADU/s',\\\n round(100*fluxa.std()/fluxa.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxa.mean()),2)\n fluxb = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n fsub_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU2 DIT'))\n print 'FLUX FSUB (avg, rms):', round(fluxb.mean(), 0), 'ADU/s',\\\n round(100*fluxb.std()/fluxb.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxb.mean()),2)\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\\\n fluxa/1000, color='b', alpha=0.5, label='FSUA')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\\\n fluxb/1000, color='r', alpha=0.5, label='FSUB')\n\n plt.ylim(1)\n plt.legend(prop={'size':9})\n plt.ylabel('flux - DARK (kADU)')\n plt.xlim(0)\n plt.subplot(n+15, sharex=ax1)\n try:\n # -- old data version\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field('OPDSNR'),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field('OPDSNR'),\n color='r', alpha=0.5, label='FSUB SNR')\n except:\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field(self.OPDSNR),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field(self.OPDSNR),\n color='r', alpha=0.5, label='FSUB SNR')\n plt.legend(prop={'size':9})\n \n A = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,1]-\n 
2*self.fsu_calib[('FSUA', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,3])\n snrABCD_a = ((A-C)**2+(B-D)**2)\n snrABCD_a /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n # snrABCD_a, color='b', alpha=0.5, linestyle='dashed')\n \n A = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,3])\n \n snrABCD_b = ((A-C)**2+(B-D)**2)\n snrABCD_b /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n # snrABCD_b, color='r', alpha=0.5, linestyle='dashed') \n \n # -- SNR levels:\n #plt.hlines([self.getKeyword('INS OPDC OPEN'),\n # self.getKeyword('INS OPDC CLOSE'),\n # self.getKeyword('INS OPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(1.0, 0.5, 0.0))\n #plt.hlines([self.getKeyword('INS DOPDC OPEN'),\n # self.getKeyword('INS DOPDC CLOSE'),\n # self.getKeyword('INS DOPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(0.0, 0.5, 1.0))\n # -- plot thresholds\n plt.ylabel('SNR')\n plt.xlim(0)\n \n if self.getKeyword('OCS DET IMGNAME')=='PACMAN_OBJ_ASTRO_':\n # == dual FTK\n plt.subplot(n+16, sharex=ax1)\n plt.ylabel('PRIMET ($\\mu$m)')\n #met = interp1d(np.float_(self.raw['METROLOGY_DATA'].\\\n # data.field('TIME')),\\\n # self.raw['METROLOGY_DATA'].data.field('DELTAL'),\\\n # kind = 'linear', bounds_error=False, fill_value=0.0)\n met = lambda x: np.interp(x,\n np.float_(self.raw['METROLOGY_DATA'].data.field('TIME')),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))\n metro = met(self.raw['DOPDC'].data.field('TIME'))*1e6\n n_ = min(len(self.raw['DOPDC'].data.field('TIME')),\n len(self.raw['OPDC'].data.field('TIME')))\n\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n metro, color=(0.5,0.5,0.), label='A-B')\n\n w1 = np.where((self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'OPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'OPDC FTK stat: 0%'\n\n w1 = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DOPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'DOPDC FTK stat: 0%'\n\n w = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n 
(self.raw['DOPDC'].data.field('STATE')[:n_]<=7)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DUAL FTK stat:', round(100*len(w[0])/float(n_),1), '%'\n except:\n print 'DUAL FTK stat: 0%'\n\n plt.xlim(0)\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], '.g', linewidth=2,\n alpha=0.5, label='dual FTK')\n #plt.legend()\n if len(w[0])>10 and False:\n coef = np.polyfit(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], 2)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n np.polyval(coef, self.raw['DOPDC'].\n data.field('TIME')),\n color='g')\n plt.ylabel('metrology')\n\n print 'PRIMET drift (polyfit) :', 1e6*coef[1], 'um/s'\n slope, rms, synth = NoisySlope(self.raw['DOPDC'].\n data.field('TIME')[w],\n metro[w], 3e6)\n plt.figure(10)\n yl = plt.ylim()\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n synth, color='r')\n plt.ylim(yl)\n print 'PRIMET drift (NoisySlope):',\\\n slope*1e6,'+/-', rms*1e6, 'um/s'\n else:\n # == scanning\n plt.subplot(n+16, sharex=ax1)\n fringesOPDC = \\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA3')[:,0]\n \n fringesDOPDC =\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA3')[:,0]\n \n plt.plot(self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesOPDC/fringesOPDC.std()),\n color=(1.0, 0.5, 0.0), alpha=0.6,\n label=self.primary_fsu+'/OPDC')\n plt.plot(self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesDOPDC/fringesDOPDC.std()),\n color=(0.0, 0.5, 1.0), alpha=0.6,\n label=self.secondary_fsu+'/DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('A-C')\n plt.xlabel('time stamp ($\\mu$s)')\n return", "def plot_path_statistics( self, path_file=\"tse_ensemble.json\" ):\n from matplotlib import pyplot as plt\n with open(path_file,'r') as infile:\n data = json.load(infile)\n try:\n paths = data[\"transition_paths\"]\n except KeyError:\n paths = [data]\n total_product_indicator = np.zeros(len(paths[0][\"symbols\"]))\n total_reactant_indicator = np.zeros(len(paths[0][\"symbols\"]))\n self.nuc_mc.min_size_product = paths[0][\"min_size_product\"]\n self.nuc_mc.max_size_reactant = paths[0][\"max_size_reactant\"]\n for path in paths:\n product_indicator = []\n reactant_indicator = []\n reactant_indicator, product_indicator = self.get_basin_indicators(path)\n\n total_product_indicator += np.cumsum(product_indicator)/float( len(product_indicator) )\n total_reactant_indicator += np.cumsum(reactant_indicator)/float( len(reactant_indicator) )\n\n total_reactant_indicator /= float( len(paths) )\n total_product_indicator /= float( len(paths) )\n\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.plot( total_product_indicator, label=\"Product\" )\n ax.plot( total_reactant_indicator, label=\"Reactant\" )\n ax.set_xlabel( \"MC sweeps\" )\n ax.set_ylabel( \"Indicator\" )\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.legend( frameon=False, loc=\"best\" )\n return fig", "def show():\n\tplt.show()", "def show_path_2D(start, end, coordinates, polygons, clear = True):\n global L, N, delta_t\n\n # start interactive mode\n plt.ion()\n\n # crete eempty figure on which data will go and first subplot\n fig = plt.figure()\n\n # get into the correct time step\n for time_step in range(start, end):\n # 
list of colours used for animation\n colours = cm.rainbow(np.linspace(0, 1, N))\n\n # loop over each particle and colour\n for i in range(N):\n # plot x, y poistion of particle in a given colour and set axis to size of box\n plt.scatter(coordinates[time_step][i][0], coordinates[time_step][i][1], s = 1, color = 'r')\n\n # plot the object\n if i < M:\n polygon = np.array(polygons[time_step][i])\n # get the points of the polygon to plot it\n x, y = polygon.T\n\n # print(x, y)\n\n x = np.append(x, x[0])\n y = np.append(y, y[0])\n\n # print(x, y)\n\n # plot the polygon\n plt.plot(x , y)\n # plt.scatter(polygons_com[time_step][i][0], polygons_com[time_step][i][1], s = 5, color = 'g')\n\n if bound_cond == True:\n plt.axis([0, L, 0, L])\n plt.axis([0, L, 0, L])\n # plt.axis([-L*2, L*2, -L*2, L*2])\n\n # show graph\n plt.show()\n plt.pause(time_pause)\n\n # decide if you want to clear\n if clear == True:\n plt.clf()\n\n return None", "def plot(self):\n\t\tself.plotOfIP().plot()", "def fullgraphplot(time_lower,time_upper):\n\n edges_list,node_list,title_list = graphformation(time_lower,time_upper)\n node_size = []\n for i in range(len(node_list)):\n node_size.append(5)\n g = Network(\n height=\"750px\",\n width=\"100%\",\n bgcolor=\"#222222\",\n font_color=\"white\")\n g.add_nodes(node_list,label=node_list,title=title_list, size= node_size)\n g.add_edges(edges_list)\n g.show(\"nx.html\")\n return", "def path_plot(robot_path, regions, obs):\n\n for robot, path in robot_path.items():\n # prefix path\n if len(path) == 1:\n continue\n x_pre = np.asarray([point[0] + 0.5 for point in path])\n y_pre = np.asarray([point[1] + 0.5 for point in path])\n plt.quiver(x_pre[:-1], y_pre[:-1], x_pre[1:] - x_pre[:-1], y_pre[1:] - y_pre[:-1],\n color=\"#\" + ''.join([random.choice('0123456789ABCDEF') for j in range(6)]),\n scale_units='xy', angles='xy', scale=1, label='prefix path')\n\n plt.savefig('img/path.png', bbox_inches='tight', dpi=600)", "def a_star(self, mapdata, start, goal):\n\n print \"Inside A star\"\n rospy.loginfo(\"Generate path from (%d,%d) to (%d,%d)\" % (start[0], start[1], goal[0], goal[1]))\n if not PathPlanner.is_cell_walkable(mapdata, goal[0], goal[1]):\n rospy.logerr(\"not walkable goal\")\n return[]\n #calculated from goal\n frontier = PriorityQueue()\n frontier.put(start, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n\n while not frontier.empty():\n frontier_msg = GridCells()\n frontier_cells = []\n for e in frontier.elements:\n frontier_cells.append(PathPlanner.grid_to_world(mapdata, e[1][0], e[1][1]))\n frontier_msg.header = mapdata.header\n frontier_msg.header.stamp = rospy.get_rostime()\n frontier_msg.cell_width = mapdata.info.resolution\n frontier_msg.cell_height = mapdata.info.resolution\n frontier_msg.cells = frontier_cells\n expanded_msg = GridCells()\n expanded_cells = []\n for e in cost_so_far: \n expanded_cells.append(PathPlanner.grid_to_world(mapdata, e[0], e[1]))\n \n expanded_msg.header = mapdata.header\n expanded_msg.header.stamp = rospy.get_rostime()\n expanded_msg.cell_width = mapdata.info.resolution\n expanded_msg.cell_height = mapdata.info.resolution\n expanded_msg.cells = expanded_cells\n self.expanded_pub.publish(expanded_msg)\n rospy.sleep(0.01)\n\n current = frontier.get()\n\n #creates path\n if current == goal:\n entry = goal\n listOfCoord = []\n while entry != None:\n listOfCoord.append(entry)\n entry = came_from[entry]\n listOfCoord.reverse()\n self.expanded_pub.publish(PathPlanner.createGridcells(mapdata, listOfCoord))\n 
return listOfCoord\n \n for next in PathPlanner.neighbors_of_8(mapdata, current[0], current[1]):\n new_cost = cost_so_far[current] + 1 #assume cost to move each unit is 1\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost + PathPlanner.euclidean_distance(next[0], next[1], goal[0], goal[1])\n frontier.put(next, priority)\n came_from[next] = current\n\n \n return[]", "def a_star_planning(start_x, start_y, goal_x, goal_y, id):\n # extract the index of start node, goal node and obstacles\n start = Point(round(start_x/grid_size), round(start_y/grid_size), 0.0, -1, [0,0,0])\n goal = Point(round(goal_x/grid_size), round(goal_y/grid_size), 0.0, -1, [0,0,0])\n if not_legal(goal, id):\n print ('not a legal goal')\n return False\n \n # time.sleep(10)\n\n # create the open list and close list to store nodes\n openset, closeset = deque(), deque()\n openset.append(start)\n\n while True:\n # find out the min f node to explore\n\n current_node = min(openset,\n key=lambda node: node.g + calculate_heuristic(node,goal))\n\n # pltplt.plot(current_node.x, current_node.y, \"b*\")\n if len(closeset) % 10 == 0:\n plt.pause(0.001)\n\n if current_node.x == goal.x and current_node.y == goal.y:\n print(\"Congratulations! You have found the goal!\")\n goal.parent = current_node\n break\n\n # Remove it from the open list\n openset.remove(current_node)\n # Add it to the close list\n closeset.append(current_node)\n\n # Explore the neighbour\n for motion in motions:\n if motion == current_node.parent_motion:\n turn_cost = 0\n elif (motion[0] == -1 * current_node.parent_motion[0]) and (motion[1] == -1 * current_node.parent_motion[1]):\n turn_cost = 1.5\n else:\n turn_cost = 1\n\n node = Point(current_node.x + motion[0],\n current_node.y + motion[1],\n current_node.g + motion[2] + turn_cost,\n current_node,\n motion,\n )\n\n # ignore it if it is in the close list\n flag = False\n for item in closeset:\n if item.x == node.x and item.y == node.y:\n flag = True\n break\n if flag:\n continue\n # ignore it if it is obstacle\n\n if not_legal(node, id):\n continue\n # update its parent if it is the open list\n flag = True\n for item in openset:\n if item.x == node.x and item.y == node.y:\n flag = False\n # if closer, update the parent\n if node.g <= item.g:\n item.g = node.g\n item.parent = node.parent\n item.parent_motion = node.parent_motion\n break\n # add to the open list if it is not in the open list\n if flag:\n openset.append(node)\n\n # generate the final path\n while True:\n route = deque()\n route.append(goal)\n plt.plot(goal.x, goal.y, \"rx\")\n if goal.parent == -1:\n break\n else:\n goal = goal.parent\n route.appendleft(goal)\n # return route\n # return False\n if NEED_DRAW:\n # draw map\n for i in range(map.gridwidth):\n for j in range(map.gridheight):\n if map.grid[1,i,j] >0:\n plt.plot(i, j, \"xc\")\n\n plt.plot(start.x, start.y, \"ro\")\n plt.plot(goal.x, goal.y, \"go\")\n\n for goal in route:\n plt.plot(goal.x, goal.y, \"rx\")\n plt.show()", "def showGraph(G, mate, label=\"\"):\r\n \r\n # Set the positions for all nodes and the figure size\r\n plt.close('all')\r\n plt.figure( figsize=(10, 10) )\r\n pos = nx.graphviz_layout(G, prog='sfdp', args='')\r\n \r\n # Draw the graph with node labels and a title\r\n plt.title(label)\r\n nx.draw(G, pos, node_size=400, with_labels=True)\r\n \r\n # Draw the matched edges\r\n nx.draw_networkx_edges(G, pos, edgelist=mate.items(),\r\n width=5, alpha=0.4, edge_color='b')\r\n \r\n plt.axis('off')\r\n plt.show()", "def 
plot(self): \n\t\txandy = sep_xy(self.start, self.end)\n\t\tplt.plot(xandy[0], xandy[1], 'k-', lw=1, color='red')", "def show_grid(grid, edges, path1, path2):\n plt.imshow(grid, origin='lower', cmap='Greys')\n\n if edges is not None:\n for e in edges:\n p1 = e[0]\n p2 = e[1]\n plt.plot([p1[1], p2[1]], [p1[0], p2[0]], 'b-')\n\n # Stepping through each edge\n if path1 is not None:\n p1 = path1[0]\n for p in path1[1:]:\n p2 = p\n plt.plot([p1[1], p2[1]], [p1[0], p2[0]], 'ro')\n p1 = p2\n\n if path2 is not None:\n p1 = path2[0]\n for p in path2[1:]:\n p2 = p\n plt.plot([p1[1], p2[1]], [p1[0], p2[0]], 'g-', linewidth=3)\n p1 = p2\n\n plt.plot(start[1], start[0], 'rx')\n plt.plot(goal[1], goal[0], 'rx')\n plt.grid()\n plt.xlabel('EAST')\n plt.ylabel('NORTH')\n plt.xticks([x for x in range(0, 1000, 50)])\n plt.yticks([y for y in range(0, 1000, 50)])\n plt.show()", "def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()", "def showpath(self, source, target):\n\n raise NotImplementedError", "def sample_and_plot(S0, K, B, T, N, u, d, q, M, barrier_type):\n paths = sample_paths(S0, N, u, d, q, M)\n p_valid, p_invalid, p_counts = split_paths(paths, B, K, \n barrier_type, option)\n\n times = np.linspace(0, T, N+1)\n\n fig = plt.figure(figsize=(10,7))\n ax1 = plt.subplot2grid((1,1),(0,0))\n ax1.set_ylabel('Stock price (log-scale)')\n ax1.set_xlabel('time')\n for path in p_invalid:\n ax1.plot(times, path, c='lightcoral')\n for path in p_valid:\n ax1.plot(times, path, c='grey')\n for path in p_counts:\n ax1.plot(times, path, c='blue')\n \n custom_lines = [Line2D([0], [0], c='lightcoral', lw=2),\n Line2D([0], [0], c='grey', lw=2),\n Line2D([0], [0], c='blue', lw=2), \n Line2D([0], [0], c='red', ls=':', lw=2), \n Line2D([0], [0], c='navy', ls=':', lw=2)]\n \n ax1.axhline(y=K, lw=4, c = 'navy', ls = ':', label = 'Strike Price')\n ax1.axhline(y=B, lw=4, c = 'red', ls = ':', label = 'Barrier')\n \n plt.yscale('log') \n ax1.legend(custom_lines, ['invalid (barrier)', 'invalid (option)', 'valid', \n 'barrier', 'strike price'])\n #plt.savefig('up-and-out_call.png', transparent=True)\n plt.show()", "def plot(arrivals_file, region): # pragma: no cover\n region = [float(s) for s in region.split()]\n reg = Region(*region)\n\n arrivals = pd.read_csv(arrivals_file, header=None, names=column_names,\n sep=' ')\n arr_file_base = os.path.splitext(arrivals_file.name)[0]\n # import IPython; IPython.embed(); import sys; sys.exit()\n source = _source_or_stations_in_region(\n arrivals, reg, SOURCE_LATITUDE, SOURCE_LONGITUDE,\n 'sources_in_region_{}.png'.format(arr_file_base))\n\n station = _source_or_stations_in_region(\n arrivals, reg, STATION_LATITUDE, STATION_LONGITUDE,\n 'stations_in_region_{}.png'.format(arr_file_base))\n\n # sources and stations both in region\n sources_and_stations = arrivals[source & station]\n\n fig = plt.figure()\n\n _plot_on_map(sources_and_stations,\n SOURCE_LONGITUDE, SOURCE_LATITUDE,\n marker='*', color='r')\n _plot_on_map(sources_and_stations,\n STATION_LONGITUDE, STATION_LATITUDE,\n marker='^', color='b')\n\n plt.title('Sources and stations in \\n region 
{}'.format(region))\n # plt.xlabel('Longitude')\n # plt.ylabel('Latitude')\n fig.savefig('sources_and_stations_in_region_{}.png'.format(arr_file_base))\n\n # rays originating and terminating in region\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n for i, arr in enumerate(sources_and_stations.iterrows()):\n dat = arr[1]\n ax.add_line(Line2D([dat[SOURCE_LONGITUDE], dat[STATION_LONGITUDE]],\n [dat[SOURCE_LATITUDE], dat[STATION_LATITUDE]],\n color='b', zorder=i))\n ANZ.drawcoastlines(linewidth=2.0, color='k',\n zorder=sources_and_stations.shape[0]+1)\n\n # ax.set_xlim(reg.leftlon - 5, reg.rightlon + 5)\n # ax.set_ylim(reg.bottomlat - 5, reg.upperlat + 5)\n _draw_paras_merids(ANZ)\n plt.title('Ray paths in \\n region {}'.format(region))\n # plt.xlabel('Longitude')\n # plt.ylabel('Latitude')\n fig.savefig('rays_in_region_{}.png'.format(arr_file_base))", "def _plot_map(self):\n\n # Plot points if they exist\n\n if len(self._laserX) > 0:\n self._plot_laser()\n\n if len(self._goalX) > 0:\n self._plot_goal()\n\n if len(self._summitX) > 0:\n self._plot_summit()\n\n self._plot_objects()\n\n # Update Plot\n self._fig.canvas.draw_idle()\n\n plt.pause(0.01)", "def plot_edges(self, node_list):\n tree = MarkerArray()\n id = 1\n for node in self.node_list:\n if node.parent:\n # edge between nodes\n path = Marker()\n path.header.frame_id = \"map\"\n path.header.stamp = rospy.get_rostime()\n path.ns = \"markers\"\n path.id = id\n id += 1\n path.type = path.LINE_STRIP\n path.action = path.ADD\n path.scale.x = self.rviz_tuning_plt\n path.color.a = 1.0\n\n path.color.r = 1.0\n path.color.g = 0.7\n path.color.b = 0.0\n\n path.lifetime = rospy.Duration()\n path.pose.orientation.w = 1.0\n\n p1 = Point()\n p1.x = node.parent.x\n p1.y = node.parent.y\n p1.z = 0.02\n path.points.append(p1)\n\n p2 = Point()\n p2.x = node.x\n p2.y = node.y\n p2.z = 0.02\n path.points.append(p2)\n \n tree.markers.append(path)\n\n self.pub_edges.publish(tree)", "def draw_nodes(self):\n pass", "def plot_graphy_resilience_targeted():\n \n global counter\n counter += 1\n random_graph = make_random_undirected_graph(1239, 0.004)\n attack_order = fast_targeted_order(random_graph)\n random_resilience = compute_resilience(random_graph, attack_order)\n plt.plot(range(len(random_resilience)), random_resilience, '-b', label= 'random, p =0.004')\n \n synthetic_undirected_graph = make_synthetic_undirected_graph(1239, 5)\n attack_order = fast_targeted_order(synthetic_undirected_graph)\n synthetic_resilience = compute_resilience(synthetic_undirected_graph, attack_order)\n plt.plot(range(len(synthetic_resilience)), synthetic_resilience, '-r', label = 'UPA, m = 5')\n\n network_graph = load_graph(NETWORK_URL)\n attack_order = fast_targeted_order(network_graph)\n network_resilience = compute_resilience(network_graph, attack_order)\n plt.plot(range(len(network_resilience)), network_resilience, '-g', label = 'Network')\n \n plt.legend(loc='upper right')\n \n plt.title(\" plot of graph resilience\")\n plt.xlabel(\"number of nodes removed\")\n plt.ylabel(\"the size of the largest connect component \")\n plt.savefig(\"graph_resilience_\"+str(counter)+\".png\", dpi = 72)\n plt.gcf().clear() # hose-keeping", "def showPlot2():\n raise NotImplementedError", "def plot_scenario(self, ax):\n ax.set_xlim((0,10))\n ax.set_ylim((0,10))\n\n # Unpack region's sizes and positions\n obs_x = self.obstacle_vert[0]\n obs_y = self.obstacle_vert[2]\n obs_w = self.obstacle_vert[1]-obs_x\n obs_h = self.obstacle_vert[3]-obs_y\n\n goal_x = self.goal_vert[0]\n goal_y = 
self.goal_vert[2]\n goal_w = self.goal_vert[1]-goal_x\n goal_h = self.goal_vert[3]-goal_y\n\n target1_x = self.target1_vert[0]\n target1_y = self.target1_vert[2]\n target1_w = self.target1_vert[1]-target1_x\n target1_h = self.target1_vert[3]-target1_y\n\n target2_x = self.target2_vert[0]\n target2_y = self.target2_vert[2]\n target2_w = self.target2_vert[1]-target2_x\n target2_h = self.target2_vert[3]-target2_y\n\n obstacle = Rectangle((obs_x,obs_y),obs_w,obs_h,color='red',alpha=0.5)\n goal = Rectangle((goal_x,goal_y),goal_w,goal_h, color='green',alpha=0.5)\n\n target1 = Rectangle((target1_x,target1_y),target1_w,target1_h, color='blue',alpha=0.5)\n target2 = Rectangle((target2_x,target2_y),target2_w,target2_h, color='blue',alpha=0.5)\n\n ax.add_patch(obstacle)\n ax.add_patch(goal)\n ax.add_patch(target1)\n ax.add_patch(target2)", "def plot(self):\n return self.graph(edge_labels='words_in_out').plot()", "def __init__(self,\n start,\n goal,\n expand_dis=3.0,\n goal_sample_rate=5,\n ):\n self.start = self.Node(start[0], start[1])\n self.end = self.Node(goal[0], goal[1])\n self.expand_dis = expand_dis\n self.goal_sample_rate = goal_sample_rate\n \n self.stepLength = 50\n self.path_resolution = 1\n self.obstaclelist = []\n self.node_list = []", "def test_path(self, x_path, y_path):\n\t\tplt.plot(x_path, y_path, 'bo')\n\t\tplt.plot(x_path, y_path, 'b-')\n\t\tplt.show()", "def show_graphs ():\n plt.ylim = (0, 300)\n plt.xlim = (0, 300)\n #Set up lidar plot to figure 1\n lidar_plot = plt.figure (1)\n #Assign title\n plt.title ('Lidar data')\n #Assign data\n plt.imshow (lidar_clean)\n #Set up radar plot to figure 2\n radar_plot = plt.figure (2)\n #Assign title\n plt.title ('Radar data')\n #Assign data\n plt.imshow (radar_clean)\n #Show plots\n plt.show ()", "def show_plot(self):\n runs = self.GetParent().runs\n if len(runs) <= 0: return\n\n t1 = time.time()\n total_width = self.GetParent().total_width\n\n newwidth = total_width * (self.GetParent().zoom / 100)\n newmid = total_width * (self.GetParent().pan/100)\n newxmin = newmid - (newwidth/2)\n newxmax = newxmin + newwidth\n\n if newxmin < 0:\n newxmin = 0\n newxmax = newwidth\n elif newxmax > total_width:\n newxmax = total_width\n newxmin = newxmax - newwidth\n\n assert newxmin >= 0 and newxmin <= total_width\n\n #print \"**** Zoom: %s, pan: %s, total_width: %s, newwidth: %s, newmid: %s, newxmin: %s, newxmax: %s\" \\\n # %(self.GetParent().zoom,self.GetParent().pan,total_width,newwidth,newmid,newxmin,newxmax)\n\n left = 0\n width_so_far = 0\n self.figure.clear()\n braggsmax = max(flex.max(r.culled_braggs) for r in runs)\n braggsmin = min(flex.min(r.culled_braggs) for r in runs)\n distsmax = max(flex.max(r.culled_distances) for r in runs)\n distsmin = min(flex.min(r.culled_distances) for r in runs)\n sifomax = max(flex.max(r.culled_sifoils) for r in runs)\n sifomin = min(flex.min(r.culled_sifoils) for r in runs)\n wavemax = max(flex.max(r.culled_wavelengths) for r in runs)\n wavemin = min(flex.min(r.culled_wavelengths) for r in runs)\n\n #above tricks don't work for hit rates as they can be empty if the run is new\n goodruns = []\n for run in runs:\n if len(run.hit_rates) > 0: goodruns.append(run)\n if len(goodruns) > 0:\n hitsmax = max(flex.max(r.hit_rates) for r in goodruns)\n hitsmin = min(flex.min(r.hit_rates) for r in goodruns)\n else:\n hitsmax = hitsmin = 0\n\n first_run = True\n for run in runs:\n right = left + run.width()\n\n if right < newxmin or left > newxmax:\n left += run.width()\n #print \"Not showing run %s\"%run.runId\n 
continue\n\n if left < newxmin:\n xmin = run.min() + (newxmin - left)\n else:\n xmin = run.min()\n\n if right > newxmax:\n xmax = run.min() + (newxmax - left)\n else:\n xmax = run.max()\n\n #print \"Run: %s, run.width(): %s, left: %s, right: %s, run.min(): %s, run.max(): %s, xmin: %s, xmax: %s, width_so_far: %s, xmax-xmin: %s\" \\\n #%(run.runId,run.width(),left,right,run.min(),run.max(),xmin,xmax,width_so_far,xmax-xmin)\n\n ax1 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.05, 0.9*(xmax-xmin)/newwidth, 0.4])\n ax2 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.45, 0.9*(xmax-xmin)/newwidth, 0.2], sharex=ax1)\n ax3 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.65, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax4 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.75, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax5 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.85, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n left += run.width()\n width_so_far += (xmax-xmin)\n\n ax1.grid(True, color=\"0.75\")\n ax2.grid(True, color=\"0.75\")\n ax3.grid(True, color=\"0.75\")\n ax4.grid(True, color=\"0.75\")\n ax5.grid(True, color=\"0.75\")\n ax1.plot(run.culled_bragg_times.select(run.culled_indexed),\n run.culled_braggs.select(run.culled_indexed), 'd', color=[0.0,1.0,0.0])\n ax1.plot(run.culled_bragg_times.select(~run.culled_indexed),\n run.culled_braggs.select(~run.culled_indexed), 'd', color=[0.0,0.5,1.0])\n ax2.plot(run.hit_rates_times, run.hit_rates, 'o-', color=[0.0,1.0,0.0])\n ax3.plot(run.culled_bragg_times, run.culled_wavelengths, '^', color=[0.8,0.0,0.2])\n ax4.plot(run.culled_bragg_times, run.culled_sifoils, '<', color=[0.8,0.0,0.2])\n ax5.plot(run.culled_bragg_times, run.culled_distances, '>', color=[0.8,0.0,0.2])\n ax1.set_ylabel(\"# of Bragg spots\")\n ax2.set_ylabel(\"Hit rate (%)\")\n ax3.set_ylabel(\"WaveL\")\n ax4.set_ylabel(\"SiFoils(mm)\")\n ax5.set_ylabel(\"Dist (mm)\")\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(braggsmin, braggsmax)\n ax2.set_ylim(hitsmin, hitsmax)\n ax3.set_ylim(wavemin, wavemax)\n ax4.set_ylim(sifomin-10, sifomax+10)\n ax5.set_ylim(distsmin-3, distsmax+3)\n ax1.set_xlabel(\"Time\")\n for ax in ax1, ax2, ax3, ax4, ax5:\n if (ax is not ax1) :\n for label in ax.get_xticklabels():\n label.set_visible(False)\n ax.get_yticklabels()[0].set_visible(False)\n if not first_run:\n ax.get_yaxis().set_visible(False)\n\n ax1.xaxis.set_major_formatter(ticker.FuncFormatter(status_plot.format_time))\n ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.3f\"))\n ax5.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.0f\"))\n ax5.set_title(\"%d:%d/%d:%.1f%% I:%d\"%(run.runId, run.hits_count, len(run.braggs), 100*run.hits_count/len(run.braggs),run.indexed.count(True)))\n\n labels = ax1.get_xticklabels()\n for label in labels:\n label.set_rotation(30)\n\n first_run = False\n\n self.figure.autofmt_xdate()\n self.canvas.draw()\n self.parent.Refresh()\n\n t2 = time.time()\n print(\"Plotted in %.2fs\" % (t2 - t1))", "def plot(self):\n\t\tself.plotOfSpect()", "def plot(self):\n layout = self.graph.layout(\"kk\")\n bbox = igraph.BoundingBox(600, 600)\n figure = igraph.Plot(bbox=bbox, background=\"white\")\n bbox = bbox.contract(100)\n figure.add(self.graph, layout = layout, bbox=bbox)\n figure.show()", "def __init__(self, outdir, data_key=rewards_key, line_color='blue'):\n self.outdir = outdir\n self.data_key = data_key\n self.line_color = line_color\n\n #styling options\n matplotlib.rcParams['toolbar'] = 'None'\n 
plt.style.use('ggplot')\n plt.xlabel(\"Episodes\")\n plt.ylabel(data_key)\n fig = plt.gcf().canvas.set_window_title('simulation_graph')", "def show(self):\n plt.show()", "def plot_blocked_nodes(self, node_list, visited_set):\n points = Marker()\n #visualizations points and lines..\n points.header.frame_id = \"map\"\n points.header.stamp = rospy.get_rostime()\n points.ns = \"markers\"\n points.id = 0\n points.type = points.POINTS\n points.action = points.ADD\n points.pose.orientation.w = 1.0\n points.scale.x = 2*self.rviz_tuning_plt\n points.scale.y = 2*self.rviz_tuning_plt\n points.color.r = 1.0\n points.color.g = 0.0\n points.color.b = 0.0\n points.color.a = 1.0\n points.lifetime = rospy.Duration()\n\n # Nodes blocked by obstacles\n for node in node_list:\n if node.cost == float('Inf'):\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.011\n points.points.append(p1)\n\n # Nodes blocked by planner\n for node in visited_set:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.03\n points.points.append(p1)\n \n self.pub_blocked_nodes.publish(points)", "def show():\n setup()\n plt.show()", "def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()", "def plot(self, fname=None):\n x = np.linspace(self.bounds[0], self.bounds[-1], 200)\n y = [self.evaluate(xi) for xi in x]\n plt.figure()\n plt.plot(x, y, label='Class func')\n plt.plot(self.bounds, self.gis, 'o', label='Algorithm')\n plt.grid(color='0.7')\n plt.xlabel('Dependent Variable')\n plt.ylabel('PP Transformed Class Value')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()", "def plot_graph(self):\r\n A = self.a_grid ; V = self.V1 ; Pol = self.Pol\r\n A_opt = A[Pol.astype(int)]\r\n \r\n fig = plt.subplots(figsize = (8,5))\r\n ax = [None,None]\r\n pltgrid = (1,2)\r\n \r\n ax[0] = plt.subplot2grid(pltgrid, (0,0))\r\n ax[1] = plt.subplot2grid(pltgrid, (0,1))\r\n \r\n ax[0].plot(A[:],V[:,0,0], linewidth = 2, color = 'blue', label = r'$V(a)$: Low $w$')\r\n ax[0].plot(A[:],V[:,0,5], linewidth = 2, color = 'green', label = r'$V(a)$: Median $w$')\r\n ax[0].plot(A[:],V[:,0,-1], linewidth = 2, color = 'red', label = r'$V(a)$: High $w$')\r\n \r\n ax[1].plot(A[:],A_opt[:,0,0], linewidth = 2, color = 'blue', label = r'$a\\'(a)$: Low $w$')\r\n ax[1].plot(A[:],A_opt[:,0,5], linewidth = 2, color = 'green', label = r'$a\\'(a)$: Median $w$')\r\n ax[1].plot(A[:],A_opt[:,0,-1], linewidth = 2, color = 'red', label = r'$a\\'(a)$: High $w$')\r\n ax[1].plot(A[:],A[:], linewidth = 2, color = 'violet', linestyle = 'dashed', zorder = 1)\r\n \r\n \r\n ax[0].set_xlabel(r'$a$') ; ax[0].legend()\r\n ax[1].set_xlabel(r'$a$') ; ax[1].legend()\r\n ax[0].set_title('Value function')\r\n ax[1].set_title('Asset policy')\r\n \r\n plt.tight_layout()\r\n plt.show()", "def sample_and_plot(S0, K, B, T, N, u, d, q, M, barrier_type):\n paths = sample_paths(S0, N, u, d, q, M)\n p_valid, p_invalid, p_counts = split_paths(paths, B, K,\n barrier_type, option)\n\n times = np.linspace(0, T, N + 1)\n\n plt.figure(figsize=(10, 7))\n ax1 = plt.subplot2grid((1, 1), (0, 0))\n ax1.set_ylabel('Stock price (log-scale)')\n ax1.set_xlabel('time')\n for path 
in p_invalid:\n ax1.plot(times, path, c='lightcoral')\n for path in p_valid:\n ax1.plot(times, path, c='grey')\n for path in p_counts:\n ax1.plot(times, path, c='blue')\n\n custom_lines = [Line2D([0], [0], c='lightcoral', lw=2),\n Line2D([0], [0], c='grey', lw=2),\n Line2D([0], [0], c='blue', lw=2),\n Line2D([0], [0], c='red', ls=':', lw=2),\n Line2D([0], [0], c='navy', ls=':', lw=2)]\n\n ax1.axhline(y=K, lw=4, c='navy', ls=':', label='Strike Price')\n ax1.axhline(y=B, lw=4, c='red', ls=':', label='Barrier')\n\n plt.yscale('log')\n ax1.legend(custom_lines, ['invalid (barrier)', 'invalid (option)', 'valid',\n 'barrier', 'strike price'])\n # plt.savefig('up-and-out_call.png', transparent=True)\n plt.show()", "def __init__(self, my_graph: VehicleRoutingProblemGraph, path_queue: MPQueue):\r\n self.my_graph = my_graph\r\n self.nodes = my_graph.graph_nodes\r\n self.warehouse_index = my_graph.warehouse_index\r\n self.figure = plt.figure(figsize=(10, 10))\r\n self.figure_ax = self.figure.add_subplot(1, 1, 1)\r\n self.path_queue = path_queue\r\n self.warehouse_color = 'darkblue'\r\n self._customer_color = 'crimson'\r\n self._line_color = 'darksalmon'\r\n self.line_color_list = ['lime', 'gold', \r\n 'deepskyblue', 'orangered', 'magenta', 'blueviolet', \r\n 'royalblue', 'lawngreen', 'indigo', 'deeppink',\r\n 'darkturquoise', 'springgreen', 'aquamarine', 'darkorange',\r\n 'mediumslateblue', 'aqua']", "def show_dag(self, expand=set()):\n from matplotlib.pyplot import show as pltshow\n\n G = self.make_dag(expand=expand)\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n ## Plotting\n edge_labels = dict(\n [((u, v,), d[\"label\"]) for u, v, d in G.edges(data=True)]\n )\n n = G.size()\n\n ## Manual layout\n # if n == 2:\n if False:\n pos = {\n \"(var)\": [-0.5, +0.5],\n \"(out)\": [+0.5, -0.5],\n }\n pos[self.functions[0].name] = [+0.5, +0.5]\n ## Optimized layout\n else:\n try:\n ## Planar, if possible\n pos = nx.planar_layout(G)\n except nx.NetworkXException:\n ## Scaled spring layout\n pos = nx.spring_layout(\n G,\n k=0.6 * n,\n pos={\n \"(Inputs)\": [-0.5 * n, +0.5 * n],\n \"(Outputs)\": [+0.5 * n, -0.5 * n],\n },\n fixed=[\"(var)\", \"(out)\"],\n threshold=1e-6,\n iterations=100,\n )\n\n # Generate colormap\n color_map = []\n for node in G:\n if G.nodes[node][\"parent\"] == self.name:\n color_map.append(\"blue\")\n else:\n color_map.append(\"green\")\n\n # Draw\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n nx.draw(G, pos, node_size=1000, with_labels=True, node_color=color_map)\n pltshow()", "def showGraph(self):\r\n self.graph_button['state'] = 'disabled'\r\n # Draw connection Graph\r\n self.axGraph.set_visible(True)\r\n nx.draw(self.G, ax=self.axGraph, with_labels=True)\r\n self.canvasPlot.draw()\r\n self.canvasPlot.flush_events()", "def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def plot_ratios(path='/Volumes/OptiHDD/data/pylith/3d/agu2014/output',\n\t\t\t\tsteps=['step01','step02'],\n\t\t\t\t#labels='',\n\t\t\t\tshow=True,\n\t\t\t\txscale=1e3,\n\t\t\t\tyscale=1e-2):\n\tplt.figure()\n\t#path = '/Users/scott/Desktop/elastic'\n\n\t# Deep source\n\t#labels = ['no APMB', 'APMB']\n\t#if labels == '':\n\tlabels = steps\n\tdeep = {}\n\t#uzmax = 0.824873455364\n\t# NOT sure why hardcoded...\n\tuzmax = 1\n\tfor 
i,outdir in enumerate(steps):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\t\tprint(pointsFile)\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t\t#normalize\n\t\tuz_fem = uz_fem / uzmax\n\t\tur_fem = ur_fem / uzmax\n\t\tx_fem = x_fem / 30.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'o-',ms=4,lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'o--',ms=4,lw=4,color=l.get_color()) #mfc='none' transparent\n\t\tdeep[outdir] = uz_fem/uz_fem\n\n\t'''\n\t# Shallow Source\n\tshallow = {}\n\tuzmax = 0.949652827795\n\tfor i,outdir in enumerate(['step11','step12']):\n\t\tpointsFile = os.path.join(path, outdir, 'points.h5')\n\n\t\tx,y,z,ux,uy,uz = pu.extract_points(pointsFile)\n\n\t\tX = x / xscale\n\t\tY1 = ux / yscale\n\n\t\tx_fem = X #/ xscale #double scaling!\n\t\tur_fem = Y1 #/ yscale\n\t\tuz_fem = uz / yscale\n\n\t\t#print(pointsFile)\n\t\tprint(ur_fem.min(), ur_fem.max(), uz_fem.min(), uz_fem.max(), uz_fem.max() / ur_fem.max())\n\n\t#normalize\n\tuz_fem = uz_fem / uzmax\n\tur_fem = ur_fem / uzmax\n\tx_fem = x_fem / 20.0\n\n\t\tl, = plt.plot(x_fem,uz_fem,'.-', mfc='w', lw=4,label=labels[i])\n\t\tplt.plot(x_fem,ur_fem,'.--',lw=4, mfc='w',color=l.get_color()) #mfc='none' transparent\n\n\t\tshallow[outdir] = uz_fem/ur_fem\n\t'''\n\n\t# Annotate\n\tplt.axhline(color='k',lw=0.5)\n\t#plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\t#plt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend()\n\tplt.grid()\n\t#plt.ylim(-0.5, 3.5)\n\t#plt.savefig('deep.png',bbox_inches='tight')\n\t#plt.savefig('shallow.png',bbox_inches='tight')\n\n\t# normalized\n\tplt.ylim(-0.5, 4)\n\tplt.xlim(0,10)\n\tplt.xlabel('Normalized Radial Distance [R / D]')\n\tplt.ylabel('Normalized Displacement [U / Uz_max]')\n\t#plt.savefig('normalized_deep.png',bbox_inches='tight')\n\tplt.savefig('normalized_shallow.png',bbox_inches='tight')\n\n\n\t# Plot ratios of uz versus NOTE: this plot is confusing,,, just keep ratio of uz_max to ur_max\n\t'''\n\tplt.figure()\n\tplt.plot(x_fem, deep['step01'], label='Deep no APMB')\n\tplt.plot(x_fem, deep['step02'], label='Deep w/ APMB')\n\tplt.plot(x_fem, shallow['step11'], label='Shallow no APMB')\n\tplt.plot(x_fem, shallow['step12'], label='Shallow w/ APMB')\n\tplt.xlabel('Distance [km]') #NOTE: maybe plot normailzed X-axis (R-d)\n\t#plt.xlabel('Normalized Distance [R/d]')\n\tplt.ylabel('Ratio [Uz/Ur]')\n\tplt.title('Ratio of vertical to radial displacement')\n\tplt.legend()\n\tplt.show()\n\t'''", "def plot(self, routePoints=None):\n return plot(routePoints, self.profiles)", "def show_graphs(self):\n self.frequency_plot_graph.show()\n self.resistance_graph.show()\n self.temperature_plot_graph.show()\n self.pressure_plot_graph.show()\n self.humidity_plot_graph.show()\n self.overview_graph.show()\n self.overview_graph.setXRange(-1000, 5000)", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def draw_tree(self, agent, color='b'):\n for edge in self.all_edges[agent]:\n parent, child = edge\n for cords in self.xy_cords:\n plt.plot([parent.state[cords[0]], child.state[cords[0]]],\n [parent.state[cords[1]], child.state[cords[1]]], c=color)\n plt.xlim(self.Xi[0])\n plt.ylim(self.Xi[1])\n plt.show()", "def 
planning(self, sx, sy, gx, gy):\n\n start_node = self.Node(self.calc_xy_index(sx, self.min_x),\n self.calc_xy_index(sy, self.min_y), 0.0, -1)\n goal_node = self.Node(self.calc_xy_index(gx, self.min_x),\n self.calc_xy_index(gy, self.min_y), 0.0, -1)\n\n open_set_A, closed_set_A = dict(), dict()\n open_set_B, closed_set_B = dict(), dict()\n open_set_A[self.calc_grid_index(start_node)] = start_node\n open_set_B[self.calc_grid_index(goal_node)] = goal_node\n\n current_A = start_node\n current_B = goal_node\n meet_point_A, meet_point_B = None, None\n\n while 1:\n if len(open_set_A) == 0:\n print(\"Open set A is empty..\")\n break\n\n if len(open_set_B) == 0:\n print(\"Open set B is empty..\")\n break\n\n c_id_A = min(\n open_set_A,\n key=lambda o: self.find_total_cost(open_set_A, o, current_B))\n\n current_A = open_set_A[c_id_A]\n\n c_id_B = min(\n open_set_B,\n key=lambda o: self.find_total_cost(open_set_B, o, current_A))\n\n current_B = open_set_B[c_id_B]\n\n # show graph\n if show_animation: # pragma: no cover\n plt.plot(self.calc_grid_position(current_A.x, self.min_x),\n self.calc_grid_position(current_A.y, self.min_y),\n \"xc\")\n plt.plot(self.calc_grid_position(current_B.x, self.min_x),\n self.calc_grid_position(current_B.y, self.min_y),\n \"xc\")\n # for stopping simulation with the esc key.\n plt.gcf().canvas.mpl_connect(\n 'key_release_event',\n lambda event: [exit(0) if event.key == 'escape' else None])\n if len(closed_set_A.keys()) % 10 == 0:\n plt.pause(0.001)\n\n if current_A.x == current_B.x and current_A.y == current_B.y:\n print(\"Found goal\")\n meet_point_A = current_A\n meet_point_B = current_B\n break\n\n # Remove the item from the open set\n del open_set_A[c_id_A]\n del open_set_B[c_id_B]\n\n # Add it to the closed set\n closed_set_A[c_id_A] = current_A\n closed_set_B[c_id_B] = current_B\n\n # expand_grid search grid based on motion model\n for i, _ in enumerate(self.motion):\n\n c_nodes = [self.Node(current_A.x + self.motion[i][0],\n current_A.y + self.motion[i][1],\n current_A.cost + self.motion[i][2],\n c_id_A),\n self.Node(current_B.x + self.motion[i][0],\n current_B.y + self.motion[i][1],\n current_B.cost + self.motion[i][2],\n c_id_B)]\n\n n_ids = [self.calc_grid_index(c_nodes[0]),\n self.calc_grid_index(c_nodes[1])]\n\n # If the node is not safe, do nothing\n continue_ = self.check_nodes_and_sets(c_nodes, closed_set_A,\n closed_set_B, n_ids)\n\n if not continue_[0]:\n if n_ids[0] not in open_set_A:\n # discovered a new node\n open_set_A[n_ids[0]] = c_nodes[0]\n else:\n if open_set_A[n_ids[0]].cost > c_nodes[0].cost:\n # This path is the best until now. record it\n open_set_A[n_ids[0]] = c_nodes[0]\n\n if not continue_[1]:\n if n_ids[1] not in open_set_B:\n # discovered a new node\n open_set_B[n_ids[1]] = c_nodes[1]\n else:\n if open_set_B[n_ids[1]].cost > c_nodes[1].cost:\n # This path is the best until now. 
record it\n open_set_B[n_ids[1]] = c_nodes[1]\n\n rx, ry = self.calc_final_bidirectional_path(\n meet_point_A, meet_point_B, closed_set_A, closed_set_B)\n\n return rx, ry", "def plot_scenario(self, ax):\n ax.set_xlim((0,12))\n ax.set_ylim((0,12))\n\n # Unpack region's sizes and positions\n obs_x = self.obstacle_vert[0]\n obs_y = self.obstacle_vert[2]\n obs_w = self.obstacle_vert[1]-obs_x\n obs_h = self.obstacle_vert[3]-obs_y\n\n tar_x = self.goal_vert[0]\n tar_y = self.goal_vert[2]\n tar_w = self.goal_vert[1]-tar_x\n tar_h = self.goal_vert[3]-tar_y\n\n obstacle = Rectangle((obs_x,obs_y),obs_w,obs_h,color='red',alpha=0.5)\n target = Rectangle((tar_x,tar_y),tar_w,tar_h, color='green',alpha=0.5)\n\n ax.add_patch(obstacle)\n ax.add_patch(target)", "def plot_graph(self, graphinfo):\n\n WIDTH = 450\n HEIGHT = WIDTH * 0.55\n opts = []\n\n # Generate outfile name\n if not self.rrdfile:\n self.outfiles[graphinfo.name] = self.SKIPPED\n return\n\n logging.info(\"Plotting %s graph for %s\" % (graphinfo.name, self.node))\n self.outfiles[graphinfo.name] = \"%s/%s_%s_%s.png\" % (self.topdir,\n self.file_prefix,\n self.node,\n graphinfo.name)\n opts = opts + [self.outfiles[graphinfo.name]]\n\n # Generate general image options\n opts = opts + [\"--width\", str(WIDTH),\n \"--height\", str(HEIGHT),\n \"--slope-mode\"]\n\n # Generate title\n if graphinfo.title:\n opts = opts + [\"--title\", \"%s (%s)\" % (graphinfo.title, node)]\n\n # Generate X-axis options\n start, end, step = ds.get_time_info()\n duration = end - start\n mg_step = duration / 10\n bg_step = mg_step / 5\n label_step = mg_step\n if mg_step == 0 or bg_step == 0:\n # This is unlikely to happen, but just to be on the safe side.\n x_grid = \"SECOND:1:SECOND:10:SECOND:10:0:%R\"\n else:\n x_grid = \"SECOND:%s:SECOND:%s:SECOND:%s:0:%%R\" % \\\n (bg_step, mg_step, label_step)\n opts = opts + [\"--start\", str(self.start),\n \"--end\", str(self.end),\n \"--step\", str(self.rrdtool_step),\n \"--x-grid\", x_grid]\n\n # Generate Y-axis options\n if graphinfo.y_axis_label:\n opts = opts + [\"--vertical-label\", graphinfo.y_axis_label]\n if graphinfo.y_axis_min_value == 0 or graphinfo.y_axis_min_value:\n opts = opts + [\"--lower-limit\", str(graphinfo.y_axis_min_value)]\n if graphinfo.y_axis_max_value == 0 or graphinfo.y_axis_max_value:\n opts = opts + [\"--upper-limit\", str(graphinfo.y_axis_max_value)]\n if graphinfo.y_axis_rigid:\n opts = opts + [\"--rigid\"]\n\n # Generate metric parameters\n stack_opt = \"\"\n if graphinfo.stack:\n stack_opt = \":STACK\"\n deflist = []\n cdeflist = []\n arealist = []\n for i in graphinfo.metrics:\n name, name_in_graph, unit_in_graph, color = i\n if unit_in_graph:\n new_unit, rate = unit_in_graph\n newname = \"%s_%s\" % (name, new_unit)\n deflist.append(\"DEF:%s=%s:%s:AVERAGE\" %\n (name, self.rrdfile, name))\n cdeflist.append(\"CDEF:%s=%s,%s,/\" %\n (newname, name, rate))\n arealist.append(\"AREA:%s%s:%s%s\" %\n (newname, color, name_in_graph, stack_opt))\n else:\n deflist.append(\"DEF:%s=%s:%s:AVERAGE\" %\n (name, self.rrdfile, name))\n arealist.append(\"AREA:%s%s:%s%s\" %\n (name, color, name_in_graph, stack_opt))\n opts = opts + deflist + cdeflist + arealist\n\n self.rrdtool_cmd(\"graph\", opts, log_level=logging.DEBUG)", "def scan2plot(datafolder, start, end, first, last,\n theta_range, theta_bins, chi_range, chi_bins,\n gamma, delta,\n ci, cj, w, h, SDD, pxl_size, ph, d5i=None,\n fraction=1):\n chi_bins = int(chi_bins) # make sure the input is an integer\n theta_bins = int(theta_bins) # make sure the input is an 
integer\n chi_ax = np.linspace(chi_range[0], \n chi_range[1], chi_bins) # init chi axis\n tth_ax = np.linspace(theta_range[0], \n theta_range[1], theta_bins) # init 2th axis\n int_bin = np.zeros((chi_bins, theta_bins)) # init intensity plot\n tth_weight = np.zeros(theta_bins) # init weight normalization\n # (i.e., the number of times a certain bin has been filled)\n for i in range(first, last + 1):\n print(\"delta = \" + str(delta[i - start]) + \", gamma = \" + \n str(gamma[i - start]) + \n \", status: \" + str(i - start) + \"/\" + \n str(last - first)) # print info on current status\n fname = finder(\"*\" + str(i) + \n \".tif\", datafolder).find() # find image with index i\n with Image.open(fname) as img:\n tth_map, chi_map, PL = angle_maps(gamma[i - start], \n delta[i - start], \n ci, cj, w, h, \n SDD, pxl_size, ph) # angle calculations\n det_img = np.array(img) # convert image to numpy array\n if d5i.any() != None:\n det_img = det_img/(d5i[i - start]) # normalize data to monitor\n det_img /= PL # correct by Lorentz-pol.\n # data binning:\n for j in range(int(h/2*(1-fraction)), int(h/2*(1+fraction))):\n for k in range(int(w/2*(1-fraction)), int(w/2*(1+fraction))):\n # find bin on the 2th axis\n idx = closest(tth_ax, np.rad2deg(tth_map[j][k]))\n # find bin on the chi axis\n jdx = closest(chi_ax, np.rad2deg(chi_map[j][k]))\n # fill bin\n int_bin[jdx][idx] += det_img[j][k]\n # every time a bin is filled add 1 to the weight function\n tth_weight[idx] += 1\n print(\"Done!\")\n return tth_ax, chi_ax, int_bin, tth_weight", "def plot_path(path):\n s = np.linspace(0, path.total_length, 1000, endpoint=False)\n twists = np.array(list(path.target_state(si) for si in s))\n print(twists.shape)\n plt.plot(twists[:,0], twists[:,1])\n plt.show()", "def plotPaths(self, simulationIndex, numberOfPaths):\n for k in range(numberOfPaths):\n path = self.getPath(simulationIndex + k);\n plt.plot(path)\n plt.xlabel('Time')\n plt.ylabel('Realizations of the process')\n plt.show()", "def _plot_goal(self):\n\n # Remove Existing Annotation\n if self._goalAnnotation is not None:\n self._goalAnnotation.remove()\n\n # Set Offsets\n self._goalPoints.set_offsets(np.c_[self._goalX, self._goalZ])\n\n # Update Annotation\n self._goalAnnotation = self._scatPlot.annotate(\"Goal\", (self._goalX[0], self._goalZ[0]))", "def arrr_starrr_graph(self):\n\n plt.figure()\n total_cost = 0\n\n # plot batteries\n counter = 0\n for batt in self.grid.batteries:\n plt.plot(batt.x, batt.y, marker='x',\n color=colors[counter], markersize=10)\n counter += 1\n\n # iterate over houses and path\n for house in self.grid.houses:\n battery = self.grid.batteries[house.connection]\n\n # get path coordinates\n path_data = house.path\n\n # plot path and house\n plt.plot(path_data[0][0], path_data[0][1],\n color=colors[house.connection], linewidth=.3)\n plt.plot(house.x, house.y, marker='p',\n color=colors[house.connection])\n total_cost += path_data[1]\n plt.draw()\n plt.pause(0.000000001)\n\n plt.title(f\"total cost = {total_cost}\")", "def main():\r\n PathGenerator = TrajectoryGenerator()\r\n \r\n ## coordinate \r\n # Y \r\n # ^ /\r\n # | /\r\n # | / <theta>\r\n # o -- -- -- >X\r\n\r\n x_0 = 0.0 # initial x position\r\n y_0 = 0.0 # initial y position\r\n theta_0 = 0.0 *np.pi/180 # initial heading angle of the vehicle \r\n kappa_0 = 0.0 *np.pi/180 # initial steering angle \r\n initial_state = [x_0, y_0, theta_0, kappa_0] \r\n \r\n x_f = 13.0 # final x position\r\n y_f = 8.0 # final y position\r\n theta_f = 0.0 *np.pi/180 # final heading angle of the 
vehicle \r\n kappa_f = 0.0 *np.pi/180 # final steering angle \r\n final_state = [x_f, y_f, theta_f, kappa_f] \r\n\r\n traject = PathGenerator.compute_spline(initial_state, final_state)\r\n point_array = np.asarray(traject)\r\n plt.plot(point_array[:,0], point_array[:,1],'o')\r\n \r\n sample_resolution = 0.5\r\n temp_goal_list = []\r\n for i in range(-2, 3):\r\n temp_final_state = np.copy(final_state)\r\n temp_final_state[1] = temp_final_state[1] + float(i)*sample_resolution\r\n temp_goal_list.append(temp_final_state)\r\n \r\n start = time.time()\r\n point_list = []\r\n for i in range(0, 5):\r\n temp_goal = temp_goal_list[i]\r\n traject = PathGenerator.compute_spline(initial_state, temp_goal)\r\n point_list.append(traject)\r\n end = time.time()\r\n print('Executed time is %f'%(end - start))\r\n \r\n # pdb.set_trace()\r\n for i in range(0,5):\r\n point_array = np.asarray(point_list[i])\r\n plt.plot(point_array[:,0], point_array[:,1],'o')\r\n \r\n plt.axis('equal')\r\n plt.show()", "def plot(self, x_unit='ft', y_unit='psi'):\n\n #Find the grid centers (where the solution exists)\n x_pos = np.cumsum(self.dx_arr) - self.dx_arr[0] / 2.0\n\n #Loop over all stored solutions and plot stair-step line (because\n #pressure is constant over grid block). We skip the first stored values\n #because they are just the initialization values.\n plt.figure()\n for P in self.P_plot:\n plt.plot(x_pos, P)\n\n #Labels, etc.\n plt.xlabel('Reservoir position (' + x_unit + ')')\n plt.ylabel('Pressure (' + y_unit + ')')\n plt.xlim([0, self.res_length])\n plt.show()", "def show_paths(self):\r\n print(\"------------------------\")\r\n print(\"######### ALL PATHS #########\")\r\n\r\n if self.size == 0:\r\n print(\"Empty tree!\")\r\n else:\r\n for i in range(1, self.root.size_tree + 1):\r\n node = self.select(i)\r\n if node.size_tree == 1:\r\n print(\"|\" + self.str_single_path(node))\r\n\r\n print(\"------------------------\")", "def plot(self):\n\t\tself.plotOfXray().plot()" ]
[ "0.66848135", "0.6656645", "0.65066147", "0.6478617", "0.64643204", "0.6413952", "0.63114595", "0.630562", "0.6239466", "0.61826295", "0.61755365", "0.61461246", "0.611502", "0.6114483", "0.6070725", "0.60319483", "0.6004294", "0.5978453", "0.5932531", "0.58990204", "0.585567", "0.5847063", "0.58378553", "0.5824596", "0.5818627", "0.57909566", "0.57790655", "0.5768143", "0.5743771", "0.56419075", "0.5629646", "0.56280226", "0.56182986", "0.5616727", "0.5616136", "0.56097996", "0.56015784", "0.5593378", "0.5581426", "0.5566796", "0.5559179", "0.555916", "0.55500376", "0.554704", "0.5544804", "0.5525481", "0.55243796", "0.55234396", "0.5522718", "0.55218613", "0.5509742", "0.5507346", "0.55030507", "0.54983985", "0.54968077", "0.54957825", "0.547188", "0.54543513", "0.54400206", "0.5434478", "0.5433969", "0.5418773", "0.5414325", "0.5408371", "0.54069203", "0.54064786", "0.54063356", "0.5402164", "0.54006064", "0.53921866", "0.53867465", "0.53728276", "0.53707033", "0.5367051", "0.5359465", "0.5355368", "0.53510255", "0.53459865", "0.5337088", "0.5336978", "0.5332911", "0.5332195", "0.5323519", "0.5322619", "0.5322619", "0.5322619", "0.531934", "0.5316315", "0.5315947", "0.5315616", "0.53134674", "0.5312676", "0.53102785", "0.5309913", "0.53095275", "0.53066444", "0.5296298", "0.5292471", "0.52899855", "0.52836245" ]
0.5336481
80
Delete a log file.
def delete_log(file_path): if os.path.exists(file_path): print('Deleting log %s...' % file_path) os.remove(file_path) else: raise ValueError("File %r doesn't exists - cannot delete." % file_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_log():\n log_path = Path.cwd() / \"premise.log\"\n if log_path.exists():\n log_path.unlink()", "def delete_file(fileName):\n os.remove(fileName)\n print (\"Deleteing file: \" + str(fileName))\n write_log()\n read_log()", "def delete_log(filename):\n log_directory = os.path.dirname(os.path.abspath(__file__)) + LOG_FOLDER\n response_code = 400\n response = \"\"\n if filename in os.listdir(log_directory):\n try:\n os.remove(os.path.join(log_directory, filename))\n response = f\"File {filename} was successfully deleted.\"\n response_code = 200\n except IsADirectoryError:\n response = f\"{filename} exists, but is a directory and not a file. Deletion failed.\"\n else:\n response = f\"File {filename} does not exist and so couldn't be deleted.\"\n return make_response(jsonify({'message': response}), response_code)", "def delete_log(self):\n os.system('rm -rf *.log')\n os.system('rm -rf *.log~')\n os.system('rm -rf *.last')\n os.system('rm -rf *.last~')", "def deleteGmlLoaderLogFile(logFile, command, logger):\n \n if os.path.isfile(logFile) == True:\n reader = open(logFile)\n \n for line in reader:\n if re.search(\"TransactionHandler - Rollback transaction\", line) != None:\n logger.error(\"TransactionHandler - Rollback transaction for \" + command)\n \n reader.close()\n message = \"Delete \" + logFile + \" \" + str(time.strftime(\"%d.%m.%Y %H:%M:%S\", time.gmtime(os.path.getmtime(logFile)))) + \" \" + str(os.path.getsize(logFile)) + \" bytes\"\n logger.info(message)\n os.remove(logFile)", "def delete(self, filename):\n pass", "def delete_record_file(self, record_file, logStat):\n result = self.storage_delete_file(record_file.group, record_file.storage)\n if result:\n logStat(deleted=True, file_obj=record_file)\n record_file.delete()\n return result", "def delete_file(self, lfile):\n raise NotImplementedError('delete_file')", "def delete_file(filename):\n\tprint client.file_delete(filename)", "def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)", "def delete_logs(self):\n if self.etw_log is not None:\n files = sorted(glob.glob(self.etw_log + '*'))\n for path in files:\n try:\n os.remove(path)\n except Exception:\n pass", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def _clear_log(log_path):\n\n\twith logging._lock:\n\t\twith open(log_path, 'w'):\n\t\t\tpass", "def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def delete_file(path):\n return files.delete_file(path)", "def delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete(self):\n\n try:\n remove(self.file)\n except OSError:\n pass", "def delete_file(self, filepath):\n self.ftp.delete(filepath)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def _delete_file(path):\n if os.path.isfile(path):\n os.remove(path)", "def delete(self, filename):\n raise NotImplementedError", "def delete_file(self):\n if (not self.exists()):\n raise IOError(\"File at '{}' does not exist.\".format(self.location))\n os.remove(self.location)", "def 
delete_file(file: str) -> None:\n\tuux.show_info(\"Deleting \" + file)\n\n\tif not os.path.exists(file):\n\t\t# Files does not exist\n\t\treturn\n\n\tos.remove(file)", "def delete(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)", "def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]", "def safe_delete(self, filename):\n try:\n os.remove(filename)\n except OSError:\n pass", "def delete( self ):\n if os.path.exists(self.filename):\n os.remove(self.filename)", "def delete_file(input_fn):\r\n if os.path.isfile(input_fn):\r\n os.remove(input_fn)", "def clear_log_files(log_files):\n for log_file in log_files:\n try:\n open(log_file, 'w', 0).close()\n except IOError:\n pass", "def _delete(filename):\n return os.remove(filename)", "def Delete_File(self,txn,filename):\n opid = self.new_opid()\n xaction = DeleteFile_Operation(os.path.join(self.home,filename),opid)\n self._add_operation(txn,xaction)", "def exitLogCleanup(*args):\n for logFile in args:\n os.unlink(logFile)\n return None", "def delete_file(self, path):\n if not path_exists(path, self._store_folder):\n raise NotFoundException(\"\")\n os.remove(path)", "def delete_tempfile(path):\n try:\n unlink(path)\n except:\n pass", "def tearDown(self):\n if os.path.isfile(LOGFILENAME):\n os.remove(LOGFILENAME)", "def delete_temp_file(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e", "def delete_file(self, name):\n del self.files[name]", "def cleanup(self, keep_logs : bool = False):\n if self.path.exists():\n try:\n shutil.rmtree(self.path)\n except Exception as e:\n warn(e)\n if self.log_path.exists() and not keep_logs:\n try:\n os.remove(self.log_path)\n except Exception as e:\n warn(e)", "def delete_data_file(path):\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n os.remove(path)\n except FileNotFoundError:\n pass", "def _removeFile(self, filename):\n try:\n #delete the output file\n os.remove(filename)\n except:\n #print (\"Failed to remove the file: \" + filename)\n pass", "def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")", "def delete(self, host, file):", "def test_despasito_log_file():\n\n fname = \"despasito_{}.log\".format(random.randint(1, 10))\n despasito.initiate_logger(log_file=fname, verbose=10)\n logger.info(\"test\")\n\n if os.path.isfile(fname):\n flag = True\n despasito.initiate_logger(log_file=False)\n try:\n os.remove(fname)\n except Exception:\n print(\"Error removing log file\")\n else:\n flag = False\n\n assert flag", "def delete_file(self, timeout=None, **kwargs):\n # type: (Optional[int], Optional[Any]) -> None\n try:\n self._client.file.delete(timeout=timeout, **kwargs)\n except StorageErrorException as error:\n process_storage_error(error)", "def clearLog():\n logPath = getLogPath()\n\n with open(logPath, 'w') as f:\n f.write('')", "def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. 
{}'.format(ex.strerror)\n raise MetaFileError(msg)", "def delete_httpd_session_file(self):\n for filepath in (self.HTTPD_SESSION_FILE, self.HTTPD_SESSION_FILE_EXPIRED):\n if os.path.isfile(filepath):\n os.remove(filepath)\n logger.info('deleted file %s' % (filepath))", "def _close_file_logger(self):\n if self._file_log_handler is not None:\n self._file_log_handler.flush()\n self._file_log_handler.close()\n self.logger.removeHandler(self._file_log_handler)\n self._file_log_handler = None\n self.logger.propagate = True", "def delete_access_token_file():\n if os.path.isfile(AccessData.ACCESS_TOKEN_FILE):\n os.remove(AccessData.ACCESS_TOKEN_FILE)\n logger.info('deleted file %s' % (AccessData.ACCESS_TOKEN_FILE))", "def delete_local_file(file_path):\r\n try:\r\n os.remove(file_path)\r\n except OSError as e:\r\n print(f\"Error deleting file {file_path}: {e}\")", "def remove_file(filename, verbose=True):\r\n if verbose:\r\n LOG.info('Deleting file %s' % os.path.relpath(filename, BASE_DIR))\r\n if not os.path.exists(filename):\r\n LOG.warn(\"File does not exist: %s\" % os.path.relpath(filename, BASE_DIR))\r\n else:\r\n os.remove(filename)", "def __del__(self):\n\n self.logfd.close()", "async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):", "def deleteSingleFile(filename):\n os.popen('rm {}'.format(filename))", "def delete_file(self, path):\n raise HTTPError(\n 501,\n \"Narrative deletion not implemented here. Deletion is handled elsewhere.\",\n )", "def clear_logs_folder():\n script_dir = os.path.dirname(os.path.abspath(__file__))\n folder = os.path.join(script_dir, '../logs/')\n for the_file in folder:\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n except Exception as e:\n print(e)", "def rm_file(filename):\n try:\n os.unlink(filename)\n except FileNotFoundError:\n pass", "def file_delete(self, path):\n params = {'root': self.session.root, 'path': format_path(path)}\n\n url, params, headers = self.request(\"/fileops/delete\", params)\n\n return self.rest_client.POST(url, params, headers)", "def _remove_mp_logs(self):\n for i, fn in enumerate(self.logfiles):\n os.remove(fn)", "def _delete_file(file_id):\n log.warning(f\"Deleting file '{file_id}'...\")\n _drive_service.files().delete(fileId=file_id).execute()\n log.info(f\"Deleting file '{file_id}' - done.\")", "def Delete_File(self,tx,filename):\n if tx != self.tx:\n raise InvalidTransaction(tx)\n\n fullname = os.path.join(self.home,filename)\n win32_txf.DeleteFileTransacted(fullname,transaction = tx)", "def clean_file_before_test():\n\n if os.path.exists(LOG_FOLDER):\n for file in os.listdir(LOG_FOLDER):\n os.remove(LOG_FOLDER + \"/\" + file)", "def PurgeLog(log):\n try:\n fh = open(log, 'w')\n try:\n fh.write('')\n finally:\n fh.close()\n except IOError, e:\n raise Error(e)", "def delete_file(self, repo, file_name):\n DataHubManager.has_repo_file_privilege(\n self.username, self.repo_base, repo, 'write')\n\n file_path = user_data_path(self.repo_base, repo, file_name)\n os.remove(file_path)", "def delete_file(self, key):\n path = os.path.join(self.directory, self.subdirectory, key)\n if os.path.isfile(path):\n os.unlink(path)\n else:\n raise ValueError(f\"No such file: {key}\")", "def delFiles(self):\r\n \r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if os.path.exists(self.h5File): \r\n os.remove(self.h5File) \r\n 
logger.debug(\"{0:s} File {1:s} deleted.\".format(logStr,self.h5File)) \r\n except XmError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def delete_file(self, filename):\n if not filename in self.files:\n raise IOError('File %s Not Found' % filename)\n\n for nodename in self.files[filename]:\n node = self.datanodes[nodename]\n node.delete_file(filename)\n del self.files[filename]\n logging.info('file %s deleted' % filename)", "def delete_file(self, hash):\n self.tree.delete(hash)\n query = \"delete from files where hash='%s'\"%hash\n self.connection.execute(query)\n self.connection.commit()", "def rm_file(file_):\n Path(file_).unlink(missing_ok=True)", "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "def del_file(name_del_file):\n\n try:\n os.remove(config_tools.full_dest+name_del_file)\n except OSError:\n print(f\"Удалить файл {name_del_file} не удалось, файл не найден.\")\n else:\n print(f\"Файл успешно удален {name_del_file}\")", "def DeleteFile(self, file_id):\n self.service.files().delete(fileId=file_id).execute()", "def delete_file(filename):\n if os.path.isfile(filename):\n return os.remove(filename)", "def remove_log_path(self, monitor_name, log_path):\n pass", "def remove_file(path: str) -> None:\n\tremove(path)", "def storage_delete_report_file(self, report_pk):\n self._get_queryset(pk=report_pk).delete()", "def cleanup_file(path_to_file):\n print \"Removing generated file: %s\" % path_to_file\n os.remove(path_to_file)", "def remove_file(self):\n if self.file_exists:\n os.remove(self.file_name)", "def close_log():\n\n global log_file\n if log_file is not None:\n try:\n log_file.flush()\n finally:\n log_file.close()", "def _remove(path, force):\n if not os.path.exists(path):\n return\n elif os.path.isfile(path) and force:\n os.remove(path) # remove the file\n elif os.path.isdir(path) and force:\n import shutil\n shutil.rmtree(path) # remove dir and all contains\n else:\n print('Logdir contains data. Please, set `force` flag to overwrite it.')\n import sys\n sys.exit(0)", "def safe_delete(filename):\r\n try:\r\n os.unlink(filename)\r\n except OSError as e:\r\n if e.errno != errno.ENOENT:\r\n raise", "def delete_file(self, path):\n return self.client._perform_empty(\n \"DELETE\", \"/projects/%s/managedfolders/%s/contents/%s\" % (self.project_key, self.odb_id, utils.quote(path)))", "def teardown_class(cls):\n os.remove(logfilename)", "def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. 
{}'\n .format(ex.strerror))", "def delete(self):\n if not pdbox._args.get(\"dryrun\"):\n result = execute(pdbox.dbx.files_delete_v2, self.path)\n pdbox.debug(\"Metadata response: %s\" % result.metadata)\n pdbox.info(\"Deleted %s\" % self.uri)", "def delete_file(sender, instance, *args, **kwargs):\n if instance.file:\n _delete_file(instance.file.path)", "def remove_file(self, path):\n pass", "def delete(self):\n os.remove(self.file_path)\n super(VideoFile, self).delete()", "def delete_activity(self, activity_log_id):\n self._db.execute(\"\"\"\n DELETE FROM exception_log\n WHERE activity_log = ?\"\"\", (activity_log_id, ))", "def delete_file(self, filename: str, directory: str = 'gcodes') -> Dict:\n raise NotImplementedError", "def delete_db(self):\n import os.path\n os.remove(self.filepath)", "def remove(path):\n try:\n os.remove(path)\n except FileNotFoundError:\n _logger.error('file does not exist %s; stack: %s', path, stack_trace())", "def delete_file(filename, sudo=True):\n LOG.info(\"Deleting file {}\".format(filename))\n cmd = \"rm {}\".format(filename)\n _exec_cmd(cmd=cmd, sudo=sudo, fail_ok=False)", "def delete_file(filename: str):\n\t\tif filename == \"ALL\":\n\t\t\tfor file in os.listdir(\"data/music/\"):\n\t\t\t\tdeleted = False\n\t\t\t\twhile not deleted:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.remove(f\"data/music/{file}\")\n\t\t\t\t\t\tdeleted = True\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"Not removed, waiting 1 second...\")\n\t\t\t\t\t\tasyncio.sleep(1)\n\t\telse:\n\t\t\tprint(\"File--: \", filename)", "def filedelete(fname):\n\n if os.path.exists(fname):\n try:\n if os.path.isdir(fname):\n # delete folder\n shutil.rmtree(fname)\n return\n else:\n # delete file\n os.remove(fname)\n return\n except:\n return\n else:\n return", "def del_file(self, pid, fd):\n del self.imap[pid][fd]", "def delete(self, filepath: str) -> None:\n if self.isfile(filepath):\n storage_services.delete(\n self._bucket_name, self._get_gcs_file_url(filepath))\n else:\n raise IOError('File does not exist: %s' % filepath)", "def delete(self):\n backend = self._get_backend()\n if not backend:\n raise NotImplementedError(\"No deleting backend provided\")\n backend.logbook_destroy(self.uuid)" ]
[ "0.77605826", "0.7727856", "0.73945826", "0.7313134", "0.72794944", "0.7045915", "0.7009386", "0.6897459", "0.6797494", "0.6758738", "0.6743915", "0.6732991", "0.671415", "0.66890925", "0.6686626", "0.6566584", "0.6561632", "0.6561151", "0.65555596", "0.6545941", "0.6545941", "0.6531315", "0.65161645", "0.65148914", "0.6387", "0.63710403", "0.6344273", "0.6332207", "0.62937456", "0.6291752", "0.6290496", "0.6286669", "0.62834173", "0.6279233", "0.62754893", "0.62478065", "0.62360734", "0.6229581", "0.6214024", "0.6205561", "0.6202018", "0.617444", "0.61694294", "0.6163506", "0.61528736", "0.61465997", "0.6146118", "0.6131082", "0.6113097", "0.61097854", "0.6102765", "0.6097921", "0.609697", "0.6093105", "0.60722643", "0.6067023", "0.6064898", "0.6059516", "0.6044464", "0.6038995", "0.603496", "0.60266477", "0.6003933", "0.60009193", "0.5996364", "0.5995791", "0.5986739", "0.5986604", "0.59859234", "0.59820706", "0.59652424", "0.5964226", "0.5959238", "0.5933512", "0.59285736", "0.59190524", "0.59168744", "0.59110606", "0.5909686", "0.5880142", "0.58779716", "0.58738476", "0.5861334", "0.5857797", "0.585535", "0.5846182", "0.58440435", "0.58419645", "0.58408636", "0.5840817", "0.5837202", "0.58307016", "0.582814", "0.58274484", "0.5822229", "0.5822084", "0.5821845", "0.581566", "0.5810628", "0.58060175" ]
0.8215973
0
Create a new Logger.
def __init__(self, file_path, print_too=True, override=False): self.file_path = file_path self.print_too = print_too if override: if os.path.exists(file_path): print('Overriding - deleting previous log...') os.remove(file_path) os.makedirs(os.path.dirname(file_path), exist_ok=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!", "def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n \r\n return logger", "def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} created a new logger')\n return logger", "def _create_logger(self, log_dir: str) -> logging.Logger:\n self.log_dir = log_dir\n self.log_file = os.path.join(log_dir, self.name)\n os.makedirs(self.log_dir, exist_ok=True)\n logger = logging.getLogger(self.log_file)\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(self.log_file)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s\",\n datefmt=\"%Y-%m-%d-%H:%M:%S\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def create_logger():\n global logger\n\n formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')\n handler = TimedRotatingFileHandler(log_file, when=\"midnight\", interval=1)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n handler.suffix = \"%Y-%m-%d\"\n logger = logging.getLogger(\"sacplus\")\n logger.setLevel(log_level)\n logger.addHandler(handler)", "def create_logger():\n log = logging.getLogger() # root logger\n log.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n cformat = '%(log_color)s' + format_str\n colors = {'DEBUG': 'reset',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(cformat, date_format,\n log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n log.addHandler(stream_handler)\n return logging.getLogger(__name__)", "def create_logger():\n logger = logging.getLogger(\"punctuation_logger\")\n logger.setLevel(logging.INFO)\n #logger.setLevel(logging.NOTSET) # Set Logger's level to NOTSET, default is WARNING\n\n # create the logging file handler\n if options.log_file is not None:\n fh = logging.FileHandler(options.log_file)\n \n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh.setFormatter(formatter)\n fh.setLevel(logging.NOTSET)\n # add handler to logger object\n logger.addHandler(fh)\n return logger", "def create_logger(log_dir=None):\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n log_format = '%(asctime)s %(process)d [%(levelname)s] %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_format)\n logger = logging.getLogger('es_on_gke')\n if log_dir:\n log_file = os.path.join(log_dir, 'log.txt')\n file_hdl = logging.FileHandler(log_file)\n formatter = logging.Formatter(fmt=log_format)\n file_hdl.setFormatter(formatter)\n logger.addHandler(file_hdl)\n return logger", "def create_logger(app_name: str) -> logging.Logger:\n if not os.path.exists(os.path.join(os.getcwd(), 'logs')):\n os.mkdir(os.path.join(os.getcwd(), 
'logs'))\n\n app_logfile = os.path.join(os.getcwd(), 'logs', f'{app_name}.log')\n\n logger = logging.getLogger(f\"{app_name}-logger\")\n logger.setLevel(logging.DEBUG)\n\n handler = logging.handlers.RotatingFileHandler(filename=app_logfile, mode='a', maxBytes=20000, backupCount=10)\n handler.setLevel(logging.DEBUG)\n\n # Set the formatter\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n # Set it as the base handler\n logger.base_handler = handler\n\n # Also add a newline handler to switch to later\n newline_handler = logging.FileHandler(filename=app_logfile, mode='a')\n newline_handler.setLevel(logging.DEBUG)\n newline_handler.setFormatter(logging.Formatter(fmt='')) # Must be an empty format\n \n logger.newline_handler = newline_handler\n\n # Also add the provision for a newline handler using a custom method attribute\n logger.newline = types.MethodType(add_newlines, logger)\n\n # Also add a StreamHandler for printing to stderr\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n \n logger.addHandler(console_handler)\n\n return logger", "def _create_logger(title, log_msg_id=\"\", log_file_suffix=\".log\"):\n\n logging.setLoggerClass(SkidlLogger)\n logger = logging.getLogger(title)\n\n # Errors & warnings always appear on the terminal.\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Errors and warnings are stored in a log file with the top-level script's name.\n handler = SkidlLogFileHandler(get_script_name() + log_file_suffix, mode=\"w\")\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Set logger to trigger on info, warning, and error messages.\n logger.setLevel(logging.INFO)\n\n # Augment the logger's functions to count the number of errors and warnings.\n logger.error = CountCalls(logger.error)\n logger.warning = CountCalls(logger.warning)\n\n return logger", "def create_logger(logger_name,\n log_format=None,\n log_level=logging.INFO,\n log_path=None):\n logger = logging.getLogger(logger_name)\n assert (len(logger.handlers) == 0)\n logger.setLevel(log_level)\n if log_path is None:\n handler = logging.StreamHandler()\n else:\n os.stat(os.path.dirname(os.path.abspath(log_path)))\n handler = logging.FileHandler(log_path)\n handler.setLevel(log_level)\n if log_format is not None:\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def create_logger(logging, tool_name, level):\n logger = logging.getLogger(tool_name)\n\n # Create handlers\n handler = logging.StreamHandler()\n handler.setLevel(level)\n\n # Create formatters and add it to handlers\n logformat = logging.Formatter(\n '[%(name)s - %(asctime)s] %(levelname)s: %(message)s')\n handler.setFormatter(logformat)\n\n # Add handlers to the logger\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def create_logger_service(program_id, processor_id):\n logger = logging.getLogger(__name__)\n logger = logging.LoggerAdapter(logger,\n extra={'program_id': program_id,\n 'processor_id': processor_id})\n return logger", "def 
create_logger(name, log_file=None):\n l = logging.getLogger(name)\n formatter = logging.Formatter('[%(asctime)s] %(message)s')\n l.setLevel(logging.DEBUG)\n\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n sh.setLevel(logging.INFO)\n l.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n l.addHandler(fh)\n\n return l", "def create_logger(log_dir):\n logger = logging.getLogger(__file__)\n logger.setLevel(logging.INFO)\n\n # file logger\n log_filename = \"probabilist_connectogram_%s.log\" % time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n if log_dir:\n log_path = os.path.join(log_dir, log_filename)\n else:\n log_path = log_filename\n file_handler = logging.FileHandler(log_path)\n formatter = logging.Formatter('%(asctime)s :: %(message)s')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n # console logger\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n logger.info(\"Log path: %s\" % log_path)\n\n return logger", "def build_logger(self):\n pass", "def create_logger(name, format='%(levelname)s: %(message)s', datefmt=None,\n stream=None, level=logging.INFO, filename=None, filemode='w',\n filelevel=None, propagate=True):\n\n # Get a logger for the specified name\n logger = logging.getLogger(name)\n logger.setLevel(level)\n fmt = logging.Formatter(format, datefmt)\n logger.propagate = propagate\n\n # Remove existing handlers, otherwise multiple handlers can accrue\n for hdlr in logger.handlers:\n logger.removeHandler(hdlr)\n\n # Add handlers. Add NullHandler if no file or stream output so that\n # modules don't emit a warning about no handler.\n if not (filename or stream):\n logger.addHandler(logging.NullHandler())\n\n if filename:\n hdlr = logging.FileHandler(filename, filemode)\n if filelevel is None:\n filelevel = level\n hdlr.setLevel(filelevel)\n hdlr.setFormatter(fmt)\n logger.addHandler(hdlr)\n\n if stream:\n hdlr = logging.StreamHandler(stream)\n hdlr.setLevel(level)\n hdlr.setFormatter(fmt)\n logger.addHandler(hdlr)\n\n return logger", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s %(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! 
This is Joey-NMT.\")\n return logger", "def construct_logger(in_logger_file_path):\n logger_configfile_path = in_logger_file_path + \"/log.properties\"\n # print logger_configfile_path\n logging.config.fileConfig(logger_configfile_path)\n logger = logging.getLogger(\"ITR2\")\n return logger", "def __create_logger(who, level):\n global loggers\n global toconsole\n global LEVELS\n global console\n global logfile\n loggers[who] = logging.getLogger(who)\n loggers[who].setLevel(level)\n format = logging.Formatter(\"%(asctime)s - %(name)s - \"\\\n \"%(levelname)s - %(message)s\")\n if (toconsole):\n if (console == None):\n console = logging.StreamHandler()\n console.setFormatter(format)\n loggers[who].addHandler(console)\n else:\n if (logfile == None):\n logfile = logging.handlers.RotatingFileHandler('/var/log/yapc.log',\n maxBytes=10485760,\n backupCount=10)\n logfile.setFormatter(format)\n loggers[who].addHandler(logfile)\n loggers[GENERIC_LOG_NAME].log(LEVELS[\"VDBG\"],\n \"Add logger for \"+who+\" at level \"+str(level))", "def logger_setup(self, logger_name):\n logger = logging.getLogger(logger_name)\n logger_path = \"/tmp/\" + logger.name\n logger_format = '%(asctime)s %(name)s %(levelname)s %(lineno)d %(message)s'\n\n # set up logging to file\n logging.basicConfig(\n level=logging.INFO,\n format=logger_format,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=logger_path,\n filemode='w'\n )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which for console use\n formatter = logging.Formatter(logger_format)\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n return logger", "def get_logger():\n # Prepare log directory.\n try:\n os.mkdir('logs')\n except FileExistsError:\n pass\n\n # Create logger and formatter.\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(message)s')\n\n # Create and attach stream handler.\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Create and attach file handler.\n file_handler = logging.handlers.TimedRotatingFileHandler(\n 'logs/log.txt', when='d', encoding='utf-8')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n return logger", "def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))", "def get_logger(name: str) -> logging.Logger:\n try:\n p = Path(name)\n if p.exists():\n name = str(p.absolute().relative_to(Path.cwd()).as_posix())\n except:\n pass\n logger = logging.getLogger(name)\n # logger.addHandler(TqdmLoggingHandler())\n return logger", "def _generate_log(path):\n # Create a logger and set the level.\n logger = logging.getLogger(\"Log_info\")\n # Check handler exists\n if len(logger.handlers) > 0:\n return logger # Logger already exists\n # set logger level\n logger.setLevel(logging.DEBUG)\n # Create file handler, log format and add the format to file handler\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(path)\n\n # See https://docs.python.org/3/library/logging.html#logrecord-attributes\n # for log format attributes.\n log_format = \"%(levelname)s %(asctime)s %(message)s\"\n formatter = logging.Formatter(log_format)\n stream_handler.setFormatter(formatter)\n 
file_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n\n return logger", "def create_logger(log_level):\n log_formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=LOG_TIMESTAMP_FORMAT)\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n logger = logging.getLogger('blockip')\n logger.setLevel(log_level)\n logger.addHandler(console_handler)\n return logger", "def get_logger(logger_name, logging_format, file_name, level=logging.INFO):\n path, prepared = '', True\n for cat in file_name.split('/')[1:-1]:\n path += '/%s' % cat\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except PermissionError:\n prepared = False\n break\n if not prepared:\n file_name = '/tmp/%s' % file_name.split('/')[-1]\n logging.basicConfig(level=level, format=logging_format)\n log = logging.getLogger(logger_name)\n handler = logging.FileHandler(file_name, encoding='utf8')\n handler.setFormatter(logging.Formatter(logging_format))\n log.addHandler(handler)\n log.setLevel(level=level)\n return log", "def create_logger(level=logging.DEBUG, record_format=None):\n if record_format is None:\n record_format = \"[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] ## %(message)s\"\n\n logger = logging.getLogger(\"mylogger\")\n logger.setLevel(level)\n # 修改\n fh.setLevel(level)\n ch.setLevel(level)\n formatter = logging.Formatter(record_format)\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger", "def default_logger_creator(config):\n return UnifiedLogger(config, logdir, loggers=None)", "def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())", "def create_logger(log_file=None, file_=True, console=True,\n with_time=False, file_level=2, console_level=2,\n propagate=False, clear_exist_handlers=False, name=None):\n if file_:\n prefix = strftime('%Y%m%d%H%M%S', localtime(time()))\n if log_file is None:\n log_file = os.path.join(os.path.dirname(__file__), prefix)\n elif with_time:\n log_file = os.path.join(os.path.dirname(log_file), prefix + \"_\" + os.path.basename(log_file))\n\n logger = logging.getLogger(name)\n\n if clear_exist_handlers:\n logger.handlers.clear()\n\n logger.setLevel(levels[1])\n logger.propagate = propagate\n\n formatter = MyFormatter(\"(User) %(asctime)s: %(levelname).1s %(message)s\")\n\n if file_:\n # Create file handler\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(levels[file_level])\n file_handler.setFormatter(formatter)\n # Register handler\n logger.addHandler(file_handler)\n\n if console:\n # Create console handler\n console_handler = logging.StreamHandler()\n console_handler.setLevel(levels[console_level])\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n return logger", "def logger() -> logging.Logger:\n return logging.getLogger(__name__)", "def create_logger(**kwargs):\n\n log = logging.getLogger()\n log.setLevel(logging.INFO)\n\n # Create Log Format(s)\n f_format = logging.Formatter('%(asctime)s:%(processName)s:%(name)s:%(levelname)s:%(message)s')\n\n # Create Handlers\n c_handler = logging.StreamHandler()\n c_handler.setLevel(logging.INFO)\n c_handler.setFormatter(f_format)\n log.addHandler(c_handler)\n\n for filename, level in kwargs.items():\n handler = logging.FileHandler(filename=filename)\n handler.setLevel(level)\n 
handler.setFormatter(f_format)\n log.addHandler(handler)\n\n return log", "def setupLogger(logger=None, log_format=\"%(asctime)s %(levelname)s [\"+APP_NAME+\"] %(message)s\", level=logging.INFO, log_name=APP_NAME+\".log\", logger_name=APP_NAME):\r\n\tif logger is None:\r\n\t\tlogger = logging.getLogger(logger_name)\r\n\t\r\n\tlogger.propagate = False # Prevent the log messages from being duplicated in the python.log file\r\n\tlogger.setLevel(level)\r\n\t\r\n\tfile_handler = logging.handlers.RotatingFileHandler(make_splunkhome_path([\"var\", \"log\", \"splunk\", log_name]), maxBytes=2500000, backupCount=5)\r\n\tformatter = logging.Formatter(log_format)\r\n\tfile_handler.setFormatter(formatter)\r\n\t\r\n\tlogger.handlers = []\r\n\tlogger.addHandler(file_handler)\r\n\t\r\n\treturn logger", "def get_logger(name, file_name_path='yang.log'):\n # check if file exists\n exists = False\n if os.path.isfile(file_name_path):\n exists = True\n FORMAT = '%(asctime)-15s %(levelname)-8s %(name)5s => %(message)s - %(lineno)d'\n DATEFMT = '%Y-%m-%d %H:%M:%S'\n logging.basicConfig(datefmt=DATEFMT, format=FORMAT, filename=file_name_path, level=logging.INFO)\n logger = logging.getLogger(name)\n # if file didn t exist we create it and now we can set chmod\n if not exists:\n os.chmod(file_name_path, 0o664 | stat.S_ISGID)\n return logger", "def get_logger(self, logname, logfile, loglevel, propagate):\n # TODO: simplify\n logger = logging.getLogger(logname)\n logger_handler = WatchedFileHandler(logfile, mode='w')\n # removed \\t%(name)-6s\n log_fmt = '%(asctime)s\\t%(levelname)-8s\\t%(message)s'\n logger_handler.setFormatter(\n logging.Formatter(log_fmt, '%b %d %H:%M:%S'))\n logger.addHandler(logger_handler)\n logger.propagate = propagate\n logger.setLevel(loglevel)\n return logger", "def create_logger(job_name, log_file=None, debug=True):\n logging.basicConfig(level=5,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M')\n logging.root.handlers = []\n if debug:\n chosen_level = 5\n else:\n chosen_level = logging.INFO\n logger = logging.getLogger(job_name)\n formatter = logging.Formatter(fmt='%(asctime)s %(message)s',\n datefmt='%m/%d %H:%M:%S')\n if log_file is not None:\n log_dir = osp.dirname(log_file)\n if log_dir:\n if not osp.exists(log_dir):\n os.makedirs(log_dir)\n # cerate file handler\n fh = logging.FileHandler(log_file)\n fh.setLevel(chosen_level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n # Colored stream handler\n sh = ColorStreamHandler()\n sh.setLevel(chosen_level)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def get_logger(self, name=\"amulet-logger\", level=logging.DEBUG):\n log = logging\n logger = log.getLogger(name)\n fmt = log.Formatter(\"%(asctime)s %(funcName)s \"\n \"%(levelname)s: %(message)s\")\n\n handler = log.StreamHandler(stream=sys.stdout)\n handler.setLevel(level)\n handler.setFormatter(fmt)\n\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n 
handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def construct_logger(name, save_dir):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n file_no_ext = out_file_core()\n\n fh = logging.FileHandler(os.path.join(save_dir, file_no_ext + \".txt\"), encoding=\"utf-8\")\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(name)s %(levelname)s: %(message)s\")\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n gitdiff_patch = os.path.join(save_dir, file_no_ext + \".gitdiff.patch\")\n os.system(f\"git diff HEAD > {gitdiff_patch}\")\n\n return logger", "def setup_logger(name, log_file, level=logging.DEBUG):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n if name in ( \"\", None ):\n raise \"No name\"\n return\n\n if log_file in ( \"\", None ):\n raise \"No log_file\"\n return\n\n formatter = logging.Formatter(\n fmt = '%(asctime)s.%(msecs)03d %(levelname)s File: \"%(pathname)s\", line %(lineno)d, in %(module)s - %(funcName)s: %(message)s',\n datefmt= '%Y-%m-%d %H:%M:%S'\n )\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def open_log(self, log_name='autotest'):\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(self.console_handler)\n\n self.__logtofile(log_name)\n\n return logger", "def get_logger(name: str, log_path: str = os.path.join(os.path.dirname(__file__), \"main.log\"),\n console: bool = False) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n # ensure that logging handlers are not duplicated\n for handler in list(logger.handlers):\n logger.removeHandler(handler)\n\n # rotating file handler\n if log_path:\n fh = RotatingFileHandler(path_join(log_path),\n maxBytes=10 * 2 ** 20, # 10 MB\n backupCount=1) # 1 backup\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # console handler\n if console:\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # null handler\n if not (log_path or console):\n logger.addHandler(logging.NullHandler())\n\n return logger", "def build_logger(self, parent_module):\n self._loggers[parent_module] = logger = logging.getLogger(parent_module)\n return logger", "def init_logger(name, path=None):\n import logging.handlers\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.propagate = 0\n _nf = ['[%(asctime)s]',\n '[%(name)s]',\n '[%(filename)20s:%(funcName)15s:%(lineno)5d]',\n 
'[%(levelname)s]',\n ' %(message)s']\n _cf = ['$GREEN[%(asctime)s]$RESET',\n '[%(name)s]',\n '$BLUE[%(filename)20s:%(funcName)15s:%(lineno)5d]$RESET',\n '[%(levelname)s]',\n ' $CYAN%(message)s$RESET']\n nformatter = logging.Formatter('-'.join(_nf))\n cformatter = ColoredFormatter('-'.join(_cf))\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(cformatter)\n\n if path:\n path += '/' + name + '.log'\n else:\n path = get_path('log') + '/' + name + '.log'\n rf = logging.handlers.RotatingFileHandler(path, maxBytes=5 * 1024 * 1024, backupCount=5)\n rf.setLevel(logging.DEBUG)\n rf.setFormatter(nformatter)\n\n logger.addHandler(ch)\n logger.addHandler(rf)\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'GNN-{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def setup_logger(logger_name, logfile='crawler.log'):\n _logger = logging.getLogger(logger_name)\n _logger.setLevel(logging.INFO)\n h = logging.handlers.RotatingFileHandler(filename=logfile,\n maxBytes=10e6, backupCount=1)\n f = logging.Formatter(\n '%(asctime)s %(processName)-10s %(levelname)-8s %(message)s')\n h.setFormatter(f)\n _logger.addHandler(h)\n return _logger", "def get_logger(name: str):\n # setup logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "def setup_logger():\n LOG_DIR = unicode( os.environ.get(u'usep_gh__LOG_DIR') )\n LOG_LEVEL = unicode( os.environ.get(u'usep_gh__LOG_LEVEL') )\n filename = u'%s/usep_gh_handler.log' % LOG_DIR\n formatter = logging.Formatter( u'[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s' )\n logger = logging.getLogger( __name__ )\n # logger = logging.getLogger( u'usep_gh_handler' )\n level_dict = { u'debug': logging.DEBUG, u'info':logging.INFO }\n logger.setLevel( level_dict[LOG_LEVEL] )\n file_handler = logging.FileHandler( filename )\n file_handler.setFormatter( formatter )\n logger.addHandler( file_handler )\n logger.debug( u'in utils.log_helper.setup_logger(); log initialized at %s' % unicode(datetime.datetime.now()) )\n return logger", "def getLogger(self, *args, **kwargs):\r\n return loggers.getLogger(*args, **kwargs)", "def logger(name=None):\r\n\r\n log = logging.getLogger(name or 'logging')\r\n if HANDLER and HANDLER not in log.handlers:\r\n log.addHandler(HANDLER)\r\n\r\n return log", "def get_logger(name: str, level: str = LOG_LEVEL) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(level)\n coloredlogs.install(\n level=level, logger=logger, fmt='%(asctime)s %(name)s: %(lineno)s %(levelname)s: %(message)s', field_styles=FIELD_STYLES\n )\n return logger", "def default_logger_creator(config):\n cfg = config[\"logger_config\"].copy()\n cls = cfg.pop(\"type\")\n # Provide default for logdir, 
in case the user does\n # not specify this in the \"logger_config\" dict.\n logdir_ = cfg.pop(\"logdir\", logdir)\n return from_config(cls=cls, _args=[cfg], logdir=logdir_)", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(logging_formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def _createlog(self):\n\t\tif self.toemail and self.fromemail and self.smtphost:\n\t\t\t# Use the email logger as the first logger, so that when sending the email (in :meth:`EmailLogger.close`) fails, it will still be logged to the log file/stdout/stderr\n\t\t\tself._loggers.append(EmailLogger(self))\n\t\tif self.log2stderr:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stderr, self._formatlogline))\n\t\tif self.log2stdout:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stdout, self._formatlogline))\n\t\tif self.log2file:\n\t\t\t# Create the log file\n\t\t\tlogfilename = ul4c.Template(self.logfilename, \"logfilename\").renders(job=self)\n\t\t\tlogfilename = url.File(logfilename).abs()\n\t\t\tself.logfileurl = str(url.Ssh(misc.sysinfo.user_name, misc.sysinfo.host_fqdn or misc.sysinfo.host_name, logfilename.local()))\n\t\t\tskipurls = [logfilename]\n\t\t\tlogfile = logfilename.open(mode=\"w\", encoding=self.encoding, errors=self.errors)\n\t\t\tif self.loglinkname is not None:\n\t\t\t\t# Create the log link\n\t\t\t\tloglinkname = ul4c.Template(self.loglinkname, \"loglinkname\").renders(job=self)\n\t\t\t\tloglinkname = url.File(loglinkname).abs()\n\t\t\t\tskipurls.append(loglinkname)\n\t\t\t\tlogfilename = logfilename.relative(loglinkname)\n\t\t\t\ttry:\n\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\texcept OSError as exc:\n\t\t\t\t\tif exc.errno == errno.EEXIST:\n\t\t\t\t\t\tloglinkname.remove()\n\t\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\t\t\tself._loggers.append(URLResourceLogger(self, logfile, skipurls, self._formatlogline))", "def getLogger(\n verbose: int = 0,\n filename: Optional[str] = None,\n name: str = \"ttslearn\",\n add_stream_handler: bool = True,\n) -> Logger:\n global _initialized\n logger = logging.getLogger(name)\n if verbose >= 10:\n logger.setLevel(logging.DEBUG)\n elif verbose > 0:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.WARN)\n\n if _initialized.get(name, False):\n return logger\n else:\n _initialized[name] = True\n\n if add_stream_handler:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(format))\n logger.addHandler(stream_handler)\n\n if filename is not None:\n Path(filename).parent.mkdir(parents=True, exist_ok=True)\n file_handler = logging.FileHandler(filename=filename)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(logging.Formatter(format))\n logger.addHandler(file_handler)\n\n return logger", "def create_logger(level=logging.NOTSET):\n _test = os.path.join(os.path.join(os.getcwd(), 'gnupg'), 'test')\n _now = datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\n _fn = os.path.join(_test, \"%s_test_gnupg.log\" % _now)\n _fmt = \"%(relativeCreated)-4d L%(lineno)-4d:%(funcName)-18.18s %(levelname)-7.7s %(message)s\"\n\n ## Add the GNUPG_STATUS_LEVEL LogRecord to all Loggers in the module:\n logging.addLevelName(GNUPG_STATUS_LEVEL, \"GNUPG\")\n logging.Logger.status = status\n\n if level > logging.NOTSET:\n logging.basicConfig(level=level, filename=_fn,\n filemode=\"a\", format=_fmt)\n logging.logThreads = True\n if 
hasattr(logging,'captureWarnings'):\n logging.captureWarnings(True)\n colouriser = _ansistrm.ColorizingStreamHandler\n colouriser.level_map[9] = (None, 'blue', False)\n colouriser.level_map[10] = (None, 'cyan', False)\n handler = colouriser(sys.stderr)\n handler.setLevel(level)\n\n formatr = logging.Formatter(_fmt)\n handler.setFormatter(formatr)\n else:\n handler = NullHandler()\n\n log = logging.getLogger('gnupg')\n log.addHandler(handler)\n log.setLevel(level)\n log.info(\"Log opened: %s UTC\" % datetime.ctime(datetime.utcnow()))\n return log", "def get_logger(level=None, name=None, filename=None, log_dir=None):\n if isinstance(log_dir, str):\n log_dir = Path(log_dir)\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n logger = lg.getLogger(name)\n\n # if a logger with this name is not already set up\n if len(logger.handlers) == 0:\n\n # get today's date and construct a log filename\n todays_date = dt.datetime.today().strftime(\"%Y_%m_%d\")\n\n if not log_dir:\n log_dir = settings.logs_folder\n\n log_filename = log_dir / \"{}_{}.log\".format(filename, todays_date)\n\n # if the logs folder does not already exist, create it\n if not log_dir.exists():\n log_dir.makedirs_p()\n # create file handler and log formatter and set them up\n formatter = lg.Formatter(\n \"%(asctime)s [%(process)d] %(levelname)s - %(name)s - %(\" \"message)s\"\n )\n if settings.log_file:\n handler = lg.FileHandler(log_filename, encoding=\"utf-8\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n if settings.log_console:\n handler = lg.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def getLogger(self):\n logger = logging.getLogger(self.name)\n logger.setLevel(self.level)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # add a rotating handler\n if not logger.handlers:\n handler = RotatingFileHandler(self.path, self.maxBytes, self.backupCount)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n # Log to stream for debugging\n streamHandler = logging.StreamHandler(sys.stdout)\n streamHandler.setFormatter(formatter)\n logger.addHandler(streamHandler)\n\n return logger", "def construct_logger(self,in_logger_file_path):\n if os.path.exists(in_logger_file_path):\n logging.config.fileConfig(in_logger_file_path)\n logger = logging.getLogger(os.path.basename(__file__))\n else:\n # If logger property/configuration file doesn't exist,\n # and logger object will be constructed with default properties.\n logger = logging.getLogger(os.path.basename(__file__))\n logger.setLevel(logging.DEBUG)\n # Create a new logger file\n logger_file_path_object = open(in_logger_file_path, 'a+')\n logger_file_path_object.close()\n # create a file handler\n handler = logging.FileHandler(in_logger_file_path)\n handler.setLevel(logging.INFO)\n # create a logging format\n formatter = logging.Formatter('[%(asctime)s - %(name)s - %(levelname)s] %(message)s')\n handler.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(handler)\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n logger.warning(\"The logger configuration file %s doesn't exist, \"\n \"so logger object will be constructed with default properties.\", in_logger_file_path)\n return logger", 
"def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def get_logger(name):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n # Console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n\n logger.addHandler(ch)\n\n return logger", "def logger(self) -> Logger:\n logger = getLogger(\"WatchTheDoor\")\n logger.setLevel(INFO)\n return logger", "def logger_initiate():\n logger.setLevel(logging.DEBUG)\n return logging.basicConfig(\n format=(\n '%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s %(message)s'),\n datefmt='%Y-%m-%d %H:%M:%S')", "def setup_log(self, log_file):\n directory = os.path.dirname(log_file)\n if directory:\n os.makedirs(directory, exist_ok=True)\n\n logger = logging.getLogger(log_file)\n formatter = logging.Formatter(config.LOG_FORMAT)\n\n file_handler = logging.FileHandler(log_file, mode='a')\n file_handler.setFormatter(formatter)\n\n logger.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger", "def getLogger(log_file, level=logging.INFO):\n name = \"new_logger\"\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n stream = logging.StreamHandler()\n stream.setFormatter(formatter)\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n logger.addHandler(stream)\n return logger", "def setup_logger(level):\n logger = loguru.logger\n logger.remove()\n\n # Hearth logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Hearth,\n format=LoggerFormats.Hearth\n )\n\n # Stethoscope logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Stethoscope,\n format=LoggerFormats.Stethoscope\n )\n\n return logger", "def create_logger(\n project_name: str,\n level: str = \"INFO\",\n log_dir: str = \"/tmp/logs\",\n file_name: Optional[str] = None,\n do_print: bool = True,\n simple_logging: bool = False,\n log_to_file: bool = False,\n rich_logging: bool = False,\n time_zone: Optional[str] = None,\n):\n import __main__\n\n if file_name is None:\n try:\n file_name = ntpath.basename(__main__.__file__).split(\".\")[0]\n except:\n file_name = \"logs\"\n\n logger = logging.getLogger(file_name)\n logger.handlers.clear()\n logger.setLevel(getattr(logging, level))\n\n if time_zone:\n from pytz import timezone, utc\n def time_formatter(*args):\n # TODO: Doesnt work with rich formatter\n utc_dt = utc.localize(datetime.datetime.utcnow())\n my_tz = timezone(time_zone)\n converted = utc_dt.astimezone(my_tz)\n return converted.timetuple()\n\n logging.Formatter.converter = time_formatter\n\n if rich_logging:\n from rich.logging import RichHandler\n stream_format = f\"{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = RichHandler(omit_repeated_times=False)\n else:\n stream_format = f\"%(asctime)s:%(levelname)s:{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = logging.StreamHandler()\n\n file_formatter = stream_formatter = logging.Formatter(\n stream_format, \"%Y-%m-%d %H:%M:%S\"\n )\n\n if simple_logging:\n file_formatter = logging.Formatter(\"%(message)s\")\n 
stream_formatter = logging.Formatter(\"%(message)s\")\n\n if log_to_file:\n date = datetime.date.today()\n date = \"%s-%s-%s\" % (date.day, date.month, date.year)\n log_file_path = os.path.join(log_dir, \"%s-%s.log\" % (file_name, date))\n\n create_folder(log_dir)\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n\n if do_print:\n stream_handler.setFormatter(stream_formatter)\n logger.addHandler(stream_handler)\n\n logger.propagate = False\n\n return logger", "def init_logger(level=logging.DEBUG, when=\"D\", backup=7,\n _format=\"%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s\",\n datefmt=\"%m-%d %H:%M:%S\"):\n formatter = logging.Formatter(_format, datefmt)\n logger = logging.getLogger()\n logger.setLevel(level)\n\n log_path = ops.join(os.getcwd(), 'logs/shadownet.log')\n _dir = os.path.dirname(log_path)\n if not os.path.isdir(_dir):\n os.makedirs(_dir)\n\n handler = handlers.TimedRotatingFileHandler(log_path, when=when, backupCount=backup)\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n handler = handlers.TimedRotatingFileHandler(log_path + \".log.wf\", when=when, backupCount=backup)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n handler = logging.StreamHandler()\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def setup_logger(level, name, use_rotating_handler=True):\r\n \r\n logger = logging.getLogger(name)\r\n logger.propagate = False # Prevent the log messages from being duplicated in the python.log file\r\n logger.setLevel(level)\r\n \r\n log_file_path = os.path.join( os.environ['SPLUNK_HOME'], 'var', 'log', 'splunk', 'radius_auth_rest_handler.log' )\r\n \r\n if use_rotating_handler:\r\n file_handler = logging.handlers.RotatingFileHandler(log_file_path, maxBytes=25000000, backupCount=5)\r\n else:\r\n file_handler = logging.FileHandler(log_file_path)\r\n \r\n formatter = logging.Formatter('%(asctime)s %(levelname)s ' + name + ' - %(message)s')\r\n file_handler.setFormatter(formatter)\r\n \r\n logger.addHandler(file_handler)\r\n \r\n return logger", "def _logger(self) -> logging.Logger:\n return logging.getLogger(\n type(self).__name__\n )", "def init_logger(level=logging.DEBUG, when=\"D\", backup=7,\r\n _format=\"%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s\",\r\n datefmt=\"%m-%d %H:%M:%S\"):\r\n formatter = logging.Formatter(_format, datefmt)\r\n logger = logging.getLogger()\r\n logger.setLevel(level)\r\n\r\n log_path = ops.join(os.getcwd(), 'logs/shadownet.log')\r\n _dir = os.path.dirname(log_path)\r\n if not os.path.isdir(_dir):\r\n os.makedirs(_dir)\r\n\r\n handler = handlers.TimedRotatingFileHandler(log_path, when=when, backupCount=backup)\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n handler = handlers.TimedRotatingFileHandler(log_path + \".log.wf\", when=when, backupCount=backup)\r\n handler.setLevel(logging.WARNING)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n handler = logging.StreamHandler()\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n return logger", "def _logger(self):\n logger = logging.getLogger(self.NAME)\n logger.setLevel(self.LOG_LEVEL)\n shandler = logging.StreamHandler(sys.stdout)\n fmt = '\\033[1;32m%(levelname)-5s 
%(module)s:%(funcName)s():'\n fmt += '%(lineno)d %(asctime)s\\033[0m| %(message)s'\n shandler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(shandler)\n return logger", "def _init_logger(self):\n self.logger = logging.getLogger('WSClientAPILogger')\n self.logger.setLevel(logging.DEBUG)\n self.logger_handler = logging.FileHandler(self.__class__.__name__ + '.log')\n self.logger_handler.setLevel(logging.DEBUG)\n self.logger_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m %H:%M:%S')\n self.logger_handler.setFormatter(self.logger_formatter)\n self.logger.addHandler(self.logger_handler)", "def _init_logger(self):\n # Create log directory, if it doesn't already exist.\n self._create_directory(directory=self._log_directory)\n log_filename = \"{0}/{1}.log\".format(self._log_directory, self._program)\n\n # Add the date to the log file names.\n logging.basicConfig(\n filename=log_filename,\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s|%(name)s|%(levelname)-5s| %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p')\n\n # define a Handler which writes LOG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n #\n # Note: Anything above the logging level is displayed to stdout.\n #\n # Level Numeric value\n # CRITICAL\t50\n # ERROR \t40\n # WARNING\t30\n # LOG 25 (our log level)\n # INFO\t 20\n # DEBUG \t10\n # NOTSET\t0\n #\n # Add a logging level to always display to stderr.\n logging.addLevelName(self._LOG_LEVEL, self._LOG_NAME)\n if self._debug:\n console.setLevel(logging.DEBUG)\n else:\n console.setLevel(self._LOG_LEVEL)\n # Set a format which is simpler for console use.\n formatter = logging.Formatter('%(name)s|%(levelname)-5s| %(message)s')\n console.setFormatter(formatter)\n # Add the handler to the root logger.\n logging.getLogger('').addHandler(console)\n self._logger = logging.getLogger()", "def get_logger(name, log_dir, config_dir):\n config_dict = json.load(open(config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(__lvl__)\n ch = logging.StreamHandler()\n ch.setLevel(__lvl__)\n preformat = f'[{logger.name}]'\n # [%(threadName)s/%(levelname)s] = [MainThread/INFO]\n ch.setFormatter(logging.Formatter(fmt=preformat + ' %(levelname)s [%(asctime)s] %(message)s',\n datefmt='%H:%M:%S'))\n logger.addHandler(ch)\n return logger", "def Logger(name, level=None):\n logger = logging.getLogger(name)\n if level:\n logger.setLevel(level)\n return logger", "def setup_logger(log_file_path =\"\"):\n formatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'purple',\n }\n )\n logging.basicConfig(handlers=[logging.FileHandler(log_file_path, 'w', 'utf-8')],\n format=\"%(message)s\"\n )\n logger = logging.getLogger('')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def 
log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger", "def logger():\n return logging.getLogger(__name__)", "def setup_logger():\n formatter = ColoredFormatter(\n (\n '%(log_color)s%(levelname)-5s%(reset)s '\n '%(yellow)s[%(asctime)s]%(reset)s'\n '%(green)s %(name)s %(purple)s %(filename)s %(purple)s %(funcName)s %(purple)s:%(lineno)d%(reset)s '\n '%(bold_blue)s%(message)s%(reset)s'\n ),\n datefmt='%y-%m-%d %H;%M:%S',\n log_colors={\n 'DEBUG': 'blue',\n 'INFO': 'yellow',\n 'WARNING': 'red',\n 'ERROR': 'blue,bg_bold_red',\n 'CRITICAL': 'red,bg_white',\n }\n )\n\n logger = logging.getLogger('shen-yue-is-beautiful')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def get_logger(name, filename=None, stream_loglevel=\"INFO\", file_loglevel=\"DEBUG\"):\n if name in loggers:\n return loggers[name]\n logger = logging.getLogger(name)\n logger.propagate = False\n\n with_color = supports_color()\n\n pre1, suf1 = hash_coloured_escapes(name) if with_color else (\"\", \"\")\n pre2, suf2 = hash_coloured_escapes(name + \"salt\") if with_color else (\"\", \"\")\n formatter = logging.Formatter(\n \"%(asctime)s %(levelname)s {}+{}+{} \"\n \"%(name)s: %(message)s\".format(pre1, pre2, suf1),\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n if filename is not None:\n ch_file = logging.handlers.RotatingFileHandler(\n filename, maxBytes=5 * 1024 * 1024, backupCount=10\n )\n ch_file.setLevel(file_loglevel)\n ch_file.setFormatter(formatter)\n logger.addHandler(ch_file)\n ch = logging.StreamHandler()\n ch.setLevel(stream_loglevel)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n loggers[name] = logger\n\n logger.once_dict = {}\n\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def make_default_logger(file_path=LOG_FILENAME):\n logger = logging.getLogger(\"Logger\")\n if not len(logger.handlers):\n logger.setLevel(logging.DEBUG)\n # Create a handler and attach it to the logger\n try:\n handler = logging.handlers.RotatingFileHandler(\n file_path, maxBytes=5120000, backupCount=7\n )\n except OSError as e:\n if e.errno == 2:\n errprint(\n \"\\nWarning: %s: %s. \"\n \"Have you created the directory for the log?\"\n % (\n e.strerror,\n file_path,\n )\n )\n elif e.errno == 13:\n errprint(\n \"\\nWarning: %s: %s. 
\"\n \"Cannot access file as user: %s\"\n % (\n e.strerror,\n file_path,\n getpass.getuser(),\n )\n )\n else:\n errprint(\n \"\\nIOError [%s]: %s\\n%s\"\n % (e.errno, e.strerror, traceback.format_exc())\n )\n errprint(\n \"Juriscraper will continue to run, and all logs will be \"\n \"sent to stderr.\"\n )\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s: %(message)s\")\n )\n logger.addHandler(handler)\n return logger", "def get_logger(log_file):\n # Create logger and formatter.\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')\n\n # Create and attach stream handler.\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Create and attach file handler.\n file_handler = logging.handlers.TimedRotatingFileHandler(\n log_file, when='d', encoding='utf-8')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n return logger", "def start_logger(app_name, calling_function):\n # Create logs directory if not present\n Path('logs').mkdir(parents=True, exist_ok=True)\n\n # Start logger\n logger.start_logger(app_name)\n\n module_logger = logging.getLogger('{app_name}.{calling_function}'.\\\n format(app_name=app_name, calling_function=calling_function))\n\n return module_logger", "def __CreateLog(self, log_name, log_level=NOTSET, log_handler=FILE,\n stream=sys.stderr):\n logger = logging.getLogger(log_name)\n\n # Update log level to reflect changes. If a higher log level is given\n # the logger should raise it's boundary.\n if log_level < logger.level or logger.level == logging.NOTSET:\n logger.setLevel(log_level)\n\n if (log_name in self.__log_table and\n self.__log_table[log_name] == Logger.FILE_AND_CONSOLE):\n # Don't add any more handlers.\n return\n\n # Create an entry for log name.\n if log_name not in self.__log_table:\n self.__log_table[log_name] = Logger.NONE\n\n if log_handler != Logger.NONE:\n fmt = ('[%(asctime)s::%(levelname)s::' + self.__lib_sig +\n '] %(message)s')\n # Add FILE handler if needed.\n if (log_handler == Logger.FILE or\n log_handler == Logger.FILE_AND_CONSOLE and\n self.__log_table[log_name] != Logger.FILE):\n if not os.path.exists(self.__log_path):\n os.makedirs(self.__log_path)\n fh = logging.FileHandler(os.path.join(self.__log_path,\n '%s.log' % log_name))\n fh.setLevel(log_level)\n fh.setFormatter(logging.Formatter(fmt))\n logger.addHandler(fh)\n # Binary arithmetic to yield updated handler.\n self.__log_table[log_name] = self.__log_table[log_name] + Logger.FILE\n\n # Add CONSOLE handler if needed.\n if (log_handler == Logger.CONSOLE or\n log_handler == Logger.FILE_AND_CONSOLE and\n self.__log_table[log_name] != Logger.CONSOLE):\n ch = logging.StreamHandler(stream)\n ch.setLevel(log_level)\n ch.setFormatter(logging.Formatter(fmt))\n logger.addHandler(ch)\n # Binary arithmetic to yield updated handler.\n self.__log_table[log_name] = self.__log_table[log_name] + Logger.CONSOLE", "def make_logger(name=str(os.getpid())):\n if not sys.platform.startswith(\"win\") and sys.stderr.isatty():\n def add_color_emit_ansi(fn):\n \"\"\"Add methods we need to the class.\"\"\"\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if 
levelno >= 50:\n color = '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = '\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not use log here.\n return fn(*new_args)\n return new\n log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)\n log_file = os.path.join(gettempdir(), str(name).lower().strip() + \".log\")\n log.basicConfig(level=-1, filemode=\"w\", filename=log_file)\n log.getLogger().addHandler(log.StreamHandler(sys.stderr))\n adrs = \"/dev/log\" if sys.platform.startswith(\"lin\") else \"/var/run/syslog\"\n try:\n handler = log.handlers.SysLogHandler(address=adrs)\n except:\n log.debug(\"Unix SysLog Server not found, ignored Logging to SysLog.\")\n else:\n log.getLogger().addHandler(handler)\n log.debug(\"Logger created with Log file at: {0}.\".format(log_file))\n return log", "def get_logger(level=logging.INFO, quite=False, debug=False, to_file=''):\n assert level in [logging.DEBUG, logging.INFO, logging.WARNING, logging.CRITICAL]\n logger = logging.getLogger('main')\n formatter = logging.Formatter('%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')\n if debug:\n level = logging.DEBUG\n logger.setLevel(level=level)\n if not quite:\n if to_file:\n fh = logging.FileHandler(to_file)\n fh.setLevel(level=level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n else:\n ch = logging.StreamHandler()\n ch.setLevel(level=level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "def get_logger(name, level='INFO', terminal_log=True, file_log=False,\n file_name=None, file_max_bytes=1048576, file_backup_count=3,\n email_on_warnings=True, email_on_errors=True):\n # Get the root logger and set the level\n log_level = getattr(logging, level.upper())\n root_logger = logging.getLogger('')\n root_logger.setLevel(log_level)\n\n handlers = []\n # Form the handler(s) and set the level\n if terminal_log:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(log_level)\n handlers.append(stream_handler)\n\n # Create email warning handler\n if email_on_warnings:\n # Note, the placeholder in the subject will be replaced by the hostname\n warning_email_handler = CustomSMTPWarningHandler(\n mailhost=MAIL_HOST, fromaddr=WARNING_EMAIL,\n toaddrs=[WARNING_EMAIL], subject='Warning from: {}')\n warning_email_handler.setLevel(logging.WARNING)\n handlers.append(warning_email_handler)\n\n # Create email error handler\n if email_on_errors:\n # Note, the placeholder in the subject will be replaced by the hostname\n error_email_handler = CustomSMTPHandler(\n mailhost=MAIL_HOST, fromaddr=ERROR_EMAIL,\n toaddrs=[ERROR_EMAIL], subject='Error from: {}')\n error_email_handler.setLevel(logging.ERROR)\n handlers.append(error_email_handler)\n\n # Create rotating file handler\n if file_log:\n if file_name is None:\n file_name = name + '.log'\n file_handler = RotatingFileHandler(file_name, maxBytes=file_max_bytes,\n backupCount=file_backup_count)\n file_handler.setLevel(log_level)\n handlers.append(file_handler)\n\n # Add formatters to the handlers and add the handlers to the root_logger\n formatter = logging.Formatter(\n '%(asctime)s:%(name)s: %(levelname)s: %(message)s')\n for handler in handlers:\n handler.setFormatter(formatter)\n 
root_logger.addHandler(handler)\n\n # Create a named logger and return it\n logger = logging.getLogger(name)\n return logger", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def get_logger(name):\n filename = \"file_sync.log\"\n _create_log_dir()\n filepath = os.path.join(FLASK_APP.config[\"LOG_DIR\"], filename)\n logger = logging.getLogger(name)\n handler = TimedRotatingFileHandler(filepath, when=\"midnight\")\n logger.setLevel(LOG_LEVELS[FLASK_APP.config[\"LOG_LEVEL\"]])\n handler.setLevel(LOG_LEVELS[FLASK_APP.config[\"LOG_LEVEL\"]])\n log_format = (\"%(asctime)s %(levelname)s %(pathname)s\"\n \":%(funcName)s: %(lineno)d - %(message)s\")\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def __setup_logger(name, log_file, level=logging.WARNING, stream=True):\n log_format = logging.Formatter(\"%(asctime)s%(filename)s:%(lineno)-3d %(levelname)s %(message)s\")\n handler = logging.FileHandler(log_file)\n handler.setFormatter(log_format)\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n if stream is True:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(log_format)\n logger.addHandler(stream_handler)\n return logger", "def init_logger(self, logger_path,\n logger_name='Experiment') -> logging.Logger:\n self.logger = logging.getLogger(logger_name)\n\n self.logger.setLevel(logging.INFO)\n\n file_handler = logging.FileHandler(logger_path) # TOD bug here\n formatter = logging.Formatter('%(asctime)s||%(message)s')\n file_handler.setFormatter(formatter)\n self.logger.addHandler(file_handler)" ]
[ "0.82856834", "0.7896472", "0.77731115", "0.7539057", "0.7495882", "0.7493682", "0.7396028", "0.73747236", "0.7198107", "0.71252424", "0.71039444", "0.7093442", "0.7016825", "0.69982064", "0.6978802", "0.69182163", "0.6911099", "0.6907273", "0.6900183", "0.6848638", "0.6835359", "0.68348426", "0.6834405", "0.6825177", "0.6819852", "0.6809382", "0.68091196", "0.67921066", "0.67820925", "0.678005", "0.67744654", "0.66990095", "0.6695709", "0.66901445", "0.6690004", "0.66721797", "0.6666718", "0.665656", "0.6643656", "0.66378945", "0.66354424", "0.66354424", "0.66354424", "0.66354424", "0.6631505", "0.6624898", "0.6622792", "0.6616053", "0.660862", "0.6594702", "0.6592152", "0.6591533", "0.6587552", "0.6584888", "0.6584398", "0.65825605", "0.6582057", "0.6582034", "0.6579204", "0.6578505", "0.6578289", "0.65697604", "0.6567522", "0.6567368", "0.6560785", "0.6559942", "0.65598905", "0.6542689", "0.65370536", "0.653086", "0.6523016", "0.65110755", "0.65092796", "0.6504975", "0.6501715", "0.6501306", "0.6499509", "0.6488695", "0.64831364", "0.64814365", "0.6475978", "0.64686733", "0.64664185", "0.64651394", "0.64401597", "0.64376926", "0.642411", "0.6408168", "0.64028454", "0.6401269", "0.64004594", "0.6395406", "0.63914806", "0.6389481", "0.63886017", "0.6383294", "0.6377627", "0.6364525", "0.63520616", "0.6340461", "0.63338685" ]
0.0
-1
returns the value of degrees Fahrenheit converted to Celsius
def toCelsius(farenheit):
    return (farenheit - 32) * 5 / 9
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_f_to_c(temp_in_farenheit): ## ##\n celsiustemp = round((temp_in_farenheit - 32) * 5/9, 1) ##\n return celsiustemp ##", "def convert_f_to_c(temp_in_farenheit):\n celcius_temp = round(float((temp_in_farenheit) - 32)*(5/9),1)\n return(celcius_temp)", "def convert_f_to_c(temp_in_farenheit):\n temp_in_celcius = ((temp_in_farenheit - 32) * 5) / 9\n temp_in_celcius = round(temp_in_celcius, 1)\n return temp_in_celcius", "def convert_f_to_c(temp_in_farenheit):\n cel = round((((temp_in_farenheit - 32) * 5) / 9),1)\n return cel", "def fahr_to_celsius(temp):\n tempInCel = (temp - 32) * 5/9\n return tempInCel", "def convert_f_to_c(temp_in_farenheit):\n \n temp=round((float(temp_in_farenheit)-32)*5/9,1)\n \n return (temp)", "def temperature() -> float:", "def fahrenheit_to_celsius():\n fahrenheit = ent_temperature.get()\n celsius = (5 / 9) * (float(fahrenheit) - 32)\n lbl_result[\"text\"] = f\"{round(celsius, 2)} \\N{DEGREE CELSIUS}\"", "def fahrenheit_to_celsius(temp):\n return (temp - 32) * 5/9", "def fahr_to_celcius(temp_fahr):\n temp_celcius = (temp_fahr - 32) * 5/9\n return temp_celcius", "def celsius(fahrenheit):\n return 5 / 9 * (fahrenheit - 32)", "def tempConvert(temp, unit):\n if unit == 'F':\n celsius = (temp - 32) * 5 / 9\n return celsius\n else:\n return temp", "def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius", "def fahrenheit_to_celsius(fahrenheit):\n offset = 32\n multiplier = 5 / 9\n celsius = (fahrenheit - offset) * multiplier\n print(\"inside function:\", fahrenheit, offset, multiplier, celsius)\n return celsius", "def celsius_to_fahr(temp):\n return temp * (9/5) + 32", "def fahrenheit(celsius):\n return 9 / 5 * celsius + 32", "def GetFahrenheit(self):\n return self.GetCelcius()*1.8+32", "def cels_fahr(cels):\n temp = cels * 9.0 / 5 + 32\n return temp", "async def c(self, f : float):\n c = (f-32) * 5/9\n await self.bot.say(\"{0} Celsius\".format(c))", "def convert_to_celsius(fahrenheit):\n return (fahrenheit - 32) * 5 / 9", "def toCelcius (x):\r\n\r\n\tc = x-32\r\n\tc = 5*c/9\r\n\treturn c", "def translate_from_farenheit_to_celsius(farenheit: float) -> float:\n return (farenheit - 32) * 5./9.", "def fahr_to_celsius(fahr):\n result_in_celsius = (fahr - 32) + 5/9\n return result_in_celsius", "def celsius_conv(self, f):\n if f == 0:\n return -17.7778\n else:\n return (f - 32.0) * (5.0 / 9.0)", "def cels_to_fahr():\n while True:\n celsius = input(\"Podaj temperaturę w stopniach Celsjusza: \")\n try:\n int(celsius)\n break\n except ValueError:\n try:\n float(celsius)\n break\n except ValueError:\n print(\"Nieprawidłowe dane, podaj temperaturę jako wartość liczbową.\")\n print('''Wzór na przeliczanie stopni Celsjusza na stopnie Fahrenheita:\n [\\u00b0F] = [\\u00b0C] * 9/5 + 32''')\n print(\"Podana temperatura przeliczona na stopnie Fahnrenheita: \", end=\"\")\n print(float(celsius) * 9 / 5 + 32)", "def f2c_qa_function():\n F = float(input(\"Provide a Fahrenheit temperature in degrees: \"))\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def kelvin_to_celsius(temp):\n return temp - 273.15", "def fahrenheit(celsius):\n return ((celsius/5)*9)+32", "def convert_to_celsius(self):\n try:\n self.root.ids.fahrenheit_input.hint_text = 'Enter amount in Fahrenheit'\n self.root.ids.celsius_input.text = '{:.2f}'.format((float(self.root.ids.fahrenheit_input.text) - 32)\n * 5 / 9)\n except 
ValueError:\n self.root.ids.fahrenheit_input.text = ''\n self.root.ids.fahrenheit_input.hint_text = 'Invalid number'", "def fahrenheitToCelcius(fahrenheit:float, ndigits = 2)->float:\n return round((float(fahrenheit) - 32) * 5 / 9, ndigits)", "def tempAir(sample):\n sample *= 1.0\n sample /= 1000\n celsius = (sample - 0.5) * 100\n return round(celsius,2)", "def c_to_f(celsius):\n fahrenheit = round((celsius * 1.8) + 32, 2)\n return fahrenheit", "def _celsius_to_fahrenheit(self) -> None:\n if self.units == \"celsius\":\n self.value = (((self.value / 5) * 9) + 32).__round__(2)\n self.units = \"fahrenheit\"\n else:\n msg = (\n \"Not a valid unit conversion, expected units to be in 'celsius' but instead \"\n + f\"units were in {self.units}.\"\n )\n raise ValueError(msg)", "def fahrenheit(T_in_celsius):\n return (T_in_celsius * 9 / 5) + 32", "def celciusToFahrenheit(celcius: float, ndigits: int = 2)->float:\n return round((float(celcius) *9 / 5) + 32 , ndigits)", "def target_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_c)", "def CtoF (celsius):\n f=(1.8)*celsius+32\n return int(round(f))", "def temperature(self) -> float:\n # Start a measurement then poll the measurement finished bit.\n self.temp_start = 1\n while self.temp_running > 0:\n pass\n # Grab the temperature value and convert it to Celsius.\n # This uses the same observed value formula from the Radiohead library.\n temp = self._read_u8(_REG_TEMP2)\n return 166.0 - temp", "def fahrenheit(self):\n return (self.celsius * 9 / 5) + 32", "def _calculate_temp_in_c(temp):\r\n return str((temp * 9 / 5.0 + 32) if temp else \"\")", "def target_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_c\"))\r\n return kelvin_to_celsius(self._target_temperature)", "def celsius_to_fahrenheit(celsius):\n fahrenheit = (celsius * (9.0/5.0)) + 32.0\n return fahrenheit", "def celcius_to_fahrenheit(celcius_float):\n return celcius_float * 1.8 + 32", "def eco_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_low_c)", "def get_temperature(self): # This function implements the equations needed to convert the digital data to degrees celsius\n C_1, C_2, C_3, C_4, C_5, C_6=self.calibration_constants()\n self.digital_temp_data() \n dT = self.tempadc-(C_5*(2**8))\n temperature=(2000+(dT*(C_6/(2**23))))/100\n return temperature, dT", "def c2f(t):\r\n return round(9*t/5 + 32)", "def convert(temp_in_c):\n \n return temp_in_c * (9/5) + 32", "def GetCelcius(self):\n ADCMax = (self.ADDevice.ADSamples * 1023) /(2**self.ADDevice.ADBitshift)\n sample=self.Get()\n R = self.RefVoltage / ADCMax\n Volt = sample*R-.5 \n return Volt/self.VoltPerDegree", "def convert_to_fahrenheit(self):\n try:\n self.root.ids.celsius_input.hint_text = 'Enter amount in Celsius'\n self.root.ids.fahrenheit_input.text = '{:.2f}'.format(float(self.root.ids.celsius_input.text)\n * 9.0 / 5 + 32)\n except ValueError:\n self.root.ids.celsius_input.text = ''\n self.root.ids.celsius_input.hint_text = 'Invalid number'", "def eco_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_low_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_low)", "def farenheit(ctemp):\n return round(9.0/5.0 * 
ctemp + 32)", "def get_temp(val):\n if val in ['', 32767]:\n return None\n return temperature(val / 100., 'C').value('F')", "def eco_temperature_high_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_f\"))\r\n return celsius_to_fahrenheit(self.eco_temperature_high_c)", "def convert_c_to_f(temp_c):\n try:\n temp_f = (temp_c * 1.8) + 32\n temp_f = round(temp_f, 2)\n except TypeError:\n temp_f = False\n return temp_f", "def to_fahrenheit(celsius):\n\n return (1.8*celsius) + 32", "def convertCelsiusToFahrenhe(C):\n if isinstance(C, str) == True:\n raise ValueError(\"Celsius cannot be a string value\")\n if isinstance(C,complex) == True:\n raise ValueError(\"Celsius cannot be a complex value\")\n if isinstance(C,int) == True:\n raise ValueError(\"Celsius should be a float value, example: 90.00\")\n \n F = (9.0/5.0 * C + 32.0)\n return F", "def temperature(self):\r\n self._read_temperature()\r\n return self._t_fine / 5120.0", "def convertFarenheitToCelsius(F):\n if isinstance(F, str) == True:\n raise ValueError(\"Farenheit cannot be a string value\")\n if isinstance(F,complex) == True:\n raise ValueError(\"Farenheit cannot be a complex value\")\n if isinstance(F,int) == True:\n raise ValueError(\"Farenheit should be a float value, example: 120.50\")\n \n C = (F-32)/1.8\n return C", "def convert_temperature(self, event):\n try:\n #Compare other unit to one unit(celsius) then compare that unit to celsius\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Celsius\": current_value * 1.0, \"Fahrenheit\": (current_value - 32) / 1.8, \"Kelvin\": current_value - 273.15, \"Reaumur\": current_value / 0.8, \"Rankine\": (current_value - 491.67) / 1.8, \"Newton\": current_value / 0.33, \"Romer\": (current_value - 7.5) / 0.525, \"Delisle\": 100 - current_value * 0.66666667}\n new_value={\"Celsius\": unit_comp[current_unit], \"Fahrenheit\": unit_comp[current_unit] * 1.8 + 32, \"Kelvin\": unit_comp[current_unit] + 273.15, \"Reaumur\": unit_comp[current_unit] * 0.8, \"Rankine\": unit_comp[current_unit] * 1.8 + 491.67, \"Newton\": unit_comp[current_unit] * 0.33, \"Romer\": unit_comp[current_unit] * 0.525 + 7.5, \"Delisle\": (100 - unit_comp[current_unit]) * 1.5}\n printer = \"Value is invalid.\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(new_value[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def eco_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"eco_temperature_high_c\"))\r\n return kelvin_to_celsius(self._eco_temperature_high)", "def convert_celsius_to_fahrenheit(celsius):\n return celsius * 9.0 / 5 + 32", "def temperature_unit(self):\n return TEMP_FAHRENHEIT", "def temperature(self):\n return self.read_short(65) / 340.0 + 36.53", "def conversion(temp, mode):\n if mode == 1:\n c_to_f = (temp * 9/5) + 32\n return c_to_f\n else:\n f_to_c = (temp - 32) * 5 / 9\n return f_to_c", "def kelvinToCelcius(kelvin:float, ndigits = 2)->float:\n return round(float(kelvin) - 273.15, ndigits)", "def ambient_temperature_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_c\"))\r\n return 
kelvin_to_celsius(self._ambient_temperature)", "def f2c(t):\r\n return round(5/9 * (t-32))", "def kelvin_to_fahr(temp):\n temp_c = kelvin_to_celsius(temp)\n result = celsius_to_fahr(temp_c)\n return result", "def celsius_to_fahr(degrees_celsius: float) -> float:\n return (degrees_celsius * 9.0 / 5.0) + 32.0", "def cu_energy(self,val,units=\"1/cm\"):\n if units in self.units[\"energy\"]:\n x = conversion_facs_energy[units]\n i_val = x*val\n \n cu = self.current_units[\"energy\"] \n if cu != \"1/fs\":\n y = conversion_facs_energy[units] \n return i_val/y\n \n return i_val", "def temperature_unit(self):\n return TEMP_CELSIUS", "def temperature_unit(self):\n return TEMP_CELSIUS", "def temperature_unit(self):\n return TEMP_CELSIUS", "def temperature_unit(self):\n return TEMP_CELSIUS", "def temperature_unit(self):\n return TEMP_CELSIUS", "def temperature_unit(self):\n return TEMP_CELSIUS", "def temperature_unit(self):\n return TEMP_CELSIUS", "def ctof(temp):\n return temp * 9/5 + 32 # functions should be surrounded by 2 blank lines", "def ambient_temperature_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"ambient_temperature_f\"))\r\n return celsius_to_fahrenheit(self.ambient_temperature_c)", "def fahrenheitToKelvin(fahrenheit:float, ndigits = 2)->float:\n return round(((float(fahrenheit) - 32) * 5 / 9) + 273.5, ndigits)", "def convert(df,celsius):\r\n converted_temp=(df[celsius]*(9/5))+32\r\n return converted_temp", "def celsius(self):\n return self._value", "def target_temperature_high_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperatue_high_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_high_c)", "def celcius_2_kelvin(x):\n return x + 273.15", "def temperature_f(self, tuple_data, status):\r\n fahr_search = Temperature.fahr.search(status)\r\n temperature = None\r\n try:\r\n if fahr_search != None:\r\n temperature = fahr_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n else:\r\n celcius_search = Temperature.celcius.search(status)\r\n if celcius_search != None:\r\n temperature = celcius_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n temperature = ((9.0/5) * temperature) + 32\r\n except ValueError:\r\n print \"Encoding error on '%s'\" % (status)\r\n return temperature", "def target_temperature_low_f(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_f\"))\r\n return celsius_to_fahrenheit(self.target_temperature_low_c)", "def _temperature(self, p_input:float) -> float:\n if self._unit_in == 'R':\n temp_K = p_input*5.0/9.0\n elif self._unit_in == 'F':\n temp_K = (p_input+459.67)/9.0*5.0\n elif self._unit_in == 'C':\n temp_K = p_input+273.15\n elif self._unit_in == 'K':\n temp_K = p_input\n \n if self._unit_out == 'R':\n return (temp_K*9.0/5.0)\n elif self._unit_out == 'F':\n return (temp_K*9.0/5.0-459.67) \n elif self._unit_out == 'C':\n return (temp_K-273.15)\n elif self._unit_out == 'K':\n return temp_K", "def temperature_unit(self) -> str:\n return TEMP_CELSIUS", "def target_temperature_low_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_low_c\"))\r\n return kelvin_to_celsius(self._target_temperature_low)", "def kelvin_to_fahrenheit(kelvin_temp):\n\n\treturn math.floor(9/5 * (kelvin_temp - 273) + 32)", "def temperature(self):\n return float(self._current_observation['temp_c'])", "def 
read_object_temperatureF(self, ):\n return self.read_object_temperatureC() * (9.0/5.0) + 32.0", "def calculateTemperature(self):\n \n # CIE XYZ space\n self.X = (1/0.17697)*((0.49)*self.R + (0.31)*self.G + (0.2)*self.B)\n self.Y = (1/0.17697)*((0.17697)*self.R + (0.81240)*self.G + (0.01063)*self.B)\n self.Z = (1/0.17697)*((0)*self.R + (0.010)*self.G + (0.99)*self.B)\n\n # CIE Chromaticities xy\n self.x = self.X/(self.X + self.Y + self.Z)\n self.y = self.Y/(self.X + self.Y + self.Z)\n \n # CIE Chromaticities uv\n #self.u = (0.4661*self.x + 0.1593*self.y)/(self.y - 0.15735*self.x + 0.2424)\n #self.v = (0.6581*self.y)/(self.y - 0.15735*self.x + 0.2424)\n \n # constant for McCamy's/Hernandez-Andrés formula\n n = (self.x - self.x_e)/(self.y - self.y_e)\n \n # Correlated color temperature according to Hernández-Andrés (1999)\n self.color_temp = ( self.A_0 + \n self.A_1*np.exp(-n/self.t_1) + \n self.A_2*np.exp(-n/self.t_2) + \n self.A_3*np.exp(-n/self.t_3) )\n \n # Delete too high values\n self.color_temp[self.color_temp > 30000] = 0\n \n # Affichage de la CCT\n self.mean_temp = int(round(self.color_temp.mean()))\n self.mean_temp_label.setText(\"Temperature moyenne = \"+str(self.mean_temp))\n self.mean_temp_label.adjustSize()\n \t\n # Affichage de l'illuminance (Y)\n self.mean_illu = int(round((self.Y.mean())))\n self.illuminance_label.setText(\"Illuminance moyenne = \"+str(self.mean_illu))\n self.illuminance_label.adjustSize()", "def f2c_cml_function():\n import sys\n\n F = float(sys.argv[1])\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def convert_temp(self, temperature):\n return 1.8 * (temperature - 273) + 32", "def locked_temp_min_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"locked_temp_min_c\"))\r\n return kelvin_to_celsius(self._locked_temp_min)", "def degc_to_kelvin(x):\r\n return x + 273.15", "def target_temperature_high_c(self) -> float:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"target_temperature_high_c\"))\r\n return kelvin_to_celsius(self._target_temperature_high)", "def unit_of_measurement(self):\n return TEMP_CELCIUS", "def convertCelsiusToFahrenheit(degrees):\n degrees = str(degrees)\n convert = (decimal.Decimal(degrees) / decimal.Decimal('5') * 9) + 32\n return float(convert)" ]
[ "0.8010471", "0.7904015", "0.7749893", "0.7748052", "0.77476305", "0.77396774", "0.76843536", "0.7657182", "0.76182777", "0.7557405", "0.74652946", "0.72664905", "0.7263454", "0.7263454", "0.7261374", "0.72476894", "0.7239142", "0.72153395", "0.7209384", "0.7199643", "0.7175355", "0.7162034", "0.7154658", "0.7147282", "0.71151745", "0.70987517", "0.7053965", "0.70083064", "0.700558", "0.6982957", "0.6981367", "0.69684786", "0.6961572", "0.69499016", "0.69443196", "0.6916001", "0.68968564", "0.68962073", "0.68711567", "0.6831326", "0.6810449", "0.6808604", "0.67843974", "0.6777066", "0.6761134", "0.67477846", "0.6736599", "0.67267615", "0.6720862", "0.67073286", "0.6700202", "0.66992635", "0.66739964", "0.66685766", "0.66563916", "0.665091", "0.66460514", "0.66429204", "0.65956753", "0.65946835", "0.65875953", "0.6586171", "0.6577967", "0.65728134", "0.65390694", "0.6531973", "0.6525116", "0.6517951", "0.65020937", "0.64879143", "0.64874405", "0.64874405", "0.64874405", "0.64874405", "0.64874405", "0.64874405", "0.64874405", "0.6463782", "0.64636755", "0.64306253", "0.6428252", "0.64260596", "0.6417062", "0.641526", "0.6408631", "0.6404456", "0.64039415", "0.6381438", "0.63737154", "0.6355811", "0.63506603", "0.6349255", "0.6325552", "0.6321395", "0.63189137", "0.6317488", "0.6314238", "0.6308162", "0.62945455", "0.6277615" ]
0.8065382
0
Return github API URL as string
def get_api_url(self): url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \ self.repo, self.product) return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def github_url(self):\n return self.github.replace('.git', '')", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def repo_link(repo):\n return \"https://github.com/\" + repo", "async def _api_url(self) -> URL:\n return await self._gitlab_api_url(\"\")", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def github_name(self):\n return self.github_url.replace(\"https://github.com/\", '')", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def format_url(self, data):\n git_url = urlparse(data[\"git_url\"])\n\n url = \"oauth2:{0}@{1}\".format(data[\"token\"], git_url.netloc)\n return git_url._replace(netloc=url).geturl()", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def get_api_url() -> str:\n\n site = pywikibot.Site()\n url = site.protocol() + \"://\" + site.hostname() + site.apipath()\n return url", "def getProjectURL():", "def get_api_url() -> str:\n\n\tsite = pywikibot.Site()\n\turl = site.protocol() + \"://\" + site.hostname() + site.apipath()\n\treturn url", "def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def repo_url(self):\n return self._repo_url", "def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )", "def api_url(self, command: str) -> str:\n base_url = self.base_url\n path = \"/\".join(x for x in f\"{base_url.path}/api/v2\".split(\"/\") if x != \"\")\n return URL.build(\n scheme=base_url.scheme,\n host=base_url.host,\n port=base_url.port,\n path=f\"/{path}\",\n query={\"apikey\": self.api_token, \"cmd\": command},\n ).human_repr()", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"", "def git_remote_url(self):\n return self._git_remote_url", "def api_url(self):\n return self.get_api_url()", "def _get_api_url(self):\n return \"%s/%s/\" % (settings.API_URL, settings.API_VERSION)", "def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag", "def get_api_url(settings: Settings) -> str:\n return _get_control(settings) \\\n .get('provider', {}).get('arguments', {}) \\\n .get('api_url', '')", "def github_api(request):\n if not 
request.startswith('https://api.github.com/'):\n request = 'https://api.github.com/' + request\n d = time.time() - github_api.last_time\n if d < 1:\n time.sleep(1 - d) # wait at least one second between GitHub API calls\n key = os.getenv('GITHUB_API_KEY')\n req = Request(request)\n req.add_header('User-Agent', github_api.user_agent)\n if key is not None:\n req.add_header('Authorization', 'token %s' % key)\n content = ''\n try:\n response = urlopen(req)\n content = response.read().decode()\n except HTTPError as e:\n print(request)\n print(e.reason)\n print(e.info())\n raise(e)\n github_api.last_time = time.time()\n return json.loads(content)", "def query_repo_url(repo_name):\n return buildapi.query_repo_url(repo_name)", "def url(self, api_name):\n return \"https://%s/api/%s/%s/\" % (self.host, self.api_version, api_name)", "def api_url(url_base):\n return f\"{url_base}/api/v2\"", "def api_url(self):\n return self._api_url", "def get_repo_url(repo, access_protocol, github_login):\n prop = {\n 'https': repo.clone_url,\n 'ssh': repo.ssh_url\n }[access_protocol]\n if access_protocol == 'https' and github_login:\n # we were provided explicit github login. For ssh access it is\n # impossible to specify different login within ssh RI, but it is\n # possible to do so for https logins\n url = URL(prop)\n assert url.scheme in ('http', 'https')\n url.username = github_login\n prop = url.as_str()\n return prop", "def GetGerritFetchUrl(host):\n return 'https://%s/' % host", "def api_url(self) -> httpx.URL:\n return self._client.base_url", "def BASE_URL():\n BASE_URL = \"http://api.zippopotam.us/\"\n return BASE_URL", "def url(self):\n _, body = self.request('/v1.1/url', 'GET')\n return body.get('url', None)", "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def apiurl(self):\n return self._apiurl", "def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )", "def get_api_url(self, query_, api):\n api_url = \"%s%s%s\" % (api, query_, self.api_key)\n\n return api_url", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def get(self, endpoint, params=None):\n res = requests.get(\"https://api.github.com/\" + endpoint,\n auth=requests.auth.HTTPBasicAuth(self.credentials['username'], self.credentials['token']),\n params=params)\n return res.json()", "def getBuildbotURL():", "def apiurl (self):\n return Links.createAPIURL (\n '/boards.json'\n )", "def Url(self) -> str:", "def __http_build_url(self, url_path):\n\n return '{}://{}{}'.format(_GOVEE_API_PROTOCOL, _GOVEE_API_HOST, url_path)", "def github(code, input):\n syntax = 'Syntax: \\'.github <user|user/repo>\\''\n failed = 'Failed to get data from Githubs API :('\n if len(input.group(2).strip().split()) != 1:\n return code.say(syntax)\n\n spacer = ' {blue}|{c} '\n\n if '/' not in input.group(2):\n # Assume a single username\n try:\n tmp = web.json(user_api % input.group(2).strip())\n response = {}\n # Remove dem ugly nulled values. 
It's a dictionary so we have to\n # loop differently.\n for key, value in tmp.iteritems():\n if value != '' or len(value) != 0 or value != 'null':\n response[key] = value\n print response\n except:\n return code.say(failed)\n if 'message' in response:\n # Assume failed\n return code.say(failed)\n\n # Here is where we build the response\n output = []\n if 'name' in response:\n output.append('%s (%s)' % (response['name'], response['login']))\n else:\n output.append(response['login'])\n if 'location' in response:\n output.append(response['location'])\n if 'email' in response:\n output.append(response['email'])\n if 'public_repos' in response:\n output.append('%s Repos' % response['public_repos'])\n if 'followers' in response:\n output.append('%s Followers' % response['followers'])\n if 'following' in response:\n output.append('Following %s' % response['following'])\n if 'public_gists' in response:\n output.append('%s Gists' % response['public_gists'])\n if 'html_url' in response:\n output.append(response['html_url'])\n\n return code.say(spacer.join(output))\n\n else:\n # Assume Username/Repo\n try:\n response = jweb.json(repo_api % input.group(2).strip())\n except:\n return code.say(failed)\n if 'message' in response:\n # Assume failed\n return code.say(failed)\n # Here is where we build the response\n output = []\n output.append('%s (%s)' %\n (response['name'], response['owner']['login']))\n output.append(response['description'])\n output.append('%s %s' % (response['stargazers_count'], u'\\u2605'))\n output.append('%s %s' % (response['watchers_count'], u'\\u231A'))\n output.append('%s %s' % (response['forks_count'], u'\\u2442'))\n output.append('%s %s' % (response['open_issues_count'], u'\\u2602'))\n output.append('%s %s' % (response['network_count'], u'\\U0001F46C'))\n output.append('%s %s' % (response['subscribers_count'], u'\\u2764'))\n output.append(response['html_url'])\n return code.say(spacer.join(output))", "def build_api_url(project, method, base_url):\n return API_URL_TEMPLATE.format(\n api_base=base_url, api_version=API_VERSION, project=project, method=method\n )", "def get_base_url(self):\n try:\n return self.get_metadata()['api_endpoint']\n except requests.exceptions.RequestException:\n raise", "def pr_link(repo, id):\n\n return '[#{id}](https://github.com/{repo}/pull/{id})'.format(id=id, repo=repo)", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def scm_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"scm_url\")", "def api_url(self, endpoint):\n\n return '{}/{}'.format(self.api_root, endpoint)", "def get_repo_url(package_name):\n package_info = get_package_info(package_name)\n\n if package_info and package_info.get('links'):\n links = package_info['links']\n\n if links.get('repository'):\n return links['repository']", "def _get_base_url(self):\n template = config.baseurl_template\n # get distro name and arch\n base_url = template.format(\n host=config.gitbuilder_host,\n proj=self.project,\n pkg_type=self.pkg_type,\n arch=self.arch,\n dist=self.distro,\n flavor=self.flavor,\n uri=self.uri_reference,\n )\n return base_url", "def _build_url(self):\n url = BASE_URL.format(self._host, self._port)\n _LOGGER.debug(\"TOON fetch URL: %s\", url)\n return url", "def __str__(self):\n return repr(self.api_url)", "def push_url(self):\n\n return maybe_string(C.git_remote_pushurl(self._remote))", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def 
get_repository_uri(self) -> str:\n raise NotImplementedError", "def getAPI(self):\n return self.api_url", "def svn_info_t_repos_root_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def RepositoryUrl(name):\n repository = ndb.Key(Repository, name).get()\n if not repository:\n raise KeyError('Unknown repository name: ' + name)\n return repository.urls[0]", "def url(self) -> str:\n return self.url_as()", "def api_scrape_url():\n if 'working_repo' in session:\n meta_data = get_tags(request.args['url'])\n return jsonify(msg=\"success\", data=meta_data)\n else:\n return jsonify(msg=\"failure, unauthorized\"), 401", "def url():\n url = \"https://pypi.python.org/pypi/{package}/json\"\n\n return url", "def get_contribs_api_base_url(request_url=None, deployment=\"contribs\"):\n if is_localhost() and SETTINGS.API_EXTERNAL_ENDPOINT:\n return f\"https://{deployment}-api.materialsproject.org\"\n\n if has_request_context() and (not request_url):\n request_url = request.url\n\n return parse_request_url(request_url, f\"{deployment}-api\")", "def url(self):\n return self._client.url", "def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")", "def api_base_url(self):\n\n\t\treturn self._api_base_url", "def _format_url(s):\n return u'%s%s\\n' % (BASE_URL, s.get_absolute_url())", "def get_url(self):\n if self.url:\n return self.url\n # if we have a uuid and happen to know the URL for it, use that\n elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):\n self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n return self.url\n # if we've only seen one rep, use that (a guess, but an educated one)\n elif not self.uuid and len(PathIdentifier.repo_hints) == 1:\n uuid, root = PathIdentifier.repo_hints.items()[0]\n if uuid:\n self.uuid = uuid\n PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self\n self.url = root + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n report(\"Guessing that '%s' refers to '%s'\" % (self, self.url))\n return self.url\n else:\n error(\"Cannot determine URL for '%s'; \" % self +\n \"Explicit source argument (-S/--source) required.\\n\")", "def url(vmanage_host,vmanage_port,api):\r\n \"\"\" function to get the url provide api endpoint \"\"\"\r\n \r\n return f\"https://{vmanage_host}:{vmanage_port}{api}\"", "def format_url(endpoint, cmd):\n url = base_url + endpoint + cmd + '&key=' + bart_api_key + json\n return url", "def api_url(host = None,\n path = None,\n port = None,\n protocol = None\n ):\n\n host = api_local_host() if host is None else str(host)\n # Force the host into something valid for DNS\n # See http://stackoverflow.com/a/25103444/180674\n try:\n host = host.encode('idna').decode(\"ascii\")\n except UnicodeError:\n raise ValueError(\"Invalid host '%s'\" % (host))\n host = __host_per_rfc_2732(host)\n\n if path is not None and path.startswith('/'):\n path = path[1:]\n\n if protocol is None:\n protocol = 'http'\n\n return protocol + '://' \\\n + host \\\n + ('' if port is None else (':' + str(port))) \\\n + api_root() \\\n + ('' if path is None else '/' + str(path))", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def githubclient(token):\n return github.Github(token)", "def mock_github_get(url):\n mock_repo_key = url.split(\"/\")[-1]\n\n result = requests.Response()\n 
result.status_code = 200\n result.encoding = \"utf-8\"\n result._content = repos[mock_repo_key].encode()\n\n return result", "def retrieve_author_url(name):\n response = requests.get('https://api.github.com/search/users', {'q': name})\n data = json.loads(response.text)\n if data.get('total_count', 0) > 0:\n return data['items'][0]['html_url']\n else:\n print \"--- ERROR: no author URL retrieved for '{0}' ---\".format(\n response.url)\n return name", "def get_pypi_url(requirement, version=None, base_url=PYPI_BASE_URL):\n return \"{base}/{req}/json\".format(base=base_url, req=requirement, version=version)", "def get_url():\n key = _get_key()\n return key.generate_url(300)", "async def github(self, ctx):\n await ctx.send('https://github.com/nick411077/nickcan_bot')", "def build_bitbucket_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://bitbucket.org/{namespace}/{name}\".format(\n namespace=namespace, name=name\n )\n if version:\n url = \"{url}/src/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def url(self, path):\n return '%s://%s/v2/%s' % (self.scheme, self.host, path)", "def _getRemoteUrlTheOldWay(self):\n utool = getUtility(IURLTool)\n if self.remote_url:\n return utool() + '/' + self.remote_url\n else:\n return utool()", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"url\")", "def get_url(self):\n return self.url.format(\n base_url=self.base_url,\n description=urllib.quote_plus(self.description),\n location=urllib.quote_plus(self.location),\n )", "def get_github_student_url(netid):\n url = 'https://raw.githubusercontent.com/CT-CS5356-Fall2017/cs5356/master/README.md'\n r = requests.get(url)\n assert r.ok\n text = r.text\n for l in text.split('\\n'):\n if netid in l:\n return extract_netid_and_url(l)\n return None, None, None", "def scm_url(self):\n return self._data.get('scm_url')", "def make_req_url(user, repo, endpoint, limit=50, queries=None):\n url = \"%s%s/%s/%s\" % (API_BASE_URL, user, repo, endpoint)\n\n # Set limit is given and is above 50, set limit to 50\n if limit and limit > 50:\n limit = 50\n url += \"?limit=%d\" % limit\n\n # Add additional query parameters\n if queries:\n for key in queries:\n url += \"&%s=%s\" % (key, queries[key])\n return url", "def test_github_without_url(self):\n url = reverse_lazy('authenticate:github')\n response = self.client.get(url)\n\n data = response.data\n details = data['details']\n status_code = data['status_code']\n\n self.assertEqual(2, len(data))\n self.assertEqual(details, 'No callback URL specified')\n self.assertEqual(status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def url(self):\n url = self.url\n return url", "def _get_url(self, *args):\n if self._baseUrl not in args:\n args.insert(0, self._baseUrl)\n args = filter(lambda item: item is not None, args)\n return \"/\".join(args)" ]
[ "0.79492223", "0.72487265", "0.7181632", "0.7154561", "0.7106888", "0.6985907", "0.6885024", "0.68606704", "0.682998", "0.6809253", "0.6746705", "0.67292404", "0.6727469", "0.67142564", "0.6706738", "0.66876775", "0.66692704", "0.66497993", "0.6614878", "0.6613082", "0.6584799", "0.65657115", "0.6516051", "0.65140766", "0.64963007", "0.6494963", "0.6471926", "0.641741", "0.6413179", "0.6396774", "0.63789684", "0.6374613", "0.6347608", "0.6343196", "0.6337058", "0.6316324", "0.62954384", "0.6265318", "0.6265318", "0.6265318", "0.6265318", "0.6265318", "0.6265318", "0.6265318", "0.6262628", "0.6243532", "0.62103575", "0.619068", "0.6183068", "0.61819524", "0.6174379", "0.61418104", "0.6126059", "0.6124447", "0.6124447", "0.6121767", "0.6119041", "0.6113046", "0.6103566", "0.61031723", "0.6096287", "0.6087147", "0.6084202", "0.6078369", "0.6074643", "0.60540956", "0.6052577", "0.60389644", "0.60091454", "0.5998718", "0.59899545", "0.59897935", "0.59824187", "0.59824187", "0.59788734", "0.59757066", "0.5971854", "0.59665316", "0.59639513", "0.59566677", "0.59346664", "0.59325737", "0.59292257", "0.5922126", "0.5916872", "0.5914707", "0.591216", "0.5910961", "0.59027183", "0.59012115", "0.58953434", "0.58953434", "0.58953434", "0.5887078", "0.58788675", "0.58769345", "0.5863253", "0.5849901", "0.5847508", "0.58440256" ]
0.83484757
0
Get all tags as json from Github API.
def get_tags(self): return self.get_url_data(self.api_url + 'refs/tags')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_all_tags():\n try:\n tags = g.projects.distinct('tags')\n return jsonify(sorted(tags, key=str.lower))\n except Exception as err:\n raise ApiException(str(err), 500)", "def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))", "def get_tags():\n\treturn jsonify(tags=[i.serialise for i in Tag.query.all()])", "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def api_get_tags(request):\n\n # TODO Get favorite tags for the given user ID\n\n tags = Tag.objects.get_not_empty_tags()\n tag_names = []\n for tag in tags:\n tag_names.append(tag.name)\n\n return HttpResponse(content=json.dumps(tag_names))", "def get_tags():\n\n error_on_unauthorized()\n\n tags = Tag.query.order_by(Tag.id)\n total_num = tags.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n \n return jsonify(total=total_num, tags=[t.to_dict() for t in tags.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def get_tags(self, *args, **kwargs):\n \n tags_data = api.get_tags(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Tag(tag_data) for tag_data in tags_data]", "def get_tags_list(url, auth_token, repo_name):\n response, _ = get_response(url + '/v2/' + repo_name + '/tags/list',\n auth_token)\n result = response.get('tags', [])\n return result", "def json_taglist():\n tags = Tag.query.all()\n out = {'tags': []}\n for tag in tags:\n out['tags'].append(tag.value)\n\n return jsonify(out)", "def get_tags(self):\n resp = self.get(_u.build_uri(\"tags\", domain=self.domain))\n return utils.handle_response(resp)", "def get(self, request):\n serializer = self.serializer_class(self.queryset.all(), many=True)\n return Response({'tags':serializer.data}, status=status.HTTP_200_OK)", "def tags_JSON(request):\n tags_as_json = serializers.serialize('json', Tag.objects.all())\n return HttpResponse(json.dumps(tags_as_json), content_type='json')", "def get_tags(self, obj):\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data", "def get_tags(request):\n try:\n tags = []\n for tag in Tag.objects.all():\n tags.append({\"title\": tag.title, \"id\": tag.pk})\n\n return format_ajax_response(True, \"Knowledgebase tags retrieved successfully.\", {\"tags\": tags})\n except Exception as ex:\n logger.error(\"Failed to get_tags: %s\" % ex)\n return format_ajax_response(False, \"There was an error retrieving the knowledgebase tags.\")", "def list(self):\n return self._post(\n request='list',\n uri=ApiUri.TAGS.value,\n ).get('tags')", "def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")", "def view_tags():\n tags = dict([ [k[8:],v] for k,v in os.environ.items() if k.startswith(\"HTTPBIN_\") ])\n\n if not tags:\n return Response(response=\"{}\", status=404, mimetype=\"application/json\")\n\n return 
jsonify(tags)", "def handle_tags(self, request):\n \"\"\"\n @api {get} /tags List tags\n @apiName GetTags\n @apiGroup Misc\n @apiVersion 1.0.0\n\n @apiDescription List currenty used tags\n\n @apiSuccessExample {json} Example response:\n [\n \"tag1\",\n \"tag2\"\n ]\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n tags = []\n\n for task in self.cluster.config.get('tasks').values():\n if 'tags' in task:\n tags += task['tags']\n\n tags = list(set(tags))\n\n return HTTPReply(code = 200, body = json.dumps(tags), headers = headers)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get_details(self, repo=None):\n api_json = []\n\n #get all branches from this repo\n branches = self.make_branches(self.getBranch(repo))\n\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(2)\n\n for branch in branches:\n args = {\"per_page\": \"100\",\n \"sha\": branch,\n \"author\": self.username,\n \"since\": yesterday.isoformat()}\n args = self.make_args(args)\n repo_url = \"/\".join([self.url, \"repos\", repo, \"commits\"])\n repo_url = repo_url + args\n\n request = urllib2.Request(repo_url, headers=self.headers)\n response = urllib2.urlopen(request)\n raw_data = response.read()\n commits_info = self.process_factory(simplejson.loads(raw_data))\n api_json = api_json + commits_info\n\n print repo_url\n\n print api_json\n return api_json", "def show_tags(config, args):\n for item in lib.input_json_lines():\n yield config.repo.tag(item)", "def get_all(self, endpoint, params=None):\n merged_json = []\n\n # Continue fetching pages until we reach an empty one. GitHub doesn't return a count of the total number of\n # pages, so there's no alternative.\n page = 1\n get_next_page = True\n while get_next_page:\n json = self.get(endpoint, page, params)\n merged_json += json\n if not len(json) > 0:\n get_next_page = False\n page += 1\n\n return merged_json", "def get(self, endpoint, params=None):\n res = requests.get(\"https://api.github.com/\" + endpoint,\n auth=requests.auth.HTTPBasicAuth(self.credentials['username'], self.credentials['token']),\n params=params)\n return res.json()", "async def getTags(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ContentValidator.getTags()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getTags\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getTags\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/content/v1.0/tags\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def run(docker_hub_client, args):\n resp = 
docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))", "def process_repo(self, url, multiple=False):\n json_data = loads(self.get_from_net(url)) #TODO add code to detect error messages in JSON from API\n if not multiple: json_data = [json_data]\n repo_dets = []\n for i in json_data:\n dets = {\n 'full_name': i['full_name'],\n 'name': i['name'],\n 'fork': i['fork'],\n 'url': i['url'],\n 'language': '',\n 'created': '',\n 'id': i[\"id\"] #for use in pagination\n }\n if 'language' in i: dets['language'] = i['language']\n if 'created_at' in i: dets['created'] = i['created_at']\n repo_dets.append(dets)\n return repo_dets", "def get_objects(self) -> Response:\n tags = [tag for tag in request.args.get(\"tags\", \"\").split(\",\") if tag]\n # filter types\n types = [type_ for type_ in request.args.get(\"types\", \"\").split(\",\") if type_]\n\n try:\n tagged_objects = TagDAO.get_tagged_objects_for_tags(tags, types)\n result = [\n self.object_entity_response_schema.dump(tagged_object)\n for tagged_object in tagged_objects\n ]\n return self.response(200, result=result)\n except TagInvalidError as ex:\n return self.response_422(message=ex.normalized_messages())\n except TagCreateFailedError as ex:\n logger.error(\n \"Error creating model %s: %s\",\n self.__class__.__name__,\n str(ex),\n exc_info=True,\n )\n return self.response_422(message=str(ex))", "def get(self, url_to_get=None): # pylint: disable=too-many-branches\n\n next_url = None\n if not url_to_get:\n url_to_get = self.url_to_get\n\n if self.etags and url_to_get in self.etags:\n self.headers[\"If-None-Match\"] = self.etags[url_to_get]\n\n req = get(url_to_get, headers=self.headers)\n\n if req.status_code == 200:\n data = req.json()\n repos = []\n\n if \"Etag\" in req.headers:\n self.etags[url_to_get] = req.headers[\"Etag\"]\n Helpers.Dict(self.etags).to_json(Settings.etags_file)\n\n if isinstance(data, list):\n repos.extend(data)\n else:\n raise Exception(\n \"Unable to understand GitHub API response for: '%s'.\" % url_to_get\n )\n\n if \"Link\" in req.headers:\n next_url = Helpers.Regex(\n req.headers[\"Link\"], self.regex_next_url, group=1, return_data=True\n ).match()\n\n if next_url:\n for element in self.get(url_to_get=next_url):\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n if repos:\n for element in repos:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n elif req.status_code == 304:\n data = Helpers.Dict.from_json(\n Helpers.File(Settings.repositories_file).read()\n )\n\n for element in data:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n elif req.status_code == 401:\n raise Exception(\"Bad GitHub credentials.\")\n else:\n raise Exception(\n \"Somethign went wrong while communicating with: 
'%s'.\" % url_to_get\n )", "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def find_all(self, params={}, **options):\n return self.client.get_collection(\"/tags\", params, **options)", "def do_list_tags(cs, args):\n resp, tags = cs.repositories.list_tags(args.repository)\n tags = [{\"Tag\": t} for t in tags]\n utils.print_list(tags, [\"Tag\"], sortby=\"Tag\")", "def _grab_tags(self, url):\n a = self._api_request(url)\n return bs4.BeautifulSoup(a,features=\"html.parser\")", "def github_parsing(query):\n logging.info(\"GET request github parsing is working\")\n host = 'github'\n GLOBAL_VARIABLE.set_host_name(host)\n json_all = query.json()\n json_items = json_all['items']\n clear_list_name = []\n clear_list_created_at = []\n clear_list_commits_url = []\n\n for items in json_items:\n clear_list_name += {items['name']}\n clear_list_created_at += {items['updated_at']}\n clear_list_commits_url += {items['commits_url']}\n\n return clear_list_name, clear_list_created_at, clear_list_commits_url", "def get_posts(self):\r\n postList = []\r\n for tag in self.setting.imgurTags:\r\n try:\r\n req = requests.get('%s%s' % (self.setting.tagLink, tag), headers=self.setting.imgurHeaders)\r\n for post in req.json()['data']['items']:\r\n p = self.json_to_post(post, tag)\r\n if p is not None:\r\n postList.append(p)\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.CRITICAL, 'imgur.get_posts exception(%s): %s' % (tag, e))\r\n break\r\n return postList", "def info(self, tag):\n\n url = \"https://api.instagram.com/v1/tags/{0}?access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n print(request.headers)\n return request.json()", "def tag_cmd(context, json, name):\n store: Store = context.obj[\"store\"]\n LOG.info(\"Fetch tags\")\n tag_objs = store.get_tags()\n template = schema.TagSchema()\n result = []\n for tag_obj in tag_objs:\n if name and (tag_obj.name not in name):\n continue\n LOG.debug(\"Use tag %s\", tag_obj.name)\n result.append(template.dump(tag_obj))\n if not result:\n LOG.info(\"Could not find any of the specified tags [%s]\", \", \".join(name))\n return\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_tags_table(result))", "def get_tags(request):\n as_list = request.params.get('as_list')\n if as_list:\n return [\n tag.name\n for tag in Tag.query.all()\n ]\n else:\n return [\n {\n 'name': tag.name,\n 'id': tag.id\n }\n for tag in Tag.query.all()\n ]", "def listTags(self, authenticationToken):\r\n pass", "def get_all_tags(self,\r\n access_token,\r\n group_id=None):\r\n\r\n # Prepare query URL\r\n _url_path = '/tags'\r\n _query_builder = Configuration.base_uri\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'access_token': access_token,\r\n 'group_id': group_id\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n CustomQueryAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP 
status codes.\r\n if _context.response.status_code == 0:\r\n raise APIException('Unexpected error.', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, GetAllTagsResponse.from_dictionary)", "def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)", "def get_pulls(self):\n url = self.base_url + 'pulls'\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()", "def get_tags(directory=None):\n out = check_output('git tag -l', shell=True, cwd=directory)\n return [l.strip() for l in out.splitlines()]", "def api_json(self):\n if not self._api_json:\n resp = requests.get(\n GitHubManager.RELEASE_API.format(repo=self.repo)\n )\n if not resp.ok:\n resp.raise_for_status()\n\n self._api_json = resp.json()\n\n return self._api_json", "def api_scrape_url():\n if 'working_repo' in session:\n meta_data = get_tags(request.args['url'])\n return jsonify(msg=\"success\", data=meta_data)\n else:\n return jsonify(msg=\"failure, unauthorized\"), 401", "def github_api(request):\n if not request.startswith('https://api.github.com/'):\n request = 'https://api.github.com/' + request\n d = time.time() - github_api.last_time\n if d < 1:\n time.sleep(1 - d) # wait at least one second between GitHub API calls\n key = os.getenv('GITHUB_API_KEY')\n req = Request(request)\n req.add_header('User-Agent', github_api.user_agent)\n if key is not None:\n req.add_header('Authorization', 'token %s' % key)\n content = ''\n try:\n response = urlopen(req)\n content = response.read().decode()\n except HTTPError as e:\n print(request)\n print(e.reason)\n print(e.info())\n raise(e)\n github_api.last_time = time.time()\n return json.loads(content)", "def get_repos(github_id):\r\n\r\n url = 'https://api.github.com/users/{}/repos'.format(github_id)\r\n response = requests.get(url)\r\n todos = json.loads(response.text)\r\n\r\n repo_list = []\r\n \r\n for data in todos:\r\n repo_list.append(data['name'])\r\n\r\n return repo_list", "def userlog_tags(self):\n url = (yield self.get_sitemap())['userlogs'] + '/tags'\n response = yield self._http_client.fetch(url)\n raise tornado.gen.Return(json.loads(response.body))", "def hashtags(max: int = None):\n for hashtag in client.hashtags(max=max):\n print(json.dumps(hashtag))", "def tags():", "def get_pulls_list(project, github_api=3):\r\n if github_api == 3 :\r\n url = \"https://api.github.com/repos/{project}/pulls\".format(project=project)\r\n else :\r\n url = \"http://github.com/api/v2/json/pulls/{project}\".format(project=project)\r\n response = requests.get(url)\r\n response.raise_for_status()\r\n if github_api == 2 :\r\n return json.loads(response.text)['pulls']\r\n return json.loads(response.text)", "def json_sluglist_by_tag(tag):\n tagobj = Tag.query.filter(Tag.value==tag).first()\n if tagobj is None:\n abort(404)\n posts = posts_base.filter(Post.tags.contains(tagobj))\n out = {'posts': []}\n for post in posts:\n out['posts'].append(post[0].slug)\n\n return jsonify(out)", "def get_tags(self):\n\n base_url = self.get_parent().url\n tags = self.tags.all()\n\n for tag in tags:\n tag.url = f\"{base_url}tags/{tag.slug}/\"\n\n return tags", "async def refs(self, user, repo):\n ref_types = (\"branches\", \"tags\")\n ref_data = [None, None]\n\n for i, ref_type in enumerate(ref_types):\n with self.catch_client_error():\n response = await getattr(self.github_client, \"get_%s\" % ref_type)(\n user, repo\n )\n ref_data[i] = 
json.loads(response_text(response))\n\n return ref_data", "def get_tags(filter, api_site_parameter, page = 1, pagesize = 10, sort = 'popular'):\n path = \"tags\"\n \n query_filter = ')(Yb(vlSfU'\n \n results = __fetch_results(path, api_site_parameter, inname= filter, page = page, pagesize = pagesize, filter = query_filter, sort = sort)\n return results", "def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos", "def tags_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(repository_id, \"tags\", access_token)", "def get_tags(self):\n tags = []\n for image in self.client.images.list():\n for tag in image.tags:\n if tag.startswith(self.repository_name):\n tokens = tag.split(':')\n tags.append(tokens[1])\n return tags", "def get_status_json(pr_id, tags):\n # Check status of PR\n cmds = [github_cli, 'pr', 'view', str(pr_id), '--json', tags]\n with subprocess.Popen(cmds, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n print(err)\n\n data = json.loads(result)\n\n if ',' in tags:\n return data\n else:\n return data[tags]", "def get_all(self):\n\n remote = json.loads(self.get_remote(), object_pairs_hook=OrderedDict)\n local = json.loads(self.get_local(), object_pairs_hook=OrderedDict)\n\n remote.update(local)\n\n # set the tag\n response = {\"inspection\": remote}\n\n return utility.serialize_data(response)", "async def get_trending_tags(start_tag: str = '', limit: int = 250):\n assert start_tag == '', 'tags pagination not supported'\n assert limit == 250, 'only returns exactly 250 tags'\n sql = \"\"\"\n SELECT category,\n COUNT(*) AS total_posts,\n SUM(CASE WHEN depth = 0 THEN 1 ELSE 0 END) AS top_posts,\n SUM(payout) AS total_payouts\n FROM hive_posts_cache\n WHERE is_paidout = '0'\n GROUP BY category\n ORDER BY SUM(payout) DESC\n LIMIT 250\n \"\"\"\n out = []\n for row in query_all(sql):\n out.append({\n 'comments': row['total_posts'] - row['top_posts'],\n 'name': row['category'],\n 'top_posts': row['top_posts'],\n 'total_payouts': \"%.3f SBD\" % row['total_payouts']})\n\n return out", "def test_get_tags_successful(self):\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.all().order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def test_retrieve_tags(self):\n\n Tag.objects.create(user=self.user, name='Vegan')\n Tag.objects.create(user=self.user, name='Dessert')\n\n res = self.client.get(TAGS_URL)\n\n tags = Tag.objects.all().order_by('-name')\n serializer = TagSerializer(tags, many=True)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)", "def make_branches(self, api_json=None):\n if api_json is None:\n return []\n\n obj = simplejson.loads(api_json)\n branches = [item[\"commit\"][\"sha\"] for item in obj]\n\n print branches\n\n return branches", "def tags_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"tags\", access_token)", "def getTags(owner_id=None, photo_id=None, access_key=None):\n params = {\n 'owner_id': owner_id,\n 
'photo_id': photo_id,\n 'access_key': access_key\n }\n result = call('photos.getTags', **params)\n return parse_response(result)", "def getTagsUsingId(self,resourceId):\n response = requests.get('https://api.imagga.com/v1/tagging?content=%s' % resourceId,\n auth=(self.apikey, self.secret))\n #print ('printing response')\n #print (response.json())", "def get(self) -> Iterable[instarepo.github.Repo]:\n return self._filter_pushed_after(\n self._filter_pushed_before(\n self._filter_language(\n self._filter_prefix(\n self._filter_forks(\n self._filter_archived(\n self.github.get_all_repos(self.sort, self.direction)\n )\n )\n )\n )\n )\n )", "def getJson(repo):\n\n url='https://api.github.com/repos/' + repo + '/stats/punch_card'\n r = requests.get(url, headers={'Authorization': 'token %s' % getToken()})\n if r.status_code == 403:\n raise SystemExit('Rate limited!')\n return r.json()", "def test_tag_list(self):\n self.seed_static_data()\n params = {\n 'event_id': 1,\n 'language': 'en'\n }\n\n response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data[0]['id'], 1)\n self.assertEqual(data[0]['event_id'], 1)\n self.assertEqual(data[0]['tag_type'], 'RESPONSE')\n self.assertEqual(data[0]['name'], 'English Tag 1 Event 1')\n self.assertEqual(data[0]['description'], 'English Tag 1 Event 1 Description')\n self.assertEqual(data[1]['id'], 2)\n self.assertEqual(data[1]['event_id'], 1)\n self.assertEqual(data[1]['tag_type'], 'RESPONSE')\n self.assertEqual(data[1]['name'], 'English Tag 2 Event 1')\n self.assertEqual(data[1]['description'], 'English Tag 2 Event 1 Description')\n\n params = {\n 'event_id': 1,\n 'language': 'fr'\n }\n\n response = self.app.get('/api/v1/tags', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n\n self.assertEqual(len(data), 2)\n self.assertEqual(data[0]['id'], 1)\n self.assertEqual(data[0]['event_id'], 1)\n self.assertEqual(data[0]['tag_type'], 'RESPONSE')\n self.assertEqual(data[0]['name'], 'French Tag 1 Event 1')\n self.assertEqual(data[0]['description'], 'French Tag 1 Event 1 Description')\n self.assertEqual(data[1]['id'], 2)\n self.assertEqual(data[1]['event_id'], 1)\n self.assertEqual(data[1]['tag_type'], 'RESPONSE')\n self.assertEqual(data[1]['name'], 'French Tag 2 Event 1')\n self.assertEqual(data[1]['description'], 'French Tag 2 Event 1 Description')", "def get_github_chandra_models_version_info():\n with urlopen('https://api.github.com/repos/sot/chandra_models/tags') as url:\n response = url.read()\n tags = json.loads(response.decode('utf-8'))\n\n with urlopen('https://api.github.com/repos/sot/chandra_models/branches') as url:\n response = url.read()\n branches = json.loads(response.decode('utf-8'))\n\n all_versions_info = {t[\"name\"]: t for t in tags}\n all_versions_info.update({b[\"name\"]: b for b in branches})\n return all_versions_info", "def get_all_links(self):\n links_url = \"{}/links\".format(self._project_url)\n print(links_url)\n response = requests.get(links_url).json()\n return json.dumps(response, indent=4, sort_keys=True)", "def instagramrequest(tag_name, max_tag_id=0):\n request_string = '?client_id=b865ec47b91346f3a2cbcfe04a6a80d9'\n if max_tag_id:\n request_string += '&max_tag_id='+str(max_tag_id)\n response = urlopen('https://api.instagram.com/v1/tags/'+tag_name+'/media/recent'+request_string)\n content = response.readall()\n return json.loads(content.decode(encoding='utf-8', errors='ignore'))", "def 
get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "def build_show_tags(ctx, args):\n for build_id in args:\n data = ctx.obj.get_build_tags_by_build_id(build_id)\n output_json_data(data)", "def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")", "def get(self, request: Request, podcast_id: str) -> Response:\n tags_set = set()\n tracks = Track.objects.filter(podcast_id=podcast_id)\n for track in tracks:\n tags = Tag.objects.filter(track_id=track.id)\n for tag in tags:\n tags_set.add({\"name\": tag.name})\n return Response(tags_set, status=status.HTTP_200_OK)", "def get_entities(tags):\n pass", "def get_commits(link):\n commits_response = requests.get(link)\n commits_result = commits_response.json()\n return commits_result", "def getTags(number=None):", "def list_bags(self, bags):\n prefix, suffix = self._get_jsonp()\n return prefix + JSON.list_bags(self, bags) + suffix", "def search(self, tag):\n\n url = \"https://api.instagram.com/v1/tags/search?q={0}&access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n return request.json()", "def tag_list(request):\r\n rdict = request.matchdict\r\n username = rdict.get(\"username\", None)\r\n if username:\r\n username = username.lower()\r\n\r\n tags_found = TagMgr.find(username=username)\r\n\r\n return {\r\n 'tag_list': tags_found,\r\n 'tag_count': len(tags_found),\r\n 'username': username,\r\n }", "def tags(self):\n return self.get(\"tags\")", "def get_images(name):\n url = \"/\".join([REGISTRY_BASE, name, \"/tags/list\"])\n response = req(url)\n image_list = []\n if response is not None:\n headers = {\"Accept\": \"application/vnd.docker.distribution.manifest.v2+json\"}\n tags = response[\"tags\"]\n for tag in tags:\n url = \"/\".join([REGISTRY_BASE, name, \"/manifests\", tag])\n response = req(url, headers)\n if response is not None:\n image = {}\n image[\"size\"] = response[\"config\"][\"size\"]\n for i in response[\"layers\"]:\n image[\"size\"] += i[\"size\"]\n image[\"size\"] = round(float(image[\"size\"]) / 1024 / 1024, 2)\n image[\"id\"] = response[\"config\"][\"digest\"][7:19]\n image[\"tag\"] = tag\n image[\"cmd\"] = \"docker pull uk8s.com/\" + name + \":\" + tag\n image_list.append(image)\n return sorted(image_list, reverse=True)", "def get_repo_data(repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{repo}'\n return get_whole_response_as_json(url, session)", "def get_tags(self):\n return self.tags", "def tags(self) -> list[str]:\n _args: list[Arg] = []\n _ctx = self._select(\"tags\", _args)\n return _ctx.execute_sync(list[str])", "def list_tags():\n\n tags = Tag.query.all()\n return render_template('tags/list_tags.html', tags=tags)", "def get_tags_with_http_info(self, **kwargs):\n\n all_params = []\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_tags\" % key\n )\n params[key] = val\n del params['kwargs']\n\n collection_formats = {}\n\n path_params 
= {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n\n # Authentication setting\n auth_settings = ['X-Token']\n\n return self.api_client.call_api('/tags', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Tag]',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def __list_all_tags(self):\n\n tags_dict = get_data.get_tagnames_dict()\n if len(tags_dict) > 0:\n first_str = 'tag'\n second_str = 'top posts scraped'\n third_str = 'recent posts scraped'\n descriptor = '{:<40} {:<20} {}'\n print('')\n print(descriptor.format(first_str, second_str, third_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-',\n len(third_str) * '-'))\n for number, tag in tags_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + tag\n second = str(get_data.get_top_tag_post_count(tag))\n third = str(get_data.get_recent_tag_post_count(tag))\n print(descriptor.format(first, second, third))\n else:\n print('no tags found in the database')", "def test_get_tag(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n self.assertEqual(data['id'], 1)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'RESPONSE')\n self.assertDictEqual(data['name'], {\n 'en': 'English Tag 1 Event 1',\n 'fr': 'French Tag 1 Event 1'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'English Tag 1 Event 1 Description',\n 'fr': 'French Tag 1 Event 1 Description'\n })", "def find_tags(self, commit, repo):\n ref_dict = repo.repo.refs.as_dict()\n tags = []\n for tag, tag_id in [(t, ref_dict[t]) for t in repo.tags]:\n obj, obj_id = repo.repo[tag_id], None\n if isinstance(obj, Tag):\n _, obj_id = obj.object\n if isinstance(obj, Commit):\n obj_id = obj.id\n if commit.id == obj_id:\n tags.append((tag, obj))\n return tags", "def list_tags():\r\n tags = Tag.query.order_by(Tag.name).all()\r\n return render_template('tags.html', tags=tags)", "def get(self, namespace, repository):\n repo_ref = registry_model.lookup_repository(namespace, repository)\n if repo_ref is None:\n raise NotFound()\n\n tags = registry_model.list_all_active_repository_tags(repo_ref)\n images_with_tags = defaultdict(list)\n for tag in tags:\n legacy_image_id = tag.manifest.legacy_image_root_id\n if legacy_image_id is not None:\n images_with_tags[legacy_image_id].append(tag)\n\n # NOTE: This is replicating our older response for this endpoint, but\n # returns empty for the metadata fields. 
This is to ensure back-compat\n # for callers still using the deprecated API, while not having to load\n # all the manifests from storage.\n return {\n \"images\": [\n {\n \"id\": image_id,\n \"created\": format_date(\n datetime.utcfromtimestamp((min([tag.lifetime_start_ts for tag in tags])))\n ),\n \"comment\": \"\",\n \"command\": \"\",\n \"size\": 0,\n \"uploading\": False,\n \"sort_index\": 0,\n \"tags\": [tag.name for tag in tags],\n \"ancestors\": \"\",\n }\n for image_id, tags in images_with_tags.items()\n ]\n }" ]
[ "0.7320748", "0.71714383", "0.7113308", "0.6959439", "0.69185", "0.6898857", "0.6868199", "0.6557756", "0.63590163", "0.63203824", "0.630982", "0.63082796", "0.62603027", "0.61560315", "0.61486876", "0.6137396", "0.60915667", "0.60803205", "0.60155445", "0.5989724", "0.5959265", "0.5959265", "0.5950431", "0.5948194", "0.5880411", "0.5861264", "0.5856733", "0.58325946", "0.5797074", "0.5781113", "0.5779219", "0.5778807", "0.5770775", "0.57556933", "0.571521", "0.5714199", "0.5699264", "0.5691095", "0.5668461", "0.56550676", "0.5638991", "0.56313753", "0.5603293", "0.5597528", "0.55895245", "0.55871236", "0.5575099", "0.55726194", "0.5568725", "0.55538726", "0.554641", "0.55430454", "0.55427676", "0.55387276", "0.55377233", "0.5530614", "0.5516348", "0.5516121", "0.5489149", "0.54667735", "0.54567224", "0.54446995", "0.5440644", "0.5429937", "0.5420714", "0.54171014", "0.5409681", "0.53885436", "0.5387615", "0.5386356", "0.538566", "0.53812134", "0.53764236", "0.536448", "0.5363979", "0.53515404", "0.53515404", "0.5348022", "0.5345197", "0.53106916", "0.5310033", "0.530264", "0.52983755", "0.52925545", "0.5284426", "0.5280086", "0.527734", "0.5275999", "0.5271527", "0.52697176", "0.5268647", "0.52685213", "0.52678835", "0.52547944", "0.52527636", "0.52490926", "0.5242425", "0.5235385", "0.52274215", "0.5214602" ]
0.6558411
7
Get a specific tag's data from the GitHub API.
def get_tag(self, sha):
    return self.get_url_data(self.api_url + 'tags/' + sha)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tag(self, tag):\n resp = self.get(_u.build_uri(\"tag\", domain=self.domain),\n data={'tag': tag})\n return utils.handle_response(resp)", "def find_by_id(self, tag, params={}, **options):\n path = \"/tags/%s\" % (tag)\n return self.client.get(path, params, **options)", "def get(self, endpoint, params=None):\n res = requests.get(\"https://api.github.com/\" + endpoint,\n auth=requests.auth.HTTPBasicAuth(self.credentials['username'], self.credentials['token']),\n params=params)\n return res.json()", "def info(self, tag):\n\n url = \"https://api.instagram.com/v1/tags/{0}?access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n print(request.headers)\n return request.json()", "def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def read_tag(\n *,\n db: Session = Depends(get_db),\n id: int,\n current_user: DBUser = Depends(get_current_active_user),\n):\n tag = crud.tag.get(db_session=db, id=id)\n if not tag:\n raise HTTPException(status_code=404, detail=\"Tag not found\")\n if not crud.user.is_superuser(current_user) and (tag.owner_id != current_user.id):\n raise 
HTTPException(status_code=400, detail=\"Not enough permissions\")\n return tag", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def test_get_tag(self):\n self.seed_static_data()\n params = {'id': 1, 'event_id': 1}\n response = self.app.get('/api/v1/tag', headers=self.user1_headers, data=params)\n data = json.loads(response.data)\n self.assertEqual(data['id'], 1)\n self.assertEqual(data['event_id'], 1)\n self.assertEqual(data['tag_type'], 'RESPONSE')\n self.assertDictEqual(data['name'], {\n 'en': 'English Tag 1 Event 1',\n 'fr': 'French Tag 1 Event 1'\n })\n self.assertDictEqual(data['description'], {\n 'en': 'English Tag 1 Event 1 Description',\n 'fr': 'French Tag 1 Event 1 Description'\n })", "def get(self, uuid):\n\n\t\treturn self._get(\"/tag/%s\" % base.getid(uuid), \"tag\")", "async def get_tag_command(self, ctx):\n await self.get_tag(ctx)", "def get(self, endpoint, page=1, params=None):\n url = 'https://api.github.com/%(endpoint)s?access_token=%(token)s&page=%(page)d' % {\n 'endpoint': endpoint,\n 'token': self.access_token,\n 'page': page,\n }\n if params is not None:\n url += '&' + urlencode(params)\n response = requests.get(url)\n\n # Produce specific error on 404. Generic HTTPError otherwise.\n if response.status_code == 404:\n raise ResourceNotFound\n response.raise_for_status()\n\n return response.json()", "def get(self, url_to_get=None): # pylint: disable=too-many-branches\n\n next_url = None\n if not url_to_get:\n url_to_get = self.url_to_get\n\n if self.etags and url_to_get in self.etags:\n self.headers[\"If-None-Match\"] = self.etags[url_to_get]\n\n req = get(url_to_get, headers=self.headers)\n\n if req.status_code == 200:\n data = req.json()\n repos = []\n\n if \"Etag\" in req.headers:\n self.etags[url_to_get] = req.headers[\"Etag\"]\n Helpers.Dict(self.etags).to_json(Settings.etags_file)\n\n if isinstance(data, list):\n repos.extend(data)\n else:\n raise Exception(\n \"Unable to understand GitHub API response for: '%s'.\" % url_to_get\n )\n\n if \"Link\" in req.headers:\n next_url = Helpers.Regex(\n req.headers[\"Link\"], self.regex_next_url, group=1, return_data=True\n ).match()\n\n if next_url:\n for element in self.get(url_to_get=next_url):\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n if repos:\n for element in repos:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n elif req.status_code == 304:\n data = Helpers.Dict.from_json(\n Helpers.File(Settings.repositories_file).read()\n )\n\n for element in data:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n elif req.status_code == 401:\n raise Exception(\"Bad GitHub credentials.\")\n else:\n raise Exception(\n \"Somethign went wrong while communicating with: '%s'.\" % url_to_get\n )", "def get_pull_request(project, num, github_api=3):\r\n if github_api==2 :\r\n url = \"http://github.com/api/v2/json/pulls/{project}/{num}\".format(project=project, num=num)\r\n elif github_api == 3:\r\n url = \"https://api.github.com/repos/{project}/pulls/{num}\".format(project=project, num=num)\r\n response = requests.get(url)\r\n response.raise_for_status()\r\n if github_api == 2 :\r\n return json.loads(response.text)['pull']\r\n return json.loads(response.text)", "def get_from_git(project, obj, params={}, verbose=0):\n\n url = \"%s%s/raw/%s\" % (GIT_URL, project, obj)\n return 
load_yaml(requester(url, params=params,\n headers={'Accept': 'application/json'},\n verbose=verbose).text)", "def get(self, hash_tag):\n request_args = get_current_request_args()\n\n scope = request_args.get('scope') or DEFAULT_HASH_TAG_FETCH_SCOPE\n if scope not in HASH_TAG_RETRIEVAL_SCOPES:\n raise BadRequest(\n '`scope` must be one of {}'.format(HASH_TAG_RETRIEVAL_SCOPES))\n\n hash_tag = HashTag.get_not_deleted(hash_tag=hash_tag)\n if hash_tag is None:\n raise ResourceNotFound('Hash tag not found')\n\n hash_tag_details = {\n 'meta': lambda x: {\n 'data': None,\n 'meta': None\n },\n 'posts': lambda y: {\n 'data': None,\n 'meta': None\n },\n 'followers': lambda z: {\n 'data': None,\n 'meta': None\n }\n }\n\n scoped_details = hash_tag_details[scope]()\n\n return api_success_response(**scoped_details)", "def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))", "def getTag(self, authenticationToken, guid):\r\n pass", "def get_tag_by_id(self,\r\n access_token,\r\n tag_id):\r\n\r\n # Prepare query URL\r\n _url_path = '/tags/{tag_id}'\r\n _url_path = APIHelper.append_url_with_template_parameters(_url_path, { \r\n 'tag_id': tag_id\r\n })\r\n _query_builder = Configuration.base_uri\r\n _query_builder += _url_path\r\n _query_parameters = {\r\n 'access_token': access_token\r\n }\r\n _query_builder = APIHelper.append_url_with_query_parameters(_query_builder,\r\n _query_parameters, Configuration.array_serialization)\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n CustomQueryAuth.apply(_request)\r\n _context = self.execute_request(_request)\r\n\r\n # Endpoint and global error handling using HTTP status codes.\r\n if _context.response.status_code == 0:\r\n raise APIException('Unexpected error.', _context)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, Tag.from_dictionary)", "def github_api(request):\n if not request.startswith('https://api.github.com/'):\n request = 'https://api.github.com/' + request\n d = time.time() - github_api.last_time\n if d < 1:\n time.sleep(1 - d) # wait at least one second between GitHub API calls\n key = os.getenv('GITHUB_API_KEY')\n req = Request(request)\n req.add_header('User-Agent', github_api.user_agent)\n if key is not None:\n req.add_header('Authorization', 'token %s' % key)\n content = ''\n try:\n response = urlopen(req)\n content = response.read().decode()\n except HTTPError as e:\n print(request)\n print(e.reason)\n print(e.info())\n raise(e)\n github_api.last_time = time.time()\n return json.loads(content)", "def search(self, tag):\n\n url = 
\"https://api.instagram.com/v1/tags/search?q={0}&access_token={1}\".format(tag, self.access_token)\n request = requests.get(url)\n return request.json()", "def api_scrape_url():\n if 'working_repo' in session:\n meta_data = get_tags(request.args['url'])\n return jsonify(msg=\"success\", data=meta_data)\n else:\n return jsonify(msg=\"failure, unauthorized\"), 401", "def get_repo_data(repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{repo}'\n return get_whole_response_as_json(url, session)", "def _get_repo(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}\"\n status, data, _ = self.get(url)\n if (status == 200):\n return data\n else:\n log.warning(\"GHUB\", f\"Unexpected status code {status} for request {url}.\")", "def get_tag(self, tag, filename):\n return self.get_tag_batch(tag, [filename])[0]", "def fetch(self, tag):\n return fetch_image(self.collection.client, tag)", "def instagramrequest(tag_name, max_tag_id=0):\n request_string = '?client_id=b865ec47b91346f3a2cbcfe04a6a80d9'\n if max_tag_id:\n request_string += '&max_tag_id='+str(max_tag_id)\n response = urlopen('https://api.instagram.com/v1/tags/'+tag_name+'/media/recent'+request_string)\n content = response.readall()\n return json.loads(content.decode(encoding='utf-8', errors='ignore'))", "def pull(self, repo, tag):\n check_blacklist(repo)\n logger.info(\"Pulling Docker image {}:{}\".format(repo, tag))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)", "def by_tag(articles_by_tag, tag):\n for a in articles_by_tag:\n if a[0].slug == tag:\n return a[1]", "def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")", "def get_tag(repo: str = None) -> str:\n if not repo:\n repo = '.'\n repo=repo.replace('\\\\','/')\n cmd = ['git', 'describe']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n return None\n tag: str = result.stdout.decode(\"utf-8\")\n tag = tag.replace('\\r', '').replace('\\n', '')\n return tag", "def tag_cmd(context, json, name):\n store: Store = context.obj[\"store\"]\n LOG.info(\"Fetch tags\")\n tag_objs = store.get_tags()\n template = schema.TagSchema()\n result = []\n for tag_obj in tag_objs:\n if name and (tag_obj.name not in name):\n continue\n LOG.debug(\"Use tag %s\", tag_obj.name)\n result.append(template.dump(tag_obj))\n if not result:\n LOG.info(\"Could not find any of the specified tags [%s]\", \", \".join(name))\n return\n if json:\n click.echo(jsonlib.dumps(result))\n return\n console = Console()\n console.print(get_tags_table(result))", "def extract_recent_tag(self, tag):\n\n url_string = \"https://www.instagram.com/explore/tags/%s/\" % tag\n response = bs4.BeautifulSoup(requests.get(url_string).text, \"html.parser\")\n potential_query_ids = self.get_query_id(response)\n shared_data = self.extract_shared_data(response)\n\n media = shared_data['entry_data']['TagPage'][0]['tag']['media']\n posts = []\n for node in media['nodes']:\n post = self.extract_recent_instagram_post(node)\n posts.append(post)\n self.save_results(posts)\n\n end_cursor = media['page_info']['end_cursor']\n\n # figure out valid queryId\n for potential_id in potential_query_ids:\n url = \"https://www.instagram.com/graphql/query/?query_id=%s&tag_name=%s&first=12&after=%s\" % (\n potential_id, tag, end_cursor)\n try:\n data = requests.get(url).json()\n if 'hashtag' not in data['data']:\n # 
empty response, skip\n continue\n query_id = potential_id\n success = True\n break\n except JSONDecodeError as de:\n # no valid JSON retured, most likely wrong query_id resulting in 'Oops, an error occurred.'\n pass\n if not success:\n log.error(\"Error extracting Query Id, exiting\")\n sys.exit(1)\n\n while end_cursor is not None:\n url = \"https://www.instagram.com/graphql/query/?query_id=%s&tag_name=%s&first=12&after=%s\" % (\n query_id, tag, end_cursor)\n data = requests.get(url).json()\n if 'hashtag' not in data['data']:\n # empty response, skip\n continue\n end_cursor = data['data']['hashtag']['edge_hashtag_to_media']['page_info']['end_cursor']\n posts = self.extract_instagram_posts(data['data']['hashtag']['edge_hashtag_to_media']['edges'])\n self.save_results(posts)", "def getTagsUsingId(self,resourceId):\n response = requests.get('https://api.imagga.com/v1/tagging?content=%s' % resourceId,\n auth=(self.apikey, self.secret))\n #print ('printing response')\n #print (response.json())", "def pull_image(self, tag):\n image_name = self.build_image_name(tag)\n image = self.client.images.pull(image_name)\n return image", "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def getTag(self, authenticationToken, guid):\r\n self.send_getTag(authenticationToken, guid)\r\n return self.recv_getTag()", "def getTagData(tagname,data):\n tags = rhevGet(\"/api/tags\")\n doc = libxml2.parseDoc(tags)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/tags/tag[name[position()=1]= '\" + tagname + \"']\")\n return res[0].prop(data)", "def find_tag(tag : str):\n\tprint(f\"finding tag {tag} . . .\")\n\n\tkeys = db.keys() # lists the database keys\n\n\tif \"tags\" not in keys: # to make sure there's a database\n\t\tdb[\"tags\"] = {} # creates the tag database\n\t\tprint(f\"Initiated databse . . 
.\")\n\t\n\ttags = db[\"tags\"] # sets the database to a variable for easy use\n\t# tags is a dictionary with keys and values\n\t# to access a tag, use tags[tag]\n\n\treturn_value = None\n\n\tif tag in tags:\n\t\treturn_value = {\n\t\t\t\"key\": tag, # gets the tag name\n\t\t\t\"value\": tags[tag], # gets the tag value frome db\n\t\t\t\"status\": 200\n\t\t}\n\t\tprint(f\"Tag {tag} found with value {tags[tag]}.\")\n\t\n\telif tag not in tags:\n\t\treturn_value = {\n\t\t\t\"key\": tag, # gets the supposed tag name\n\t\t\t\"value\": f\"Tag `{tag}` doesn't exist.\", # returns none\n\t\t\t\"status\": 404\n\t\t}\n\t\tprint(f\"Tag {tag} not found.\")\n\t\tif tag == None:\n\t\t\treturn_value[\"value\"] = None\n\t\n\telse:\n\t\treturn_value = {\n\t\t\t\"key\": None,\n\t\t\t\"value\": None,\n\t\t\t\"status\": 500\n\t\t}\n\t\tprint(f\"An error occured finding {tag}.\")\n\t\n\treturn return_value", "def image_by_tag(self, tag):\n if not tag:\n return None\n\n return next((image for image in self.images() if tag\n in image['RepoTags']), None)", "def search_tag(self, tag):\n self.driver.get(self.tag_url.format(tag))", "def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')", "def _get_tag(self, current_path, commit_sha):\n command = [\"git\", \"describe\", \"--tags\", commit_sha]\n p = subprocess.Popen(\n command,\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n return output.decode(\"utf-8\").strip()\n elif \"fatal: no tags can describe '{}'.\".format(commit_sha) in error.decode(\n \"utf-8\"\n ).lower():\n return None\n elif \"fatal: no names found\" in error.decode(\"utf-8\").lower():\n return None\n else:\n raise Exception(\n \"Error [{}] occurred while executing [{}] command to get nearest tag associated with branch.\".format(\n error.decode(\"utf-8\"), \" \".join(command)\n )\n )", "def get_tag(self, tag_name):\n tag_data = self.db.make_query(\n '''\n select tag_name from tag where tag_name = \"{}\"\n '''.format(tag_name)\n )\n\n if len(tag_data) > 0:\n tag_name = tag_data[0][0]\n human_readable_tag = name_util.make_decoded(tag_data[0][0])\n\n rtn_dict = {\n 'tag_name': tag_name,\n 'human_readable_name': human_readable_tag\n }\n\n return rtn_dict", "def get_repository(post):\n pattern = re.compile(constants.REPOSITORY_REGEX)\n if \"links\" in post.json_metadata.keys():\n for link in post.json_metadata[\"links\"]:\n if link.startswith(\"/exit?url=\"):\n link = link[len(\"/exit?url=\"):]\n\n try:\n result = pattern.search(link).group(0)\n return result\n except AttributeError:\n continue\n else:\n for line in post.body.split():\n try:\n result = pattern.search(line).group(0)\n return result\n except AttributeError:\n continue\n\n return \"\"", "def get_pr_info(num):\r\n url = \"https://api.github.com/repos/edx/edx-platform/pulls/{num}\".format(num=num)\r\n username, token = get_github_creds()\r\n headers = {\r\n \"Authorization\": \"token {}\".format(token),\r\n \"User-Agent\": \"edx-release\",\r\n }\r\n response = requests.get(url, headers=headers)\r\n result = response.json()\r\n if not response.ok:\r\n raise requests.exceptions.RequestException(result[\"message\"])\r\n return result", "def get_tag(tag):\r\n from tagging.models import Tag\r\n if isinstance(tag, Tag):\r\n return tag\r\n\r\n try:\r\n if isinstance(tag, types.StringTypes):\r\n return Tag.objects.get(name=tag)\r\n elif isinstance(tag, (types.IntType, types.LongType)):\r\n return Tag.objects.get(id=tag)\r\n except 
Tag.DoesNotExist:\r\n pass\r\n\r\n return None", "def pull(bento_tag: str, force: bool) -> None: # type: ignore (not accessed)\n yatai_client.pull_bento(bento_tag, force=force)", "def get_fetcher(tag):\n global FETCHERS\n if not tag in FETCHERS:\n valid_sources = (\", \").join(FETCHERS.keys())\n raise ValueError(\"invalid source name \"\n + \"(current source: {})\".format(valid_sources))\n\n return FETCHERS[tag]", "def api_get_tags(request):\n\n # TODO Get favorite tags for the given user ID\n\n tags = Tag.objects.get_not_empty_tags()\n tag_names = []\n for tag in tags:\n tag_names.append(tag.name)\n\n return HttpResponse(content=json.dumps(tag_names))", "def get_posts_tag(url_tag):\n return Posts.objects.filter(tags__tag=url_tag)", "def github_request(auth_token, repo, path, **kwargs):\n params = kwargs\n url = '{}/repos/{}/{}'.format(GITHUB_API_URL, repo, path)\n headers = {}\n if auth_token:\n headers['Authorization'] = 'token {}'.format(auth_token)\n\n while url:\n res = requests.get(url, headers=headers, params=params)\n if res.status_code != 200:\n raise Exception('GitHub request {} failed:'.format(url), res.text)\n\n page = res.json()\n if isinstance(page, list):\n for item in page:\n yield item\n try:\n url = res.links['next']['url']\n except KeyError:\n url = None\n params = None\n else:\n yield page\n break", "async def fetch_data(self) -> GitHubReleaseModel | None:\n result = await self._client.repos.releases.list(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n if not result.data:\n return None\n\n for release in result.data:\n if not release.prerelease:\n return release\n\n # Fall back to the latest release if no non-prerelease release is found\n return result.data[0]", "def fetch_pull(ref):\n origin.fetch(tags=True)\n repo.git.checkout(\"{}\".format(ref))\n repo.git.pull(\"origin\", \"{}\".format(ref))", "def get_status_json(pr_id, tags):\n # Check status of PR\n cmds = [github_cli, 'pr', 'view', str(pr_id), '--json', tags]\n with subprocess.Popen(cmds, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n print(err)\n\n data = json.loads(result)\n\n if ',' in tags:\n return data\n else:\n return data[tags]", "def get_tag_stats(self, tag):\n resp = self.get(_u.build_uri(\"tag.stats\", domain=self.domain),\n data={'tag': tag})\n return utils.handle_response(resp)", "def GetFromTag(cls, tag):\n parent_key = cls._GetParentKeyFromTag(tag)\n return cls.query(ancestor=parent_key).get()", "def github(code, input):\n syntax = 'Syntax: \\'.github <user|user/repo>\\''\n failed = 'Failed to get data from Githubs API :('\n if len(input.group(2).strip().split()) != 1:\n return code.say(syntax)\n\n spacer = ' {blue}|{c} '\n\n if '/' not in input.group(2):\n # Assume a single username\n try:\n tmp = web.json(user_api % input.group(2).strip())\n response = {}\n # Remove dem ugly nulled values. 
It's a dictionary so we have to\n # loop differently.\n for key, value in tmp.iteritems():\n if value != '' or len(value) != 0 or value != 'null':\n response[key] = value\n print response\n except:\n return code.say(failed)\n if 'message' in response:\n # Assume failed\n return code.say(failed)\n\n # Here is where we build the response\n output = []\n if 'name' in response:\n output.append('%s (%s)' % (response['name'], response['login']))\n else:\n output.append(response['login'])\n if 'location' in response:\n output.append(response['location'])\n if 'email' in response:\n output.append(response['email'])\n if 'public_repos' in response:\n output.append('%s Repos' % response['public_repos'])\n if 'followers' in response:\n output.append('%s Followers' % response['followers'])\n if 'following' in response:\n output.append('Following %s' % response['following'])\n if 'public_gists' in response:\n output.append('%s Gists' % response['public_gists'])\n if 'html_url' in response:\n output.append(response['html_url'])\n\n return code.say(spacer.join(output))\n\n else:\n # Assume Username/Repo\n try:\n response = jweb.json(repo_api % input.group(2).strip())\n except:\n return code.say(failed)\n if 'message' in response:\n # Assume failed\n return code.say(failed)\n # Here is where we build the response\n output = []\n output.append('%s (%s)' %\n (response['name'], response['owner']['login']))\n output.append(response['description'])\n output.append('%s %s' % (response['stargazers_count'], u'\\u2605'))\n output.append('%s %s' % (response['watchers_count'], u'\\u231A'))\n output.append('%s %s' % (response['forks_count'], u'\\u2442'))\n output.append('%s %s' % (response['open_issues_count'], u'\\u2602'))\n output.append('%s %s' % (response['network_count'], u'\\U0001F46C'))\n output.append('%s %s' % (response['subscribers_count'], u'\\u2764'))\n output.append(response['html_url'])\n return code.say(spacer.join(output))", "def get_tagname(tags, tagid):\n for tag in tags:\n if tag['id'] == tagid:\n return tag['name']", "def get(self, tag, index):\n raise NotImplementedError", "def get_tag(self, scope, key):\r\n print 'GETTING', scope, key, self._tags\r\n return self._tags[scope].get(key)", "def execute_request(path):\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\"\n }\n url = \"https://api.github.com\" + path\n\n # GET https://api.github.com/<path> Accept: \"application/vnd.github.v3+json\"\n\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n\n response.raise_for_status() # Raise a RequestException if we failed, and trigger retry\n\n return response.json()", "def get_one(self, file_name):\n github_url = ''.join((CONF.api.github_raw_base_url.rstrip('/'),\n '/', file_name, \".json\"))\n try:\n response = requests.get(github_url)\n LOG.debug(\"Response Status: %s / Used Requests Cache: %s\" %\n (response.status_code,\n getattr(response, 'from_cache', False)))\n if response.status_code == 200:\n return response.json()\n else:\n LOG.warning('Github returned non-success HTTP '\n 'code: %s' % response.status_code)\n pecan.abort(response.status_code)\n except requests.exceptions.RequestException as e:\n LOG.warning('An error occurred trying to get GitHub '\n 'capability file contents: %s' % e)\n pecan.abort(500)", "def get_one(self, file_name):\n github_url = ''.join((CONF.api.github_raw_base_url.rstrip('/'),\n '/', file_name, \".json\"))\n try:\n response = requests.get(github_url)\n LOG.debug(\"Response Status: %s / Used Requests Cache: %s\" %\n 
(response.status_code,\n getattr(response, 'from_cache', False)))\n if response.status_code == 200:\n return response.json()\n else:\n LOG.warning('Github returned non-success HTTP '\n 'code: %s' % response.status_code)\n pecan.abort(response.status_code)\n except requests.exceptions.RequestException as e:\n LOG.warning('An error occurred trying to get GitHub '\n 'capability file contents: %s' % e)\n pecan.abort(500)", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_tag(tag_id, as_dict=False):\n query = db_session.query(Tags).filter_by(id=tag_id)\n logging.debug('Query executed: %s' % query)\n data = query.first()\n if as_dict:\n columns = Tags.__table__.columns.keys()\n data = to_dict(data, columns)\n return data", "def get_gist(gist_id):\n urlbase = \"https://api.github.com/gists/\"\n url = urlbase + str(gist_id)\n r = requests.get(url)\n if r.status_code != 200:\n r.raise_for_status()\n return r.json()", "def tags_by_id(self, repository_id, access_token=None):\n return self._complete_request_by_id(repository_id, \"tags\", access_token)", "def run_releasetool_tag(lang: str, gh: github.GitHub, pull: dict) -> TagContext:\n language_module = importlib.import_module(f\"releasetool.commands.tag.{lang}\")\n ctx = TagContext()\n ctx.interactive = False\n # TODO(busunkim): Use proxy once KMS setup is complete.\n ctx.github = releasetool.github.GitHub(gh.token, use_proxy=False)\n ctx.token = gh.token\n ctx.upstream_repo = pull[\"base\"][\"repo\"][\"full_name\"]\n ctx.release_pr = pull\n return language_module.tag(ctx)", "def get_all_tags():\n try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def getHTMLTag(self, html, tag):\n soup = BeautifulSoup(html, 'html.parser')\n content = soup.find(tag)\n return content", "def getMetaByTags(tags):\n\n if type(tags) is str:\n tag = tags\n else:\n tag = tags[0]\n\n record = getRecordsByTags(tag)\n\n if record is not None:\n geodata_meta = parseMeta(record)\n return geodata_meta\n else:\n error = \"could not get record for tags from gnos\"\n print(error)\n return None", "def getMetaByTags(tags):\n\n if type(tags) is str:\n tag = tags\n else:\n tag = tags[0]\n\n record = getRecordsByTags(tag)\n\n if record is not None:\n geodata_meta = parseMeta(record)\n return geodata_meta\n else:\n error = \"could not get record for tags from gnos\"\n print(error)\n return None", "async def refs(self, user, repo):\n ref_types = (\"branches\", \"tags\")\n ref_data = [None, None]\n\n for i, ref_type in enumerate(ref_types):\n with self.catch_client_error():\n response = await getattr(self.github_client, \"get_%s\" % ref_type)(\n user, repo\n )\n ref_data[i] = json.loads(response_text(response))\n\n return ref_data", "def test_networking_project_network_tag_get(self):\n pass", "def git_tag(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"git_tag\")", "def get_tags(self, *args, **kwargs):\n \n tags_data = api.get_tags(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Tag(tag_data) for tag_data in tags_data]", "def get_pull(self, pull_number):\n url = self.base_url + 'pulls/%s' % pull_number\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()", "async def info(self, ctx, *, tag):\n try:\n self.fetch_tag(ctx, tag)\n except Exception as error:\n return await ctx.send(error)\n data = 
self._tag_dict[ctx.guild.id][tag]\n author = self.bot.get_user(data['author']) or await self.bot.fetch_user(data['author'])\n embed = discord.Embed(colour=self.bot.colour)\n embed.title = tag\n embed.description = f\"<:author:734991429843157042> **{author}**\\n\"\n embed.description += f\"Uses: **{data['uses']}**\\n\"\n embed.description += f\"ID: **{data['id']}**\"\n embed.set_author(name=str(author), icon_url=author.avatar_url)\n await ctx.send(embed=embed)", "def get_tags_list(url, auth_token, repo_name):\n response, _ = get_response(url + '/v2/' + repo_name + '/tags/list',\n auth_token)\n result = response.get('tags', [])\n return result", "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def test_load_github(self):\n\n c = Client()\n response = c.get('/taric_books/github/')\n\n self.assertEqual(response.status_code, 200)", "def tags_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"tags\", access_token)", "async def githubinfo_command(self, ctx, *, githubusername: str):\n async with aiohttp.ClientSession() as session:\n async with session.get(\n f\"https://api.github.com/users/{githubusername}\"\n ) as resp:\n githubinfo = await resp.json()\n name = githubinfo[\"name\"]\n avatar_url = githubinfo[\"avatar_url\"]\n blog = githubinfo[\"blog\"]\n location = githubinfo[\"location\"]\n twitter_username = githubinfo[\"twitter_username\"]\n publicrepos = githubinfo[\"public_repos\"]\n followers = githubinfo[\"followers\"]\n following = githubinfo[\"following\"]\n embed = Embed(\n color=Color.blurple(),\n timestamp=datetime.utcnow(),\n description=(\n f\"**Name** - {name}\\n**Blog URL** - {None if not blog else blog}\\n**Location** - {location}\\n**Twitter Username** - {twitter_username}\\n **Public Repositories** - {publicrepos}\\n**Followers** - {followers}\\n**Following** - {following}\"\n ),\n )\n embed.set_author(name=f\"Github Profile info of username {githubusername}\")\n if avatar_url is not None:\n embed.set_thumbnail(url=avatar_url)\n await ctx.send(embed=embed)", "def search_term(query):\n results = {}\n logging.info(\"GET request search query is working\")\n try:\n results = requests.get(\n GITHUB_API+query+'&per_page=10&sort=updated&order=desc')\n logging.info(\"GET request search query is working by github api\")\n except requests.ConnectionError as exception:\n return f'{exception}'\n return results", "def api_repo_get(access_key):\n repo = Repo.query.get(access_key)\n if not repo:\n return jsonify(error=\"Repo not found\"), 404\n \n if repo.is_private and 'working_repo' not in session:\n return jsonify(error=\"Unauthorized\"), 401\n elif repo.is_private and session['working_repo'] != repo.access_key:\n return jsonify(error=\"Unauthorized\"), 403\n elif repo.is_private and session['working_repo'] == repo.access_key:\n return jsonify(repo.to_json())\n else:\n return jsonify(repo.to_json())", "def retrieve_author_url(name):\n response = requests.get('https://api.github.com/search/users', {'q': name})\n data = json.loads(response.text)\n if data.get('total_count', 0) > 0:\n return data['items'][0]['html_url']\n else:\n print \"--- ERROR: no author URL retrieved for '{0}' ---\".format(\n response.url)\n return name", "def get_tags(filter, api_site_parameter, page = 1, pagesize = 10, sort = 'popular'):\n path = \"tags\"\n \n query_filter = ')(Yb(vlSfU'\n \n results = __fetch_results(path, 
api_site_parameter, inname= filter, page = page, pagesize = pagesize, filter = query_filter, sort = sort)\n return results", "def projects_with_tag(request, tag):\n return tag.project_set.filter(user=request.user)", "def get_github_content(repository, filename, branch=\"main\"):\n raw_url = \"https://raw.github.com/{repo}/{branch}/{filename}\".format(\n repo=repository,\n branch=branch,\n filename=filename\n )\n try:\n resp = requests.get(raw_url)\n resp.raise_for_status()\n return resp.content\n except requests.exceptions.RequestException as error:\n logging.error(error)\n sys.exit(1)", "def tag_view(request, tag_id, error='', message=''):\n tag = Tag.objects.get(id=tag_id)\n return index(request=request, error=error, message=message, tag=tag, tag_id=tag_id)", "def __getitem__(self, tag):\n return self.get(tag)", "def tag(request, tag_name):\n raise NotImplementedError", "def mock_github_get(url):\n mock_repo_key = url.split(\"/\")[-1]\n\n result = requests.Response()\n result.status_code = 200\n result.encoding = \"utf-8\"\n result._content = repos[mock_repo_key].encode()\n\n return result", "def _grab_tags(self, url):\n a = self._api_request(url)\n return bs4.BeautifulSoup(a,features=\"html.parser\")", "def latest_github_release(username: str, repo: str) -> GitHubReleaseResponse:\n url = f'https://api.github.com/repos/{username}/{repo}/releases/latest'\n response = requests.get(url)\n if response.status_code != 200:\n print(f'Error: {response.status_code}', file=sys.stderr)\n sys.exit(1)\n try:\n decoded_json = response.json()\n return GitHubReleaseResponse(decoded_json)\n except requests.exceptions.JSONDecodeError:\n print('Error: Unable to parse JSON from GitHub releases', file=sys.stderr)\n sys.exit(1)", "def _recursive_gh_get(href, items, password=None):\n response = GitHub._request('GET', href, token=password)\n response.raise_for_status()\n items.extend(response.json())\n if \"link\" not in response.headers:\n return\n # links = link_header.parse(response.headers[\"link\"])\n # rels = {link.rel: link.href for link in links.links}\n # if \"next\" in rels:\n # ghRelease._recursive_gh_get(rels[\"next\"], items)", "def get_content_by_tag(self, tag: str) -> Any:\n result = self.client.get_instances_id_content_tags_path(id_=self.id_, tags_path=tag)\n\n try:\n return result.decode('utf-8').strip().replace('\\x00', '')\n except AttributeError:\n return result" ]
[ "0.67847025", "0.6460267", "0.63765323", "0.6361611", "0.6254757", "0.6193532", "0.6165435", "0.5924588", "0.5913108", "0.5913108", "0.5889115", "0.58397275", "0.5831963", "0.58196306", "0.581669", "0.57963234", "0.57881856", "0.57807654", "0.57554406", "0.57134587", "0.56990176", "0.5679682", "0.56588626", "0.5614263", "0.5614167", "0.55856615", "0.5577077", "0.55760366", "0.55580515", "0.5551342", "0.5550982", "0.55419695", "0.5533722", "0.5469568", "0.5463886", "0.5453569", "0.5453016", "0.54170835", "0.5411326", "0.5396782", "0.53923947", "0.53845125", "0.5382335", "0.5375054", "0.53569347", "0.5352931", "0.53486013", "0.5316857", "0.53081536", "0.53059024", "0.53035885", "0.5258411", "0.5256758", "0.52552503", "0.5251165", "0.52505815", "0.52474195", "0.5239648", "0.52343565", "0.5230584", "0.5221586", "0.5216933", "0.52064556", "0.5202647", "0.5198685", "0.5198685", "0.5182995", "0.51826054", "0.5180385", "0.5175278", "0.5170165", "0.51661575", "0.51642495", "0.5162663", "0.5162663", "0.51587397", "0.5153902", "0.51463187", "0.51425767", "0.5138558", "0.513661", "0.51365113", "0.5135637", "0.5127785", "0.51231146", "0.5122364", "0.51159376", "0.50920576", "0.50861007", "0.5075132", "0.5068718", "0.50611097", "0.5057101", "0.5051422", "0.50217503", "0.50206286", "0.5017021", "0.50164557", "0.50138944", "0.50133806" ]
0.65368634
1
The GitHub API can only return all tags, but we only want the latest.
def get_latest_tags(self):
    start = len(self.tags) - self.num_comparisons
    tags = self.tags
    latest = []
    for i in xrange(len(tags)):
        if i >= start:
            parts = tags[i]['ref'].split('/')
            release_num = parts[2]
            sha = tags[i]['object']['sha']
            tag = [release_num, sha]
            latest.append(tag)
    return latest
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def do_latest_tag(args, image_name_tag, image_name):\n if args.latest is True:\n if tag(image_name_tag, image_name+':latest'):\n push(args, image_name+':latest')", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']", "def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))", "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def get_last_tag_by_date(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n output = output.splitlines()\n if len(output) == 0:\n return ''\n return output[-1]", "def get_last_tag_by_version(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n tags = []\n versions = []\n for line in output.splitlines():\n tags.append(line.strip())\n ver = re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", line)\n if ver:\n versions.append(ver)\n return tags[versions.index(max(versions))] if versions else ''", "def get_latest_tag(self, repo: git.Repo) -> Tuple[Optional[\n git.refs.tag.TagReference], Optional[semantic_version.Version]]:\n raw_tag = self._search_strategy(\n repo=repo, branch=self._branch)\n if raw_tag is None:\n return None, None\n sem_tag = semantic_version.Version(\n tag_search_strategy.clean_tag_name(str(raw_tag)))\n return raw_tag, sem_tag", "def get_all_tags():\n try:\n tags = g.projects.distinct('tags')\n return jsonify(sorted(tags, key=str.lower))\n except Exception as err:\n raise ApiException(str(err), 500)", "def get_most_recent_tarball(self, pkg):\n pass", "def get_all_tags():\n 
try:\n data = ReadTag().run()\n except Exception as ex:\n return jsonify({'code': '500','message':'Internal server error'})\n else:\n return jsonify({'code': '200','data': data})", "def get_tags(self):\n return self.get_url_data(self.api_url + 'refs/tags')", "def get(self, url_to_get=None): # pylint: disable=too-many-branches\n\n next_url = None\n if not url_to_get:\n url_to_get = self.url_to_get\n\n if self.etags and url_to_get in self.etags:\n self.headers[\"If-None-Match\"] = self.etags[url_to_get]\n\n req = get(url_to_get, headers=self.headers)\n\n if req.status_code == 200:\n data = req.json()\n repos = []\n\n if \"Etag\" in req.headers:\n self.etags[url_to_get] = req.headers[\"Etag\"]\n Helpers.Dict(self.etags).to_json(Settings.etags_file)\n\n if isinstance(data, list):\n repos.extend(data)\n else:\n raise Exception(\n \"Unable to understand GitHub API response for: '%s'.\" % url_to_get\n )\n\n if \"Link\" in req.headers:\n next_url = Helpers.Regex(\n req.headers[\"Link\"], self.regex_next_url, group=1, return_data=True\n ).match()\n\n if next_url:\n for element in self.get(url_to_get=next_url):\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n if repos:\n for element in repos:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n\n elif req.status_code == 304:\n data = Helpers.Dict.from_json(\n Helpers.File(Settings.repositories_file).read()\n )\n\n for element in data:\n if element[\"name\"] not in Settings.repo_to_ignore:\n yield element\n else:\n continue\n elif req.status_code == 401:\n raise Exception(\"Bad GitHub credentials.\")\n else:\n raise Exception(\n \"Somethign went wrong while communicating with: '%s'.\" % url_to_get\n )", "def get_latest_build(tag, package):\n proc = Popen([\"osg-koji\", \"-q\", \"list-tagged\", \"--latest\", tag, package],\n stdout=PIPE)\n out = proc.communicate()[0] or b''\n ret = proc.returncode\n\n latest_build_line = out.decode(\"latin-1\").strip()\n\n if ret != 0 or not latest_build_line:\n return\n\n return latest_build_line.split()[0]", "def _sort_latest_tag(self, versions: List[dict], tag_key: str) -> Dict:\n return next(\n iter(\n sorted(\n versions,\n reverse=True,\n key=lambda s: list(\n map(\n int,\n filter(None, re.sub(r\"[^0-9.]+\", \"\", s.get(tag_key), re.I).split(\".\")),\n )\n )\n if \".\" in s.get(tag_key)\n else [-1],\n )\n )\n )", "def tags(self):\r\n url = self.base_url + 'tags/'\r\n return json.loads(self.bb.load_url(url))", "def latest_tagged_video(tag):\n if not isinstance(tag, Tag):\n try:\n tag = Tag.objects.get(text=tag)\n except Tag.DoesNotExist:\n return mark_safe('')\n video = first_or_none(Video.objects.filter(tags=tag)\n .order_by('-issue__issue_date'))\n if video:\n return mark_safe(video.key)\n return mark_safe('')", "def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise 
ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "def _fetch_latest_for_tag(self, tag, today):\n result = []\n url = Fetch163.search_link % urllib2.quote(tag.name.encode('utf8'))\n try:\n resp = urllib2.urlopen(url)\n except urllib2.URLError as e:\n urllib_error(e)\n else:\n doc = eval(resp.read())\n if doc and type(doc) is list:\n if today:\n news_today = self._today_filter(doc, delta=2)\n else:\n news_today = doc\n for d in news_today:\n docid = d.get('docid', '')\n #title = u'%s' % d.get('title', '')\n # the d.get('title') is a unicode string represent by\n # python str, so use unicode-escape to decode it.\n title = d.get('title', '')\n #print type(title)\n news_title = self._trans_title(title)\n if docid and title:\n news_exits = News.objects.filter(\n Q(docid=docid) | Q(title=news_title)\n )\n #print docid, news_title, news_exits\n intro, body, c_num, ptime, pic = self._fetch_news(docid)\n if not news_exits:\n print 'new news', news_title, docid\n news = News()\n news.docid = docid\n news.title = news_title\n news.content = body\n news.tag = tag\n news.comment_num = c_num\n news.list_pic = pic\n news.abstract = intro\n news.update_time = ptime\n news.save()\n import time\n time.sleep(2)\n if news:\n result.append(news)\n else:\n print 'update news', news_title\n n = news_exits[0]\n print 'old:', n.comment_num, 'new:', c_num\n n.comment_num = c_num\n n.save()\n else:\n print 'Fetch news for tag: %s, Error' % tag.name\n\n return result", "def tags(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'tags')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def test_none_return_if_all_excluded(self): # pylint: disable=invalid-name\n tags = [_TagInfo('1.0.1', 'commit1', ''),\n _TagInfo('notsemver', 'commit2', '')]\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.1'), None)", "def get_latest_items(parser, token):\n bits = token.split_contents()\n\n if len(bits) != 4:\n raise TemplateSyntaxError, \"get_latest_item tag takes exactly three arguments\"\n if bits[2] != 'as':\n raise TemplateSyntaxError, \"second argument to get_latest_item tag must be 'as'\"\n return LatestItemNode(bits[1], bits[3])", "def get_repository_tags(repository_name):\n tags_query = \"SELECT * FROM release_tag where repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")", "def __gitTagList(self):\n self.vcs.gitListTagBranch(self.project.getProjectPath(), True)", "def get_latest_posts(parser, token):\n\ttry:\n\t\ttag_name, arg = token.contents.split(None, 1)\n\texcept ValueError:\n\t\traise 
template.TemplateSyntaxError, \"%s tag requires arguments\" % token.contents.split()[0]\n\t\n\tm = re.search(r'(.*?) as (\\w+)', arg)\n\t\n\tif not m:\n\t\traise template.TemplateSyntaxError, \"%s tag had invalid arguments\" % tag_name\n\t\n\tformat_string, var_name = m.groups()\n\t\n\treturn LatestPosts(format_string[0], var_name)", "def get(self) -> Iterable[instarepo.github.Repo]:\n return self._filter_pushed_after(\n self._filter_pushed_before(\n self._filter_language(\n self._filter_prefix(\n self._filter_forks(\n self._filter_archived(\n self.github.get_all_repos(self.sort, self.direction)\n )\n )\n )\n )\n )\n )", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def get(self):\n res = SmartAPI.get_tags(self.args.field)\n self.finish(res)", "def _parse_latest_update(self, resp: Dict[str, Any], latest_version: str) -> str:\n latest_release = resp.get(\"releases\", {}).get(latest_version)\n if latest_release is not None and isinstance(latest_release, list):\n release_artifact_dates = []\n for artifact in latest_release:\n try:\n upload_time = artifact.get(\"upload_time_iso_8601\")\n parsed_upload_time = dateutil.parser.isoparse(upload_time)\n release_artifact_dates.append(parsed_upload_time)\n except Exception:\n pass\n latest_artifact_timestamp = max(release_artifact_dates)\n return latest_artifact_timestamp.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n return \"\"", "def extract_recent_tag(self, tag):\n\n url_string = \"https://www.instagram.com/explore/tags/%s/\" % tag\n response = bs4.BeautifulSoup(requests.get(url_string).text, \"html.parser\")\n potential_query_ids = self.get_query_id(response)\n shared_data = self.extract_shared_data(response)\n\n media = shared_data['entry_data']['TagPage'][0]['tag']['media']\n posts = []\n for node in media['nodes']:\n post = self.extract_recent_instagram_post(node)\n posts.append(post)\n self.save_results(posts)\n\n end_cursor = media['page_info']['end_cursor']\n\n # figure out valid queryId\n for potential_id in potential_query_ids:\n url = \"https://www.instagram.com/graphql/query/?query_id=%s&tag_name=%s&first=12&after=%s\" % (\n potential_id, tag, end_cursor)\n try:\n data = requests.get(url).json()\n if 'hashtag' not in data['data']:\n # empty response, skip\n continue\n query_id = potential_id\n success = True\n break\n except JSONDecodeError as de:\n # no valid JSON retured, most likely wrong query_id resulting in 'Oops, an error occurred.'\n pass\n if not success:\n log.error(\"Error extracting Query Id, exiting\")\n sys.exit(1)\n\n while end_cursor is not None:\n url = \"https://www.instagram.com/graphql/query/?query_id=%s&tag_name=%s&first=12&after=%s\" % (\n query_id, tag, end_cursor)\n data = requests.get(url).json()\n if 'hashtag' not in data['data']:\n # empty response, skip\n continue\n end_cursor = data['data']['hashtag']['edge_hashtag_to_media']['page_info']['end_cursor']\n posts = self.extract_instagram_posts(data['data']['hashtag']['edge_hashtag_to_media']['edges'])\n self.save_results(posts)", "def get_tags():\n\n error_on_unauthorized()\n\n tags = Tag.query.order_by(Tag.id)\n total_num = tags.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query 
parameters out of range\")\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n \n return jsonify(total=total_num, tags=[t.to_dict() for t in tags.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def test_none_return(self):\n tags = []\n for i in range(15):\n tags.append(_TagInfo('v1.0.' + str(i), 'commit' + str(i), ''))\n for i in range(15):\n shuffle(tags)\n self.assertEqual(_seek_last_semver_tag(tags), None)", "def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version", "def get_tag(self, sha):\n return self.get_url_data(self.api_url + 'tags/' + sha)", "def tags(self):\r\n url = '{0}/tags/'.format(self.get_url())\r\n request = http.Request('GET', url)\r\n\r\n return request, parsers.parse_json", "def instagramrequest(tag_name, max_tag_id=0):\n request_string = '?client_id=b865ec47b91346f3a2cbcfe04a6a80d9'\n if max_tag_id:\n request_string += '&max_tag_id='+str(max_tag_id)\n response = urlopen('https://api.instagram.com/v1/tags/'+tag_name+'/media/recent'+request_string)\n content = response.readall()\n return json.loads(content.decode(encoding='utf-8', errors='ignore'))", "def get_tags_list(url, auth_token, repo_name):\n response, _ = get_response(url + '/v2/' + repo_name + '/tags/list',\n auth_token)\n result = response.get('tags', [])\n return result", "def test_none_version_return_if_all_excluded(self): # pylint: disable=invalid-name\n version_prefix = 'v'\n tags = [_TagInfo('v1.0.1', 'commit1', version_prefix),\n _TagInfo('notsemver', 'commit2', version_prefix),\n _TagInfo('v1.0.v2', 'commit2', version_prefix)]\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.1'), None)", "def get_tags(self):\n resp = self.get(_u.build_uri(\"tags\", domain=self.domain))\n return utils.handle_response(resp)", "def get_last_release_id():\n url = \"https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\"\n try:\n with urlopen(url, timeout=10) as resp:\n return json.loads(resp.read().decode(\"utf-8\")).get(\"tag_name\", \"0\")\n except URLError as e:\n log(f\"YouTubeDLHelper error [get last release id]: {e}\")", "def test_pull_multiple_tags(self):\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': self.public,\n 'remotetype': 'dockerv2'\n }\n # Do the pull\n session = self.m.new_session(self.auth, self.system)\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Confirm record\n q = {'system': self.system, 'itype': self.itype,\n 'pulltag': self.public}\n mrec = self.images.find_one(q)\n self.assertIn('_id', mrec)\n # Track through transistions\n state = self.time_wait(id)\n self.assertEqual(state, 'READY')\n\n # Now reppull with a different tag for the same image\n newtag = self.public.replace('latest', '1')\n pr = {\n 'system': self.system,\n 'itype': self.itype,\n 'tag': newtag,\n 'remotetype': 'dockerv2'\n }\n rec = self.m.pull(session, pr) # ,delay=False)\n id = rec['_id']\n self.assertIsNotNone(rec)\n # Track through transistions\n state = self.time_wait(id)\n # Requery the original record\n mrec = self.images.find_one(q)\n self.assertIn(self.public, mrec['tag'])\n self.assertIn(newtag, mrec['tag'])", "async def latest(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"latest\"], *args, **kwargs)", "def get_tags(self):\n tags = []\n for image in 
self.client.images.list():\n for tag in image.tags:\n if tag.startswith(self.repository_name):\n tokens = tag.split(':')\n tags.append(tokens[1])\n return tags", "def last(self, count=None):\r\n url = '{0}/{1}'.format(self.get_pull_url(), 'last')\r\n params = base.get_params(('count',), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version", "def latest_github_release(username: str, repo: str) -> GitHubReleaseResponse:\n url = f'https://api.github.com/repos/{username}/{repo}/releases/latest'\n response = requests.get(url)\n if response.status_code != 200:\n print(f'Error: {response.status_code}', file=sys.stderr)\n sys.exit(1)\n try:\n decoded_json = response.json()\n return GitHubReleaseResponse(decoded_json)\n except requests.exceptions.JSONDecodeError:\n print('Error: Unable to parse JSON from GitHub releases', file=sys.stderr)\n sys.exit(1)", "def tags(self) -> List[str]:\n if \"RepoTags\" in self.attrs:\n return [tag for tag in self.attrs[\"RepoTags\"] if tag != \"<none>:<none>\"]\n return []", "def do_list_tags(cs, args):\n resp, tags = cs.repositories.list_tags(args.repository)\n tags = [{\"Tag\": t} for t in tags]\n utils.print_list(tags, [\"Tag\"], sortby=\"Tag\")", "def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')", "def get_tags(self, *args, **kwargs):\n \n tags_data = api.get_tags(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Tag(tag_data) for tag_data in tags_data]", "def parseGithubFeed(data):\n\tgitResult = []\n\tif data:\n\t\tfor entries in data:\n\t\t\ttext = entries['commit']['message']\n\t\t\tauthor = entries['commit']['author']['name']\n\t\t\ttime = entries['commit']['author']['date']\n\t\t\ttime = dateutil.parser.parse(time).isoformat(' ').split('+')[0] \n\t\t\ttime = datetime.datetime.strptime(time, \"%Y-%m-%d %H:%M:%S\" )\n\t\t\titem = copy.deepcopy(templateResult)\n\t\t\titem['message'] = text \n\t\t\titem['author'] = author\n\t\t\titem['datetime'] = time\n\t\t\titem['source'] = 'Github'\n\t\t\tgitResult.append(item)\n\treturn gitResult", "def get_tags(directory=None):\n out = check_output('git tag -l', shell=True, cwd=directory)\n return [l.strip() for l in out.splitlines()]", "def get_prs_merged_since(auth_token, repo, tag):\n tag_date = get_tag_date(tag)\n prs = []\n\n def merge_date(pr):\n if pr.get('merged_at'):\n return dateutil.parser.parse(pr['merged_at'])\n else:\n return None\n\n # The GitHub API does not provide a `since` parameter to retrieve PRs\n # closed since a given date, so instead we iterate over PRs in descending\n # order of last update and stop when we reach a PR that was last updated\n # before the given tag was created.\n for closed_pr in github_request(auth_token, repo, 'pulls', state='closed',\n sort='updated', direction='desc'):\n pr_date = dateutil.parser.parse(closed_pr['updated_at'])\n if pr_date < tag_date:\n 
break\n merged_at = merge_date(closed_pr)\n if merged_at and merged_at > tag_date:\n prs += [closed_pr]\n\n return sorted(prs, key=merge_date)", "def get_object_tagging(Bucket=None, Key=None, VersionId=None):\n pass", "def show_tags(config, args):\n for item in lib.input_json_lines():\n yield config.repo.tag(item)", "def refresh():\n git.fetch()\n output = str(git.merge('--ff-only')).strip()\n if output != 'Already up to date.':\n print(output)\n git.fetch('--tags')", "def _get_tag(self, current_path, commit_sha):\n command = [\"git\", \"describe\", \"--tags\", commit_sha]\n p = subprocess.Popen(\n command,\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n return output.decode(\"utf-8\").strip()\n elif \"fatal: no tags can describe '{}'.\".format(commit_sha) in error.decode(\n \"utf-8\"\n ).lower():\n return None\n elif \"fatal: no names found\" in error.decode(\"utf-8\").lower():\n return None\n else:\n raise Exception(\n \"Error [{}] occurred while executing [{}] command to get nearest tag associated with branch.\".format(\n error.decode(\"utf-8\"), \" \".join(command)\n )\n )", "def fetchGithubFeed(startDate,endDate):\n\tresp = requests.get(k.GITHUB_URL + \"journal/commits?access_token=\" + k.GITHUB_TOKEN + \"&since=\" + startDate.isoformat() + \"&until=\" + endDate.isoformat())\n\t\n\treturn resp.json()", "def get_latest_content():\n\n latest_content = {}\n latest_content['all'] = ContentItem.objects.all().order_by('updated_at')[:4]\n latest_content['ga'] = ContentItem.objects.filter(tags__name='Geeks Abroad').order_by('updated_at')[:4]\n latest_content['gaming'] = ContentItem.objects.filter(tags__name='Gaming').order_by('updated_at')[:4]\n latest_content['osalt'] = ContentItem.objects.filter(tags__name='OS.Alt').order_by('updated_at')[:4]\n latest_content['sqa'] = ContentItem.objects.filter(tags__name='Squirrel Army').order_by('updated_at')[:4]\n\n return latest_content", "def get_versions(self):\n # They randomly use and don't use 'r' prefix so we have to sort\n # versions manually\n versions = list(self._get_github_tags())\n versions.sort(\n key=operator.attrgetter('base_version'),\n reverse=True,\n )\n return versions", "def get_top_tags(self, limit: int = 50) -> ListModel[Tag]:\n return self.retrieve(\n bind=Tag,\n flatten=\"tag\",\n params=dict(method=\"user.getTopTags\", user=self.name, limit=limit),\n )", "def list(self):\n\n\t\treturn self._list(\"/tag\", \"tag\")", "def tags():", "def pull_image_then_retag(docker_client=None, repository=None, tag='latest', retag_repository=None):\n try:\n Logger.debug(f\"ready to pull the image {repository}:{tag}\")\n image = docker_client.images.pull(repository, tag)\n Logger.info(f\"pull the image {image.attrs['RepoTags'][0]} completed,ready to re-tag.\")\n return image.tag(retag_repository, tag)\n except docker.errors.APIError as error:\n Logger.error(error)\n return False", "def get_latest_push(cls, git_lab_response: list) -> Optional[dict]:\n git_lab_response = cls.extract_push_data(git_lab_response)\n _push_dates = []\n\n if bool(git_lab_response) is False:\n return None\n\n # Extract the dates into a List\n for push in git_lab_response:\n _push_dates.append(\n iso8601.parse_date(\n push[cls.PUSH_DATE_KEY]\n )\n )\n # Get latest date.\n _latest_date = max(_push_dates)\n # Get latest dates index.\n _latest_push_index = _push_dates.index(_latest_date)\n\n # Connect Push event and date with the dates index.\n return 
git_lab_response[_latest_push_index]", "def get_github_commits():\n utcnow = datetime.datetime.utcnow()\n yesterday = utcnow - datetime.timedelta(hours=24)\n yesterday = yesterday.replace(hour=12, minute=0, second=0)\n iso = yesterday.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n\n txt = [\"> IEM Code Pushes <to branch> on Github\\n\"]\n html = [\"<h3>IEM Code Pushes &lt;to branch&gt; on Github</h3>\"]\n\n # get branches, main is first!\n branches = [\"main\"]\n req = exponential_backoff(requests.get, IEM_BRANCHES, timeout=30)\n for branch in req.json():\n if branch[\"name\"] == \"main\":\n continue\n branches.append(branch[\"name\"])\n\n hashes = []\n links = []\n for branch in branches:\n uri = (\n f\"https://api.github.com/repos/akrherz/iem/commits?since={iso}&\"\n f\"sha={branch}\"\n )\n req2 = exponential_backoff(requests.get, uri, timeout=30)\n # commits are in reverse order\n for commit in req2.json()[::-1]:\n if commit[\"sha\"] in hashes:\n continue\n hashes.append(commit[\"sha\"])\n timestring = commit[\"commit\"][\"author\"][\"date\"]\n utcvalid = datetime.datetime.strptime(\n timestring, \"%Y-%m-%dT%H:%M:%SZ\"\n )\n valid = utcvalid.replace(tzinfo=pytz.utc).astimezone(\n pytz.timezone(\"America/Chicago\")\n )\n data = {\n \"stamp\": valid.strftime(\"%b %-d %-2I:%M %p\"),\n \"msg\": commit[\"commit\"][\"message\"],\n \"htmlmsg\": htmlize(commit[\"commit\"][\"message\"])\n .replace(\"\\n\\n\", \"\\n\")\n .replace(\"\\n\", \"<br />\\n\"),\n \"branch\": branch,\n \"url\": commit[\"html_url\"][:-20], # chomp to make shorter\n \"i\": len(links) + 1,\n }\n links.append(\"[%(i)s] %(url)s\" % data)\n txt.append(\n mywrap(\" %(stamp)s[%(i)s] <%(branch)s> %(msg)s\" % data)\n )\n html.append(\n (\n '<li><a href=\"%(url)s\">%(stamp)s</a> '\n \"&lt;%(branch)s&gt; %(htmlmsg)s</li>\\n\"\n )\n % data\n )\n\n if len(txt) == 1:\n txt = txt[0] + \" No code commits found in previous 24 Hours\"\n html = html[0] + (\n \"<strong>No code commits found \" \"in previous 24 Hours</strong>\"\n )\n else:\n txt = \"\\n\".join(txt) + \"\\n\\n\" + \"\\n\".join(links)\n html = html[0] + \"<ul>\" + \"\\n\".join(html[1:]) + \"</ul>\"\n\n return txt + \"\\n\\n\", html + \"<br /><br />\"", "def tags(self) -> Optional[Any]:\n return pulumi.get(self, \"tags\")", "def find_tags(self, commit, repo):\n ref_dict = repo.repo.refs.as_dict()\n tags = []\n for tag, tag_id in [(t, ref_dict[t]) for t in repo.tags]:\n obj, obj_id = repo.repo[tag_id], None\n if isinstance(obj, Tag):\n _, obj_id = obj.object\n if isinstance(obj, Commit):\n obj_id = obj.id\n if commit.id == obj_id:\n tags.append((tag, obj))\n return tags", "def _find_latest():\n try:\n db = get_master_collection()\n service_details = db.find({\"master.key\": \"release\"}).sort([(\"master.value\", pymongo.DESCENDING)]).limit(1)\n for service in service_details:\n for r in sorted(service[\"master\"][\"value\"], reverse=True):\n latest_release = r\n build_list = service[\"master\"][\"value\"][r]\n break\n break\n\n latest_rel_num = str(latest_release).replace(\"_\", \".\")\n build_list = _natural_sort(build_list)\n for build in build_list:\n latest_build = build\n break\n\n latest_build_num = latest_build\n second_latest_build_num = int(latest_build_num) - 1\n latest = {\"latest_val\": latest_rel_num + \"_\" + latest_build_num,\n \"second_latest_val\": latest_rel_num + \"_\" + str(second_latest_build_num)}\n except Exception as e:\n logger.error(\"Exception in _find_latest : \" + str(e))\n return latest", "def gettime(self, tag):\n cmd = ['git', 'log', '--pretty=format:\"%ct\"', 
\"-1\", tag]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n if data == b'':\n return [], []\n time_stamp = []\n this_tag = []\n for seconds in data.decode(\"utf-8\").split(\"\\n\"):\n month = round((int(seconds.strip('\"')) - ReleaseTime.base) / ReleaseTime.month_time)\n if month not in time_stamp:\n time_stamp.append(month)\n this_tag.append(tag[0:4])\n else:\n pass\n return time_stamp, this_tag", "def get_tags(self, tags, filename):\n return self.get_tags_batch(tags, [filename])[0]", "def _get_recent_feed(cls, target):\n response = feedparser.parse(\n target.link, modified=target.last_modified, etag=target.etag\n )\n\n # Some of the feeds offer one of these two tags and others none of them.\n modified = cls._time_to_date(response.get(\"modified_parsed\"))\n etag = response.get(\"etag\")\n\n # In case RSS feed doesn't support modified tag, we compute it artificially.\n if not modified:\n response.entries, modified = cls._entries_after_date(\n response.entries, target.last_modified\n )\n\n return response, modified, etag", "def __gitDescribeTag(self):\n self.vcs.gitDescribe(self.project.getProjectPath(), [])", "def recent_media(self, tag, max_tag_id = None, min_tag_id = None, count = 10):\n\n url = \"https://api.instagram.com/v1/tags/{0}/media/recent?access_token={1}\".format(tag, self.access_token)\n\n if max_tag_id:\n url += \"&max_tag_id=\" + str(max_tag_id)\n if min_tag_id:\n url += \"&min_tag_id=\" + str(min_tag_id)\n\n request = requests.get(url)\n return request.json()", "def get_latest(self) -> tuple:\n raise NotImplementedError", "def query_repo_tip(repo_url):\n url = \"%s?tipsonly=1\" % (JSON_PUSHES % {\"repo_url\": repo_url})\n recent_commits = retry(requests.get, args=(url,)).json()\n tip_id = sorted(recent_commits.keys())[-1]\n return Push(push_id=tip_id, push_info=recent_commits[tip_id])", "def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"", "def gitlab_terraform_modules_taglist() -> list:\n gl = gitlab.Gitlab(URL, PRIVATE_TOKEN, ssl_verify=False)\n # test connection\n try:\n if gl.projects.list():\n pass\n except Exception:\n print(\"could not reach gitlab\")\n sys.exit(1)\n\n terraform_group = None\n for group in gl.groups.list(all=True):\n if group.name == GITLAB_GROUP_TF_MODS:\n terraform_group = group\n\n terraform_modules_latest_tags = {}\n for tfmod_project in terraform_group.projects.list(all=True):\n 
project = gl.projects.get(tfmod_project.id)\n tag_list = [tag.name for tag in project.tags.list()]\n highest_tag = find_highest_tag(tag_list)\n\n if highest_tag is not None:\n terraform_modules_latest_tags[project.name] = highest_tag\n\n return terraform_modules_latest_tags", "def git_tag(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"git_tag\")", "def _get_cache_tags(self):\n try:\n project = self._get_project()\n version = self._get_version()\n except Exception:\n log.warning(\n \"Error while retrieving project or version for this view.\",\n exc_info=True,\n )\n return []\n\n tags = []\n if project:\n tags.append(project.slug)\n if project and version:\n tags.append(get_cache_tag(project.slug, version.slug))\n if project and self.project_cache_tag:\n tags.append(get_cache_tag(project.slug, self.project_cache_tag))\n return tags", "def get_latest(self):\n latest_bt = self.get_latest_bt()\n latest_wifi = self.get_latest_wifi()\n return [latest_bt, latest_wifi] # Return a list", "def get_top_tags(tags):\n # ~3x faster\n return list(sorted(tags.items(), key=itemgetter(1), reverse=True))[:TOP_NUMBER]", "def api_get_tags(request):\n\n # TODO Get favorite tags for the given user ID\n\n tags = Tag.objects.get_not_empty_tags()\n tag_names = []\n for tag in tags:\n tag_names.append(tag.name)\n\n return HttpResponse(content=json.dumps(tag_names))", "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "def get_tags_list(*args, **kwargs):\n return Tag.objects.active()", "async def getTags(self, body=\"\"):\n payload = {}\n \n # Parameter validation\n schema = ContentValidator.getTags()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(api_url=self._urls[\"getTags\"], proccessed_params=\"\"\"{\"required\":[],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + base64.b64encode(\"{}:{}\".format(self._conf.applicationID, self._conf.applicationToken).encode()).decode()\n }\n if self._conf.locationDetails:\n headers[\"x-location-detail\"] = ujson.dumps(self._conf.locationDetails)\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(urlparse(self._urls[\"getTags\"]).netloc, \"get\", await create_url_without_domain(\"/service/application/content/v1.0/tags\", ), query_string, headers, body, exclude_headers=exclude_headers), data=body, cookies=self._conf.cookies)", "def getTags(number=None):", "def latest(cls):\n releases = cls.query.all()\n if len(releases) == 0:\n return None\n\n releases.sort(key=lambda x: x.version)\n return releases[-1]", "def test_none_version_return(self):\n version_prefix = 'v'\n tags = []\n for i in range(15):\n tags.append(_TagInfo('1.0.' 
+ str(i),\n 'commit' + str(i),\n version_prefix))\n for i in range(15):\n shuffle(tags)\n self.assertEqual(_seek_last_semver_tag(tags, version_prefix), None)", "def list(self):\n return self._post(\n request='list',\n uri=ApiUri.TAGS.value,\n ).get('tags')", "def get_pull_requests():\n pull_requests = []\n url_base = f\"https://github.com/{GITHUB_OWNER}/{GITHUB_REPO}/pull/\"\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n pulls = repo.get_pulls(base=\"main\", state=\"closed\")\n last_release_date = repo.get_latest_release().published_at\n for pull in pulls:\n if not pull.draft and pull.closed_at > last_release_date and pull.merged:\n log_line = f\"* {pull.title} [#{pull.number}]({url_base}{pull.number})\"\n pull_requests.append(log_line)\n return pull_requests", "def tags(self) -> pulumi.Output[Optional[Sequence['outputs.ContainerTag']]]:\n return pulumi.get(self, \"tags\")", "def pull(self, repo, tag):\n check_blacklist(repo)\n logger.info(\"Pulling Docker image {}:{}\".format(repo, tag))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.pull(repo, tag=tag, stream=True, insecure_registry=True)\n log_output(stream)", "def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"tags\")", "def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"tags\")", "def get_tag(repo: str = None) -> str:\n if not repo:\n repo = '.'\n repo=repo.replace('\\\\','/')\n cmd = ['git', 'describe']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n return None\n tag: str = result.stdout.decode(\"utf-8\")\n tag = tag.replace('\\r', '').replace('\\n', '')\n return tag", "def __list_all_tags(self):\n\n tags_dict = get_data.get_tagnames_dict()\n if len(tags_dict) > 0:\n first_str = 'tag'\n second_str = 'top posts scraped'\n third_str = 'recent posts scraped'\n descriptor = '{:<40} {:<20} {}'\n print('')\n print(descriptor.format(first_str, second_str, third_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-',\n len(third_str) * '-'))\n for number, tag in tags_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + tag\n second = str(get_data.get_top_tag_post_count(tag))\n third = str(get_data.get_recent_tag_post_count(tag))\n print(descriptor.format(first, second, third))\n else:\n print('no tags found in the database')" ]
[ "0.7582535", "0.67998123", "0.65141267", "0.64969236", "0.6446542", "0.640628", "0.6393738", "0.6162547", "0.5983191", "0.5975903", "0.59552914", "0.5918902", "0.5868156", "0.58608156", "0.5853621", "0.58516884", "0.5850484", "0.5837113", "0.5833859", "0.5821743", "0.5812356", "0.5806186", "0.57818055", "0.57770026", "0.5776528", "0.5763899", "0.5745858", "0.5714461", "0.5693107", "0.5693107", "0.5685411", "0.5671767", "0.5670752", "0.5670484", "0.5667299", "0.5634412", "0.56219286", "0.56138104", "0.56053126", "0.5604821", "0.55998707", "0.5598967", "0.5584659", "0.558324", "0.55831546", "0.55811805", "0.5572673", "0.5555265", "0.5551048", "0.5509506", "0.54892135", "0.54842985", "0.5474258", "0.5471586", "0.5457578", "0.54419315", "0.54285437", "0.5407365", "0.53965217", "0.5395175", "0.539075", "0.53846717", "0.538242", "0.53807193", "0.5377454", "0.5375776", "0.53740543", "0.537322", "0.53707117", "0.5368166", "0.5357558", "0.5347169", "0.5339842", "0.5336568", "0.5333476", "0.5322719", "0.53222364", "0.5313195", "0.52984834", "0.52930534", "0.52922785", "0.5289878", "0.52679354", "0.52666754", "0.5263095", "0.52508706", "0.52508706", "0.52469635", "0.52437407", "0.52404165", "0.5240398", "0.5240159", "0.5215799", "0.52154267", "0.52111197", "0.52049893", "0.52049893", "0.52049893", "0.52031666", "0.52027303" ]
0.6986198
1
Return github tag release URL as string
def get_url_tag_release(self, release_num):

        url = 'https://{}/{}/{}/releases/tag/{}'.format(
            HOST_GITHUB,
            self.repo,
            self.product,
            release_num
        )
        return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url", "def github_url(self):\n return self.github.replace('.git', '')", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def ticket_url_or_tag(tag: str) -> str:\n url = _url_if_url(get_url_from_tag, tag)\n return _value_with_url(tag, url) if url else tag", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def get_archive_url(url, branch='master', release=None):\n git_url = trim_repo_url(url)\n fragment = None\n file = git_url.split(\"/\")[-1]\n \n if release:\n fragment = \"/archive/{}.zip\".format(release)\n else:\n fragment = \"/archive/{}.zip\".format(branch)\n \n return file, git_url+fragment", "def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag", "def get_api_url(self):\n\n url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \\\n self.repo, self.product)\n return url", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def get_github_library_version(name, url):\n while True:\n # For the release, make sure the default versions do not include \"-dev\"\n version = raw_input(\"Version of %s?: \" % name)\n if not url_exists(\"%s/releases/tag/%s\" % (url, version)):\n print_warning(\"The version of %s is not valid. 
Ensure you've chosen a correct value by checking the \"\n \"GitHub releases for exact naming at \"\n \"%s/releases before you continue.\" % (name, url))\n return version", "def repo_tag(self):\n return '%s/gcloud/%s' % (constants.APPENGINE_REGISTRY, self._tag)", "def pr_link(repo, id):\n\n return '[#{id}](https://github.com/{repo}/pull/{id})'.format(id=id, repo=repo)", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def get_repo_url(package_name):\n package_info = get_package_info(package_name)\n\n if package_info and package_info.get('links'):\n links = package_info['links']\n\n if links.get('repository'):\n return links['repository']", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']", "def svn_url(svninfo=None):\n if svninfo is None:\n svninfo = svn_info()\n return svninfo.find('entry/url').text", "def svnurl(self):\r\n info = self.info()\r\n return py.path.svnurl(info.url)", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def repo_url(self):\n return self._repo_url", "def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )", "def get_tag(repo: str = None) -> str:\n if not repo:\n repo = '.'\n repo=repo.replace('\\\\','/')\n cmd = ['git', 'describe']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n return None\n tag: str = result.stdout.decode(\"utf-8\")\n tag = tag.replace('\\r', '').replace('\\n', '')\n return tag", "def github_name(self):\n return self.github_url.replace(\"https://github.com/\", '')", "def tag_release():\n # We're assuming that setup.py has already been updated\n # manually or using scripts/release/bump-version so the\n # current version in setup.py is the version number we should tag.\n version_number = get_current_version_number()\n click.echo(\"Tagging %s release\" % version_number)\n subprocess.check_call(\n ['git', 'tag', '-a', version_number,\n '-m', 'Tagging %s release' % version_number],\n )", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + 
name + \".git\"", "def get_last_release_id():\n url = \"https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\"\n try:\n with urlopen(url, timeout=10) as resp:\n return json.loads(resp.read().decode(\"utf-8\")).get(\"tag_name\", \"0\")\n except URLError as e:\n log(f\"YouTubeDLHelper error [get last release id]: {e}\")", "def get_release(request):\r\n\r\n release = raven.fetch_git_sha(os.path.dirname(os.path.dirname(__file__)))\r\n return HttpResponse(json.dumps({\"release\": release[:7]}))", "def version_link(self):\n release_link = url_for('data.data', selected_release=self.DATASET_RELEASE)\n return Markup(f\"<a href='{release_link}'>{self.DATASET_RELEASE}</a>\")", "def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version", "def query_repo_url(repo_name):\n return buildapi.query_repo_url(repo_name)", "def push_url(self):\n\n return maybe_string(C.git_remote_pushurl(self._remote))", "def getProjectURL():", "def get_changelog_url(repository_url, branch):\n changelog_url = f\"{repository_url}/blob/{branch}/CHANGES.txt\"\n requests_var = requests.get(changelog_url, timeout=30)\n if requests_var.status_code != 200:\n raise RuntimeError(f\"Page at URL {changelog_url} not found\")\n\n return changelog_url", "def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url", "def target_to_url(target):\n if is_wc(target):\n info = get_svninfo(target)\n return info[\"URL\"]\n return target", "def get_repository(post):\n pattern = re.compile(constants.REPOSITORY_REGEX)\n if \"links\" in post.json_metadata.keys():\n for link in post.json_metadata[\"links\"]:\n if link.startswith(\"/exit?url=\"):\n link = link[len(\"/exit?url=\"):]\n\n try:\n result = pattern.search(link).group(0)\n return result\n except AttributeError:\n continue\n else:\n for line in post.body.split():\n try:\n result = pattern.search(line).group(0)\n return result\n except AttributeError:\n continue\n\n return \"\"", "def git_remote_url(self):\n return self._git_remote_url", "def RepositoryUrl(name):\n repository = ndb.Key(Repository, name).get()\n if not repository:\n raise KeyError('Unknown repository name: ' + name)\n return repository.urls[0]", "def getBuildbotURL():", "def _get_base_url(self):\n template = config.baseurl_template\n # get distro name and arch\n base_url = template.format(\n host=config.gitbuilder_host,\n proj=self.project,\n pkg_type=self.pkg_type,\n arch=self.arch,\n dist=self.distro,\n flavor=self.flavor,\n uri=self.uri_reference,\n )\n return base_url", "def get_version(git_repo, commit):\n version = git_repo.rev_parse(commit, short=7)\n try:\n version = \"%s@%s\" % (git_repo.find_tag(commit), version)\n except GitRepositoryError:\n pass\n\n return version", "def test_release_tag(self) -> None:\n self.assertEqual(\"v3.14.15\", release_tag())", "def _get_download_url(model_name, version=__version__):\n\n version = \"0.0.0\" if \"vectors\" in model_name else version\n\n return (\n f\"https://github.com/mauna-ai/spacy-numberbatch/releases/download/\"\n f\"{version}/{model_name}-{version}.tar.gz\" )", "def tag_uri(self, name):\n return 'tag:%s,%d:%s' % (self.DOMAIN, datetime.datetime.now().year, name)", "def git_tag(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"git_tag\")", "def 
main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)", "def get_version_tag(self, version: str) -> str:\n return version", "def scm_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"scm_url\")", "def format_url(self, data):\n git_url = urlparse(data[\"git_url\"])\n\n url = \"oauth2:{0}@{1}\".format(data[\"token\"], git_url.netloc)\n return git_url._replace(netloc=url).geturl()", "def get_url(path, repo=None, rev=None, remote=None):\n with _make_repo(repo, rev=rev) as _repo:\n _require_dvc(_repo)\n out = _repo.find_out_by_relpath(path)\n remote_obj = _repo.cloud.get_remote(remote)\n return str(remote_obj.checksum_to_path_info(out.checksum))", "def generate_url(self, version: str, plat: Platform) -> str:\n platform = self.url_platform_mapping.get(plat.value, \"\")\n url = self.url_template.format(version=version, platform=platform)\n extension = \"gz\" if plat.is_macos else \"xz\"\n return f\"{url}.{extension}\"", "def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )", "def release(context):\n print(f\"Starting a release of v{IMAGE_VER} on GitHub!\")\n run_cmd(context, exec_cmd=\"git checkout main\", pty=False, error_message=\"Failed to checkout main!\")\n\n run_cmd(context, exec_cmd=\"git pull origin main\", pty=False, error_message=\"Failed to pull from origin/main\")\n\n run_cmd(\n context, exec_cmd=f\"git tag v{IMAGE_VER}\", pty=False, error_message=f\"Failed to create the tag 'v{IMAGE_VER}'!\"\n )\n\n run_cmd(context, exec_cmd=\"git push --tags\", pty=False, error_message=f\"Failed to push the tag 'v{IMAGE_VER}'!\")", "def get_tag(self, sha):\n return self.get_url_data(self.api_url + 'tags/' + sha)", "def svn_branch():\n return svn_url().split('/')[-1]", "def _get_unfurl_requirement_url(spec):\n if not spec:\n return spec\n if \"egg=unfurl\" in spec:\n # looks fully specified, just return it\n return spec\n\n url, sep, ref = spec.rpartition(\"@\")\n if sep:\n if ref:\n ref = \"@\" + ref\n else:\n ref = \"@\" + __version__()\n\n if not url:\n return \"git+https://github.com/onecommons/unfurl.git\" + ref + \"#egg=unfurl\"\n if not url.startswith(\"git+\"):\n return \"git+file://\" + os.path.abspath(url) + ref + \"#egg=unfurl\"\n else:\n return url + ref + \"#egg=unfurl\"", "def pr_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pr_url\")", "def git_ref_from_eups_version(version: str) -> str:\n return version.split(\"+\")[0]", "def SvnUrl(self):\n return self._module.url", "def get_url(name, version=None):\n global urls\n\n # Only download the URL look up table once.\n if urls is None:\n from six.moves.urllib.request import urlopen\n import json\n f = urlopen(\"http://sncosmo.github.io/data/urls.json\")\n reader = codecs.getreader(\"utf-8\")\n urls = json.load(reader(f))\n f.close()\n\n key = name if (version is None) else \"{0}_v{1}\".format(name, version)\n\n return urls[key]", "def get_pypi_url(requirement, version=None, base_url=PYPI_BASE_URL):\n return \"{base}/{req}/json\".format(base=base_url, req=requirement, version=version)", "def svn_info_t_repos_root_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_repository_uri(self) -> 
str:\n raise NotImplementedError", "def package_name_from_url(url):\n\n url_repo_part = url.split('/')[-1]\n\n if url_repo_part.endswith('.git'):\n return url_repo_part[:-4]\n\n return url_repo_part", "def build_image_name(self, tag):\n return self.repository_name + ':' + tag", "def pull(release):\n image = f\"breqwatr/rsyslog:{release}\"\n ecr.pull(image)", "def scm_url(self):\n return self._data.get('scm_url')", "def revision_url_template(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"revision_url_template\")", "def build_url_long(self, obj):\n if obj.slug:\n url = self.request.build_absolute_uri(reverse('build_repo', args=(obj.slug,)))\n return '<a href=\"%s\" target=\"_blank\">%s<a>' % (url, url)\n else:\n return ''", "def _get_releaseinfo_str(version):\n opts = {}\n f = StringIO.StringIO()\n opts['version'] = version\n opts['date'] = get_git_log_info(\"%ci\")\n opts['comments'] = get_git_log_info(\"%b%+s%+N\")\n opts['commit'] = get_git_log_info(\"%H\")\n f.write(relfile_template % opts)\n return f.getvalue()", "def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date", "def repo_value(url):\n if url == '^':\n return url\n tup = urlsplit(url)\n if tup.scheme or tup.netloc:\n return urlunsplit(tup[:3]+('', ''))\n raise ValueError('URL %(url)r doesn\\'t contain a scheme '\n 'nor a hostname'\n % locals())", "def __get_url_and_name(self, arch: str):\n page = requests.get(self.releases_url)\n page_text = page.text\n soup = BeautifulSoup(page_text, features=\"html.parser\")\n regex = re.compile('frida-server-[0-9]{1,2}.[0-9]{1,2}.[0-9]{1,2}-android-' + arch, re.IGNORECASE)\n frida_server_name = soup.find(text=regex)[0:-3]\n release_version = re.findall(\"[0-9]{1,2}.[0-9]{1,2}.[0-9]{1,2}\", frida_server_name)[0]\n return (self.releases_url + '/download/' + release_version + '/' + frida_server_name + \".xz\"), frida_server_name", "def get_ver():\n import subprocess\n\n proc = subprocess.run(\n [\"git\", \"describe\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n if not proc.returncode == 0:\n return\n v = proc.stdout.decode().strip()\n if \"-\" not in v:\n ret = v\n else:\n csum = v[v.rindex(\"-\") + 1 :]\n base = v[: v.rindex(\"-\")]\n count = base[base.rindex(\"-\") + 1 :]\n tag = base[: base.rindex(\"-\")]\n ret = f\"{tag}.post{count}+{csum}\"\n return ret", "def build_gitlab_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://gitlab.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/-/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, 
subpath=subpath)\n\n return url", "def github_svn_rev2hash(tag: str, rev): # pragma: no cover\n uri = f'https://github.com/wikimedia/{tag}/!svn/vcc/default'\n request = fetch(uri, method='PROPFIND',\n data=\"<?xml version='1.0' encoding='utf-8'?>\"\n '<propfind xmlns=\\\"DAV:\\\"><allprop/></propfind>',\n headers={'label': str(rev),\n 'user-agent': 'SVN/1.7.5 {pwb}'})\n dom = xml.dom.minidom.parse(BytesIO(request.content))\n hsh = dom.getElementsByTagName('C:git-commit')[0].firstChild.nodeValue\n date = dom.getElementsByTagName('S:date')[0].firstChild.nodeValue\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n return hsh, date", "def tag_match_to_url(tag):\n # TODO(fsiddi) when moving to Python 3.7, specify the type of tag (re.Match)\n tag_name = tag.group(0)\n tag_url = reverse('posts_list_tag', kwargs={'tag_name': tag_name[1:]})\n return f'<a href=\"{tag_url}\">{tag_name}</a>'", "def get_release_notes(self):\n\n notes = self.output.get_header('RELEASE NOTES')\n notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \\\n self.repo, self.product) + '\\n'\n\n notes += self.output.get_sub_header('COMPARISONS')\n notes += self.get_comparison(self.latest_tags[0][VERS],\n self.latest_tags[1][VERS])\n\n if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1):\n notes += self.get_comparison(self.latest_tags[1][VERS],\n self.latest_tags[2][VERS])\n\n if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW:\n notes += self.get_comparison(self.latest_tags[2][VERS],\n self.latest_tags[3][VERS])\n\n tag_data = self.get_tag(self.latest_tags[3][SHA])\n\n notes += self.output.get_sub_header('TAGS')\n notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\\n'\n notes += self.get_url_tag_commit(tag_data[\"object\"][\"sha\"]) + '\\n'\n\n changelog = self.get_changelog(tag_data[\"object\"][\"sha\"])\n if changelog:\n notes += self.output.get_sub_header('CHANGELOG')\n notes += changelog\n return notes", "def git_remote(git_repo):\n github_token = os.getenv(GITHUB_TOKEN_KEY)\n if github_token:\n return 'https://{0}@github.com/{1}'.format(\n github_token, git_repo)\n return '[email protected]:{0}'.format(git_repo)", "def test_parse_package_url():\n rv = versioning.to_remote_version('fake', '1.0.1-alpha.1', 'fake-bucket')\n rv_url = versioning.parse_package_url(rv.url)\n assert rv == rv_url, 'Expect URL parsing to be consistent.'", "def get_url(self):\n if self.url:\n return self.url\n # if we have a uuid and happen to know the URL for it, use that\n elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):\n self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n return self.url\n # if we've only seen one rep, use that (a guess, but an educated one)\n elif not self.uuid and len(PathIdentifier.repo_hints) == 1:\n uuid, root = PathIdentifier.repo_hints.items()[0]\n if uuid:\n self.uuid = uuid\n PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self\n self.url = root + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n report(\"Guessing that '%s' refers to '%s'\" % (self, self.url))\n return self.url\n else:\n error(\"Cannot determine URL for '%s'; \" % self +\n \"Explicit source argument (-S/--source) required.\\n\")", "def _to_dockerfile_url(image):\n path = \"/\".join((image.platform, image.release, image.architecture, \"Dockerfile\"))\n return git.get_github_blob_url(path, ref=f\"v{image.version}\")", "def get_mozilla_dmg_url(self, base_url, product_name, release, locale):", "def 
get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "def get_url() -> str:\n parser = ArgumentParser()\n\n parser.add_argument('--url',\n type=str,\n help='Url to download log file')\n\n args = parser.parse_args()\n url = args.url\n return url", "def get_version():\n import subprocess\n proc = subprocess.Popen(\n 'hg log -r tip --template \"{latesttagdistance}\"',\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n pending, _ = proc.communicate()\n return \"%(tag)sd%(pending)s\" % dict(tag=config.TAG, pending=pending)", "def build_bitbucket_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://bitbucket.org/{namespace}/{name}\".format(\n namespace=namespace, name=name\n )\n if version:\n url = \"{url}/src/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def get_geckodriver_url(version: str) -> str:\n if _ARCHITECTURE == \"other\": # or platform BSD\n return f\"https://github.com/mozilla/geckodriver/archive/{version}.{_COMPRESSION}\"\n else:\n return f\"https://github.com/mozilla/geckodriver/releases/download/{version}\" \\\n f\"/geckodriver-{version}-{_PLATFORM}{_ARCHITECTURE}.{_COMPRESSION}\"", "def get_repo_url(repo, access_protocol, github_login):\n prop = {\n 'https': repo.clone_url,\n 'ssh': repo.ssh_url\n }[access_protocol]\n if access_protocol == 'https' and github_login:\n # we were provided explicit github login. 
For ssh access it is\n # impossible to specify different login within ssh RI, but it is\n # possible to do so for https logins\n url = URL(prop)\n assert url.scheme in ('http', 'https')\n url.username = github_login\n prop = url.as_str()\n return prop", "def tag(self):\n if self.method == 'buildArch':\n # Note: buildArch tag will be an int here.\n return self.params[1]\n if self.method in ('createdistrepo', 'distRepo', 'newRepo', 'runroot',\n 'tagBuild', 'waitrepo'):\n return self.params[0]\n if self.method == 'tagNotification':\n return self.params[2]\n if self.method == 'buildMaven':\n return self.params[1]['name']", "def scm_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scm_url\")", "def test_url_with_access_token():\n assert url_with_access_token(\n \"access\", \"http://github.com/mitodl/release-script.git\"\n ) == \"https://[email protected]/mitodl/release-script.git\"", "def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version", "def do_version_tag(args, image_name_tag, image_name):\n if args.versiontag is True:\n date_stamp = \"{:%Y%m%d%H%M%S}\".format(datetime.now())\n version_tag = args.tag + '-' + date_stamp\n image_name_version_tag = f\"{image_name}:{version_tag}\"\n return_code = tag(image_name_tag, image_name_version_tag)\n if return_code == 0:\n push(args, image_name_version_tag)", "def upload_asset(ctx, asset, release):\n\n try:\n\n gh = ctx.obj.github\n\n log.echo('Uploading {} to release {}...'\n .format(os.path.basename(asset), release), break_line=False)\n asset_url = gh.upload_asset(asset=asset, release=release)\n log.checkmark()\n log.echo('Uploaded asset: {}'.format(asset_url))\n return asset_url\n except BaseException as _:\n log.xmark()\n raise", "def asset_url(filename=\"\", version=True):\n if filename.startswith(\"http\") or filename.startswith(\"/\"):\n return filename\n else:\n if config.static_url:\n return_url = \"http://\" + config.static_url\n else:\n return_url = \"/static\" # web.ctx.home + \"/static\"\n if filename:\n return_url += \"/\" + filename\n if version:\n return_url += \"?\" + config.asset_version\n return return_url" ]
[ "0.7346837", "0.7119125", "0.6906928", "0.6849196", "0.6814661", "0.6633057", "0.6601647", "0.65425444", "0.65085566", "0.6498361", "0.6480949", "0.6473229", "0.63616604", "0.63453585", "0.63183016", "0.62817633", "0.62013495", "0.6147767", "0.61303365", "0.61104536", "0.6100949", "0.60963523", "0.6080163", "0.6078019", "0.6056599", "0.6055186", "0.6023433", "0.6011335", "0.6003082", "0.5991574", "0.5969723", "0.5959156", "0.5932608", "0.5930282", "0.5917647", "0.5891315", "0.5858393", "0.5855818", "0.5826006", "0.5824109", "0.5822342", "0.58221585", "0.58209145", "0.5815837", "0.5801863", "0.57795656", "0.577137", "0.57492787", "0.5748756", "0.5741756", "0.5738791", "0.5704775", "0.5702158", "0.5697762", "0.5692655", "0.56879544", "0.56642747", "0.56637335", "0.5663466", "0.5605411", "0.5604391", "0.5603063", "0.5575149", "0.5567003", "0.555212", "0.5551829", "0.55398506", "0.5525016", "0.5507452", "0.54973316", "0.5485795", "0.5478691", "0.5468487", "0.54648954", "0.54632103", "0.54544985", "0.545254", "0.5449013", "0.5436167", "0.54234767", "0.54217666", "0.5421462", "0.5420082", "0.54191375", "0.54171354", "0.54162395", "0.5414159", "0.5408944", "0.5408355", "0.5404013", "0.5402442", "0.5394271", "0.5376126", "0.5371226", "0.53681684", "0.5362415", "0.53620225", "0.5355566", "0.5353513", "0.5352783" ]
0.7984513
0
Return github tag commit SHA URL as string
def get_url_tag_commit(self, git_sha):

        url = 'https://{}/{}/{}/commit/{}'.format(
            HOST_GITHUB,
            self.repo,
            self.product,
            git_sha
        )
        return url
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def github_url(self):\n return self.github.replace('.git', '')", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def github_svn_rev2hash(tag: str, rev): # pragma: no cover\n uri = f'https://github.com/wikimedia/{tag}/!svn/vcc/default'\n request = fetch(uri, method='PROPFIND',\n data=\"<?xml version='1.0' encoding='utf-8'?>\"\n '<propfind xmlns=\\\"DAV:\\\"><allprop/></propfind>',\n headers={'label': str(rev),\n 'user-agent': 'SVN/1.7.5 {pwb}'})\n dom = xml.dom.minidom.parse(BytesIO(request.content))\n hsh = dom.getElementsByTagName('C:git-commit')[0].firstChild.nodeValue\n date = dom.getElementsByTagName('S:date')[0].firstChild.nodeValue\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n return hsh, date", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def cmd_get_sha(ref):\n return ['git', 'rev-parse', ref]", "def get_url_tag_release(self, release_num):\n\n url = 'https://{}/{}/{}/releases/tag/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n release_num\n )\n return url", "def get_version(git_repo, commit):\n version = git_repo.rev_parse(commit, short=7)\n try:\n version = \"%s@%s\" % (git_repo.find_tag(commit), version)\n except GitRepositoryError:\n pass\n\n return version", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag", "def get_tag(repo: str = None) -> str:\n if not repo:\n repo = '.'\n repo=repo.replace('\\\\','/')\n cmd = ['git', 'describe']\n result = _run_git(cmd, repo=repo, expect_stderr=True)\n if not result:\n return None\n tag: str = result.stdout.decode(\"utf-8\")\n tag = tag.replace('\\r', '').replace('\\n', '')\n return tag", "def get_hash(repo, ref='HEAD'):\n return subprocess.check_output(['git', 'rev-parse', '--verify', ref],\n cwd=repo).rstrip()", "def get_tag(self, sha):\n return self.get_url_data(self.api_url + 'tags/' + sha)", "def git_sha1_commit():\n return local('git rev-parse --short HEAD', capture=True)", "def sha(location, rev):\n ensure_dir(location)\n with utils.cd(location):\n cmd = '/usr/bin/git rev-parse --verify {}'.format(rev)\n return subprocess.check_output(cmd, shell=True).strip()", "def _tag_to_sha1(self):\n def get_sha1(url):\n # Ceph (and other projects) uses annotated tags for releases. 
This\n # has the side-effect of making git ls-remote return the sha1 for\n # the annotated tag object and not the last \"real\" commit in that\n # tag. By contrast, when a person (or a build system) issues a\n # \"git checkout <tag>\" command, HEAD will be the last \"real\" commit\n # and not the tag.\n # Below we have to append \"^{}\" to the tag value to work around\n # this in order to query for the sha1 that the build system uses.\n return repo_utils.ls_remote(url, \"%s^{}\" % self.tag)\n\n git_url = repo_utils.build_git_url(self.project)\n result = get_sha1(git_url)\n # For upgrade tests that are otherwise using ceph-ci.git, we need to\n # also look in ceph.git to lookup released tags.\n if result is None and 'ceph-ci' in git_url:\n alt_git_url = git_url.replace('ceph-ci', 'ceph')\n log.info(\n \"Tag '%s' not found in %s; will also look in %s\",\n self.tag,\n git_url,\n alt_git_url,\n )\n result = get_sha1(alt_git_url)\n\n if result is None:\n raise CommitNotFoundError(self.tag, git_url)\n return result", "def ticket_url_or_tag(tag: str) -> str:\n url = _url_if_url(get_url_from_tag, tag)\n return _value_with_url(tag, url) if url else tag", "def repo_tag(self):\n return '%s/gcloud/%s' % (constants.APPENGINE_REGISTRY, self._tag)", "def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", self._hash_updir(commit), commit) + \"/\"", "def get_git_revision_hash():\n return subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')", "def push_url(self):\n\n return maybe_string(C.git_remote_pushurl(self._remote))", "def gitstr():\n try:\n return \"%s\" % (open('.git/refs/heads/master').read().strip()[0:10])\n except FileNotFoundError:\n return \"\"\n except IndexError:\n return \"\"", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def get_commit():\n cmd = \"git rev-parse HEAD\"\n result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)\n return result.stdout.decode(\"utf-8\").strip()", "def get_commit_hash():\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n return subprocess.check_output(args).strip().decode()", "def pr_link(repo, id):\n\n return '[#{id}](https://github.com/{repo}/pull/{id})'.format(id=id, repo=repo)", "def github_name(self):\n return self.github_url.replace(\"https://github.com/\", '')", "def get_commit_message():\n return shell_output('git log HEAD -1 --pretty=%B')", "def _get_git_hash(self):\n try:\n with open(os.path.join(self._base_dir, '.git', 'HEAD'), 'r') as head_file:\n ref = head_file.read().strip()\n if ref[:5] == 'ref: ':\n with open(os.path.join(self._base_dir, '.git', ref[5:]), 'r') as commit_file:\n return commit_file.read().strip()\n else:\n return ref[5:]\n except Exception as err:\n self._logger.warning('Couldnt read the git commit hash: %s :: %s',\n err.__class__.__name__, err)\n return 'UNKNOWN'", "def get_commit_by_url(commit_url):\n commit_sql = \"SELECT * FROM github_commit WHERE url=?\"\n return dbutils.execute_query(commit_sql, (commit_url,), DATABASE_FILE)", "def git_hash():\n if not exists('qmk_firmware'):\n checkout_qmk()\n\n return open('qmk_firmware/version.txt').read().strip()", "def get_commit_hash(reference, directory=None):\n # Track remote branch\n if branch_exists(reference, local_only=False, directory=directory):\n if not branch_exists(reference, local_only=True, directory=directory):\n track_branches(reference, directory)\n cmd = 'git show-branch --sha1-name ' + reference\n 
out = check_output(cmd, shell=True, cwd=directory)\n return out.split('[')[1].split(']')[0]", "def get_url(path, repo=None, rev=None, remote=None):\n with _make_repo(repo, rev=rev) as _repo:\n _require_dvc(_repo)\n out = _repo.find_out_by_relpath(path)\n remote_obj = _repo.cloud.get_remote(remote)\n return str(remote_obj.checksum_to_path_info(out.checksum))", "def get_sha_from_ref(repo_url, reference):\n # Using subprocess instead of convoluted git libraries.\n # Any rc != 0 will be throwing an exception, so we don't have to care\n out = subprocess.check_output(\n [\"git\", \"ls-remote\", \"--exit-code\", repo_url, reference]\n )\n # out is a b'' type string always finishing up with a newline\n # construct list of (ref,sha)\n refs = [\n (line.split(b\"\\t\")[1], line.split(b\"\\t\")[0])\n for line in out.split(b\"\\n\")\n if line != b\"\" and b\"^{}\" not in line\n ]\n if len(refs) > 1:\n raise ValueError(\n \"More than one ref for reference %s, please be more explicit %s\"\n % (reference, refs)\n )\n return refs[0][1].decode(\"utf-8\")", "def get_git_hash() -> Optional[str]:\n rv = _git('rev-parse', 'HEAD')\n if rv:\n return rv[:6]", "def get_archive_url(url, branch='master', release=None):\n git_url = trim_repo_url(url)\n fragment = None\n file = git_url.split(\"/\")[-1]\n \n if release:\n fragment = \"/archive/{}.zip\".format(release)\n else:\n fragment = \"/archive/{}.zip\".format(branch)\n \n return file, git_url+fragment", "def git_tag(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"git_tag\")", "def get_git_revision_short_hash() -> str:\n try:\n #ghash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n\n # independent of pyNastran location as long as there is a git folder\n # what about if you use setup_user.py install?\n # what about if you don't have git?\n # can raise a subprocess.CalledProcessError, which means the return code != 0\n ghash = subprocess.check_output(['git', 'describe', '--always'],\n cwd=os.path.dirname(__file__))\n\n ghash = ghash.decode('utf-8').rstrip()\n except Exception:\n # git isn't installed\n ghash = 'no.checksum.error'\n return 'dev.%s' % ghash", "def git_remote_url(self):\n return self._git_remote_url", "def get_api_url(self):\n\n url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \\\n self.repo, self.product)\n return url", "def get_repository(post):\n pattern = re.compile(constants.REPOSITORY_REGEX)\n if \"links\" in post.json_metadata.keys():\n for link in post.json_metadata[\"links\"]:\n if link.startswith(\"/exit?url=\"):\n link = link[len(\"/exit?url=\"):]\n\n try:\n result = pattern.search(link).group(0)\n return result\n except AttributeError:\n continue\n else:\n for line in post.body.split():\n try:\n result = pattern.search(line).group(0)\n return result\n except AttributeError:\n continue\n\n return \"\"", "def get_latest_sha(repo):\n cwd = os.getcwd()\n command = \"git rev-list -1 HEAD -- {0}\".format(repo)\n os.chdir(repo)\n git_sha = process_helpers.run(command.split(\" \"))\n os.chdir(cwd)\n return git_sha.strip()", "def repo_url(self):\n return self._repo_url", "def get_repo_sha(base_dir):\n try:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=base_dir)\n return sha.decode('utf-8').strip()\n except Exception as e:\n print(\"Failed to get repo sha for '%s': %s\" % (base_dir, e))\n return \"\"", "def parse_ref(url_path):\n ref = url_path.lstrip('/')\n if not ref:\n ref = os.environ.get('DEFAULT_GIT_REF', 'HEAD').strip()\n return ref", "def format_url(self, data):\n git_url = 
urlparse(data[\"git_url\"])\n\n url = \"oauth2:{0}@{1}\".format(data[\"token\"], git_url.netloc)\n return git_url._replace(netloc=url).geturl()", "def get_ver():\n import subprocess\n\n proc = subprocess.run(\n [\"git\", \"describe\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n if not proc.returncode == 0:\n return\n v = proc.stdout.decode().strip()\n if \"-\" not in v:\n ret = v\n else:\n csum = v[v.rindex(\"-\") + 1 :]\n base = v[: v.rindex(\"-\")]\n count = base[base.rindex(\"-\") + 1 :]\n tag = base[: base.rindex(\"-\")]\n ret = f\"{tag}.post{count}+{csum}\"\n return ret", "def get_git_hash(git_dir, short=True):\n\n cwd = os.getcwd()\n os.chdir(git_dir)\n\n args = ['git', 'rev-parse', '--short', 'HEAD']\n if not short:\n args.remove('--short')\n\n ver = subprocess.check_output(args).strip('\\n')\n\n os.chdir(cwd)\n\n return ver", "def commit_msg(rev):\n return (\n subprocess.check_output([\"git\", \"show\", \"--pretty=format:%s\", \"-s\", rev])\n .decode()\n .strip()\n )", "def gitversion():\n import os\n from subprocess import Popen, PIPE, STDOUT\n origdir = os.getcwd()\n os.chdir(os.path.dirname(__file__))\n try:\n p = Popen(['git', \"describe\", \"--tags\", \"--dirty\", \"--always\"], stdout=PIPE, stderr=STDOUT)\n except EnvironmentError:\n return 'unknown'\n\n os.chdir(origdir)\n out = p.communicate()[0]\n if p.returncode == 0:\n #- avoid py3 bytes and py3 unicode; get native str in both cases\n return str(out.rstrip().decode('ascii'))\n else:\n return 'unknown'", "def _get_commit_sha() -> str:\n repo_root = os.path.join(os.path.dirname(__file__), '..', '..')\n repo = Repo(repo_root)\n if repo.is_dirty():\n warning_msg = 'The git repo is dirty. The commit sha for source code links will be incorrect.'\n if os.environ.get('CI', '0') == '0':\n # If developing locally, warn.\n warnings.warn(warning_msg)\n else:\n # If on CI, error.\n raise RuntimeError(warning_msg)\n return repo.commit().hexsha", "def GetTipOfTrunkRevision(git_url):\n parsed_url = urlparse.urlparse(git_url)\n path = parsed_url[2].rstrip('/') + '/+log/master?n=1&format=JSON'\n j = FetchUrlJson(parsed_url[1], path, ignore_404=False)\n if not j:\n raise GOBError(\n 'Could not find revision information from %s' % git_url)\n try:\n return j['log'][0]['commit']\n except (IndexError, KeyError, TypeError):\n msg = ('The json returned by https://%s%s has an unfamiliar structure:\\n'\n '%s\\n' % (parsed_url[1], path, j))\n raise GOBError(msg)", "def hash(self):\n return os.popen('git rev-parse HEAD').read().strip()", "def _get_git_commit_id():\n from git import Repo\n from os.path import split, dirname\n path = split(dirname(__file__))[0]\n commit_id = Repo(path).head.object.hexsha\n return commit_id[:8]", "def get_git_revision_hash() -> str:\n try:\n # We are not interested in gits complaints\n git_hash = subprocess.check_output(\n [\"git\", \"rev-parse\", \"HEAD\"], stderr=subprocess.DEVNULL, encoding=\"utf8\"\n )\n # ie. 
\"git\" was not found\n # should we return a more generic meta hash here?\n # like \"undefined\"?\n except FileNotFoundError:\n git_hash = \"git_not_available\"\n except subprocess.CalledProcessError:\n # Ditto\n git_hash = \"no_repository\"\n return git_hash.rstrip()", "def getversion_git(path=None):\n _program_dir = path or _get_program_dir()\n cmd = 'git'\n try:\n subprocess.Popen([cmd], stdout=subprocess.PIPE).communicate()\n except OSError:\n # some Windows git versions provide git.cmd instead of git.exe\n cmd = 'git.cmd'\n\n with open(os.path.join(_program_dir, '.git/config')) as f:\n tag = f.read()\n # Try 'origin' and then 'gerrit' as remote name; bail if can't find either.\n remote_pos = tag.find('[remote \"origin\"]')\n if remote_pos == -1:\n remote_pos = tag.find('[remote \"gerrit\"]')\n if remote_pos == -1:\n tag = '?'\n else:\n s = tag.find('url = ', remote_pos)\n e = tag.find('\\n', s)\n tag = tag[(s + 6):e]\n t = tag.strip().split('/')\n tag = f\"[{t[0][:-1]}] {'-'.join(t[3:])}\"\n dp = subprocess.Popen([cmd, '--no-pager',\n 'log', '-1',\n '--pretty=format:\"%ad|%an|%h|%H|%d\"',\n '--abbrev-commit',\n '--date=iso'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n info, _ = dp.communicate()\n info = info.decode(config.console_encoding).split('|')\n date = info[0][:-6]\n date = time.strptime(date.strip('\"'), '%Y-%m-%d %H:%M:%S')\n dp = subprocess.Popen([cmd, 'rev-list', 'HEAD'],\n cwd=_program_dir,\n stdout=subprocess.PIPE)\n rev, stderr = dp.communicate()\n rev = f'g{len(rev.splitlines())}'\n hsh = info[3] # also stored in '.git/refs/heads/master'\n if (not date or not tag or not rev) and not path:\n raise VersionParseError\n return (tag, rev, date, hsh)", "def git_ref_from_eups_version(version: str) -> str:\n return version.split(\"+\")[0]", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']", "def get_head_commit_hash(git_repo: Optional[Union[str, pathlib.Path]] = None\n ) -> str:\n if not git_repo:\n git_repo = get_chromium_src_path()\n\n if not isinstance(git_repo, pathlib.Path):\n git_repo = pathlib.Path(git_repo)\n\n _assert_git_repository(git_repo)\n\n return subprocess_utils.run_command(\n ['git', 'show', '--no-patch', f'--pretty=format:%H'], cwd=git_repo)", "def svn_client_commit_item_t_url_get(svn_client_commit_item_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_git_hash(revname):\n try:\n return check_output([\"git\", \"rev-parse\", revname],\n cwd=get_repo_dir()).strip()\n except:\n revname = \"origin/\" + revname\n return check_output([\"git\", \"rev-parse\", revname],\n cwd=get_repo_dir()).strip()", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"", "def cur_commit():\n result = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, stderr=PIPE, encoding=\"utf-8\",\n )\n result.check_returncode()\n return result.stdout.strip()", "def git_remote(git_repo):\n github_token = os.getenv(GITHUB_TOKEN_KEY)\n if 
github_token:\n return 'https://{0}@github.com/{1}'.format(\n github_token, git_repo)\n return '[email protected]:{0}'.format(git_repo)", "def get_changelog_url(repository_url, branch):\n changelog_url = f\"{repository_url}/blob/{branch}/CHANGES.txt\"\n requests_var = requests.get(changelog_url, timeout=30)\n if requests_var.status_code != 200:\n raise RuntimeError(f\"Page at URL {changelog_url} not found\")\n\n return changelog_url", "def get_git_hash(rev='HEAD'):\n\n git_hash = ''\n try:\n git_out = subprocess.check_output(['git', 'rev-parse', rev], universal_newlines=True)\n except subprocess.CalledProcessError:\n mylogger.exception(\"Couldn't determine the git hash!\")\n else:\n git_hash = git_out.strip()\n\n return git_hash", "def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def create_link(repository, project_name):\n beginning_url = \"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def _get_commit_info(commit: git.Commit, pretty_format: str) -> str:\n try:\n return commit.repo.git.show(commit.hexsha, pretty=f\"format:{pretty_format}\")\n except git.GitCommandError as error:\n raise PackitException(\n f\"Cannot find commit {commit.hexsha!r} to check its signature.\", error\n )", "def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def version_hash():\n git_hash = current_git_hash()\n return \"%s-%s\" % (__VERSION__, git_hash)", "def get_current_commit_sha():\n return check_output(\n \"git rev-parse HEAD\".split(\" \")\n ).decode('utf-8').strip()", "def get_commit_hash(self, directory):\n\n return (\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], cwd=directory)\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n )", "def svnurl(self):\r\n info = self.info()\r\n return py.path.svnurl(info.url)", "def query_repo_url(repo_name):\n return buildapi.query_repo_url(repo_name)", "def repo_value(url):\n if url == '^':\n return url\n tup = urlsplit(url)\n if tup.scheme or tup.netloc:\n return urlunsplit(tup[:3]+('', ''))\n raise ValueError('URL %(url)r doesn\\'t contain a scheme '\n 'nor a hostname'\n % locals())", "def get_repository_uri(self) -> str:\n raise NotImplementedError", "async def _get_commit(self: Self, checkout_dir: Path) -> str:\n git_sha_process = await create_subprocess_exec(\n *[\"git\", \"rev-parse\", \"HEAD\"],\n cwd=checkout_dir,\n stdout=PIPE,\n )\n git_sha_stdout, _ = await 
git_sha_process.communicate()\n assert (\n await git_sha_process.wait() == 0\n ), f\"Failed to retrieve commit sha at {checkout_dir}\"\n return git_sha_stdout.decode().strip()", "def fetch_remote_hashcode(self, path):\n\t\treturn self.fetch_repo_file(\"/hash/\" + path.replace(\"packages/\", \"\")).decode('utf-8').strip()", "def get_repo_url(package_name):\n package_info = get_package_info(package_name)\n\n if package_info and package_info.get('links'):\n links = package_info['links']\n\n if links.get('repository'):\n return links['repository']", "def get_git_repo_url(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n\n try:\n repo = Repo(path, search_parent_directories=True)\n return next((remote.url for remote in repo.remotes), None)\n except Exception:\n return None", "def RepositoryUrl(name):\n repository = ndb.Key(Repository, name).get()\n if not repository:\n raise KeyError('Unknown repository name: ' + name)\n return repository.urls[0]", "def get_git_commit():\n try:\n commit = Popen([\"git\", \"describe\", \"--always\"],\n stdout=PIPE).communicate()[0].strip()\n # the following only works in Python 2.7\n # commit = subprocess.check_output(['git', 'describe']).strip()\n return commit\n except OSError:\n return \"unknown\"", "def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )", "def svn_client_commit_item2_t_url_get(svn_client_commit_item2_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def svn_branch():\n return svn_url().split('/')[-1]", "def scm_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"scm_url\")", "def get_commit_hash(repo_location, commit='origin/HEAD'):\n if not os.path.exists(pjoin(repo_location, '.git')):\n raise ValueError\n ret, out = spawn_get_output(\n ['git', 'rev-parse', commit], cwd=repo_location)\n if ret != 0:\n raise ValueError(\n f'failed retrieving {commit} commit hash '\n f'for git repo: {repo_location}')\n return out[0].strip()", "def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )", "def svn_url(svninfo=None):\n if svninfo is None:\n svninfo = svn_info()\n return svninfo.find('entry/url').text", "def _get_git_url_if_present(uri):\n if '#' in uri:\n # Already a URI in git repo format\n return uri\n try:\n from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError\n except ImportError as e:\n print(\"Notice: failed to import Git (the git executable is probably not on your PATH),\"\n \" so Git SHA is not available. 
Error: %s\" % e, file=sys.stderr)\n return uri\n try:\n # Check whether this is part of a git repo\n repo = Repo(uri, search_parent_directories=True)\n\n # Repo url\n repo_url = \"file://%s\" % repo.working_tree_dir\n\n # Sub directory\n rlpath = uri.replace(repo.working_tree_dir, '')\n if (rlpath == ''):\n git_path = repo_url\n elif (rlpath[0] == '/'):\n git_path = repo_url + '#' + rlpath[1:]\n else:\n git_path = repo_url + '#' + rlpath\n return git_path\n except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):\n return uri", "def _get_tag(self, current_path, commit_sha):\n command = [\"git\", \"describe\", \"--tags\", commit_sha]\n p = subprocess.Popen(\n command,\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n return output.decode(\"utf-8\").strip()\n elif \"fatal: no tags can describe '{}'.\".format(commit_sha) in error.decode(\n \"utf-8\"\n ).lower():\n return None\n elif \"fatal: no names found\" in error.decode(\"utf-8\").lower():\n return None\n else:\n raise Exception(\n \"Error [{}] occurred while executing [{}] command to get nearest tag associated with branch.\".format(\n error.decode(\"utf-8\"), \" \".join(command)\n )\n )", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def read_gitref():\n\n gitref = module.directory.joinpath('.gitref')\n if gitref.is_file():\n with gitref.open('r') as fp:\n return fp.read().strip()\n return '<unknown_gitref>'", "def get_repo_url(repo, access_protocol, github_login):\n prop = {\n 'https': repo.clone_url,\n 'ssh': repo.ssh_url\n }[access_protocol]\n if access_protocol == 'https' and github_login:\n # we were provided explicit github login. For ssh access it is\n # impossible to specify different login within ssh RI, but it is\n # possible to do so for https logins\n url = URL(prop)\n assert url.scheme in ('http', 'https')\n url.username = github_login\n prop = url.as_str()\n return prop", "def get_sha_commit(self):\n self.get_meta()\n filename = 'lastshacommit'\n # For unittest read from localfile\n if app.config['TEST']:\n filename = 'lastshacommittest'\n app.logger.debug(\"App config set to TEST. Reading shacommit from file \" + filename)\n\n try:\n handle = open(filename, \"r\")\n except Exception as e:\n app.logger.error(\"Error occurred when opening file \" + filename)\n app.logger.error(e)\n raise\n l_shacommit = handle.read().rstrip()\n handle.close()\n return l_shacommit" ]
[ "0.71973187", "0.69282585", "0.6792901", "0.6748358", "0.66634905", "0.6656634", "0.6605502", "0.6590671", "0.65770376", "0.6560338", "0.6532101", "0.65264523", "0.6505128", "0.6500491", "0.6499007", "0.64897436", "0.63229996", "0.6280397", "0.6262036", "0.6240915", "0.61932933", "0.6188964", "0.61703473", "0.61590856", "0.6153254", "0.6153118", "0.61395156", "0.61198765", "0.61079973", "0.60922277", "0.60853493", "0.6074295", "0.60622823", "0.6058751", "0.6054194", "0.603725", "0.6035806", "0.6033715", "0.6012759", "0.60038024", "0.59935415", "0.599341", "0.5992575", "0.59854186", "0.59759265", "0.59723234", "0.5971598", "0.5955242", "0.5951773", "0.594192", "0.5925065", "0.59182113", "0.59170836", "0.589543", "0.5893797", "0.5893546", "0.5885269", "0.58705133", "0.58670574", "0.586333", "0.585963", "0.5842698", "0.5840368", "0.58179975", "0.5806686", "0.5801591", "0.57920486", "0.57906055", "0.57677424", "0.5762209", "0.5761208", "0.5747229", "0.5731", "0.5722064", "0.57187086", "0.5709318", "0.56939244", "0.56784713", "0.5677574", "0.5674998", "0.5673029", "0.5668827", "0.5663604", "0.56468916", "0.56420803", "0.5639952", "0.56372243", "0.5632835", "0.56310356", "0.5603287", "0.5600398", "0.5599838", "0.5598523", "0.55738056", "0.55662787", "0.5564496", "0.55633026", "0.5562504", "0.5550208", "0.5526869" ]
0.8073723
0
Return github compare URL as string
def get_comparison(self, start, end): return 'https://{}/{}/{}/compare/{}...{}'.format(HOST_GITHUB, \ self.repo, self.product, start, end) + '\n'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def github_url(self):\n return self.github.replace('.git', '')", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def github_name(self):\n return self.github_url.replace(\"https://github.com/\", '')", "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def get_api_url(self):\n\n url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \\\n self.repo, self.product)\n return url", "def git_remote_url(self):\n return self._git_remote_url", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def get_url(self):\n return (\n \"https://raw.githubusercontent.com\"\n \"/benoitbryon/django-downloadview\"\n \"/b7f660c5e3f37d918b106b02c5af7a887acc0111\"\n \"/demo/demoproject/download/fixtures/hello-world.txt\"\n )", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"", "def format_url(self, data):\n git_url = urlparse(data[\"git_url\"])\n\n url = \"oauth2:{0}@{1}\".format(data[\"token\"], git_url.netloc)\n return git_url._replace(netloc=url).geturl()", "def get_public_url(self, doc_id, branch='master'):\n name, path_frag = self.get_repo_and_path_fragment(doc_id)\n return 'https://raw.githubusercontent.com/OpenTreeOfLife/' + name + '/' + branch + '/' + path_frag", "def GetGerritFetchUrl(host):\n return 'https://%s/' % host", "def repo_url(self):\n return self._repo_url", "def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url", "def get_repo_url(repo, access_protocol, github_login):\n prop = {\n 'https': repo.clone_url,\n 'ssh': repo.ssh_url\n }[access_protocol]\n if access_protocol == 'https' and github_login:\n # we were provided explicit github login. 
For ssh access it is\n # impossible to specify different login within ssh RI, but it is\n # possible to do so for https logins\n url = URL(prop)\n assert url.scheme in ('http', 'https')\n url.username = github_login\n prop = url.as_str()\n return prop", "def scm_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"scm_url\")", "def push_url(self):\n\n return maybe_string(C.git_remote_pushurl(self._remote))", "def svnurl(self):\r\n info = self.info()\r\n return py.path.svnurl(info.url)", "def build_github_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://github.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def getBuildbotURL():", "def get_url(\n self,\n *,\n context: Context,\n ) -> str:\n request = context['request']\n\n # We want to use a relative URL in the diff viewer as we will not be\n # re-rendering the page when switching between revisions.\n from reviewboard.urls import diffviewer_url_names\n match = request.resolver_match\n\n if match.url_name in diffviewer_url_names:\n return 'raw/'\n\n return local_site_reverse(\n 'raw-diff',\n request,\n kwargs={\n 'review_request_id': context['review_request'].display_id,\n })", "def query_repo_url(repo_name):\n return buildapi.query_repo_url(repo_name)", "def _transform_github_url(self):\n self.url = (\n self.url\n .replace('/blob/', '/')\n .replace(self.GITHUB_NETLOC, self.GITHUB_RAW_NETLOC)\n )", "def test_fix_repo_url():\n repo_url_git = 'git://github.com/Tinche/bower-cache'\n repo_url_https = 'https://github.com/Tinche/bower-cache'\n fixed_url_https = 'https://:@github.com/Tinche/bower-cache'\n assert repo_url_git == gitwrapper._fix_repo_url(repo_url_git)\n assert fixed_url_https == gitwrapper._fix_repo_url(repo_url_https)", "def getProjectURL():", "def _format_to_link(self, commit):\n return os.path.join(self.mount, \"commits-by-hash\", self._hash_updir(commit), commit) + \"/\"", "def get_url(path, repo=None, rev=None, remote=None):\n with _make_repo(repo, rev=rev) as _repo:\n _require_dvc(_repo)\n out = _repo.find_out_by_relpath(path)\n remote_obj = _repo.cloud.get_remote(remote)\n return str(remote_obj.checksum_to_path_info(out.checksum))", "def get_github_student_url(netid):\n url = 'https://raw.githubusercontent.com/CT-CS5356-Fall2017/cs5356/master/README.md'\n r = requests.get(url)\n assert r.ok\n text = r.text\n for l in text.split('\\n'):\n if netid in l:\n return extract_netid_and_url(l)\n return None, None, None", "def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return True", "def GetChangeUrl(host, change):\n return 'https://%s/a/%s' % (host, _GetChangePath(change))", "def scm_url(self):\n return self._data.get('scm_url')", "def create_link(repository, project_name):\n beginning_url = 
\"https://api.github.com/repos/\"\n separator_url = \"/\"\n end_url = \"/commits\"\n\n base_url = beginning_url+repository+separator_url+project_name+end_url\n return base_url", "def get_github_equivalent(self):\n\n if self.description:\n match_obj1 = re.search(r'(?<=Repository Name: )(.*?)(?={color})', self.description)\n match_obj2 = re.search(r'(?<=Issue Number: )(.*?)(?={color})', self.description)\n match_obj3 = re.search(r'(?<=Milestone: )(.*?)(?={color})', self.description)\n match_obj4 = re.search(r'(?<=github.com/)(.*?)(?=/)', self.description)\n if not any([match_obj1, match_obj2, match_obj3, match_obj4]):\n logging.warning(f'No GitHub link information was found in the description of issue {self.jira_key}')\n self.github_repo = match_obj1.group(0) if match_obj1 else None\n self.github_key = match_obj2.group(0) if match_obj2 else None\n self.milestone_name = match_obj3.group(0) if match_obj3 else None\n self.github_org = match_obj4.group(0) if match_obj4 else None", "def svn_url(svninfo=None):\n if svninfo is None:\n svninfo = svn_info()\n return svninfo.find('entry/url').text", "def _get_git_url_if_present(uri):\n if '#' in uri:\n # Already a URI in git repo format\n return uri\n try:\n from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError\n except ImportError as e:\n print(\"Notice: failed to import Git (the git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\" % e, file=sys.stderr)\n return uri\n try:\n # Check whether this is part of a git repo\n repo = Repo(uri, search_parent_directories=True)\n\n # Repo url\n repo_url = \"file://%s\" % repo.working_tree_dir\n\n # Sub directory\n rlpath = uri.replace(repo.working_tree_dir, '')\n if (rlpath == ''):\n git_path = repo_url\n elif (rlpath[0] == '/'):\n git_path = repo_url + '#' + rlpath[1:]\n else:\n git_path = repo_url + '#' + rlpath\n return git_path\n except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):\n return uri", "def repo_value(url):\n if url == '^':\n return url\n tup = urlsplit(url)\n if tup.scheme or tup.netloc:\n return urlunsplit(tup[:3]+('', ''))\n raise ValueError('URL %(url)r doesn\\'t contain a scheme '\n 'nor a hostname'\n % locals())", "def get_url(self):\n if self.url:\n return self.url\n # if we have a uuid and happen to know the URL for it, use that\n elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):\n self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n return self.url\n # if we've only seen one rep, use that (a guess, but an educated one)\n elif not self.uuid and len(PathIdentifier.repo_hints) == 1:\n uuid, root = PathIdentifier.repo_hints.items()[0]\n if uuid:\n self.uuid = uuid\n PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self\n self.url = root + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n report(\"Guessing that '%s' refers to '%s'\" % (self, self.url))\n return self.url\n else:\n error(\"Cannot determine URL for '%s'; \" % self +\n \"Explicit source argument (-S/--source) required.\\n\")", "def target_to_url(target):\n if is_wc(target):\n info = get_svninfo(target)\n return info[\"URL\"]\n return target", "def RepositoryUrl(name):\n repository = ndb.Key(Repository, name).get()\n if not repository:\n raise KeyError('Unknown repository name: ' + name)\n return repository.urls[0]", "def get_remote_url(self, alias):\n url = self.url_base + 'download/current/'\n if 
'interactions' in alias:\n url += \"interactors/\" + alias + '.txt'\n else:\n url += alias + '.txt'\n return url", "def get_repository_uri(self) -> str:\n raise NotImplementedError", "def svn_info_t_repos_root_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_repository_uri(self) -> str:\n try:\n url = subprocess.check_output(\n ['git', 'config', '--get', 'remote.origin.url']\n ).decode('utf-8').strip()\n return self.norm_uri(url)\n except subprocess.CalledProcessError as error:\n # no remote origin defined, log and continue\n logger.debug('Unable to get remote origin {}'.format(str(error)))\n return None", "def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename", "def svn_branch():\n return svn_url().split('/')[-1]", "def parse_ref(url_path):\n ref = url_path.lstrip('/')\n if not ref:\n ref = os.environ.get('DEFAULT_GIT_REF', 'HEAD').strip()\n return ref", "def gitstr():\n try:\n return \"%s\" % (open('.git/refs/heads/master').read().strip()[0:10])\n except FileNotFoundError:\n return \"\"\n except IndexError:\n return \"\"", "def pr_link(repo, id):\n\n return '[#{id}](https://github.com/{repo}/pull/{id})'.format(id=id, repo=repo)", "def _getRemoteUrlTheOldWay(self):\n utool = getUtility(IURLTool)\n if self.remote_url:\n return utool() + '/' + self.remote_url\n else:\n return utool()", "def get_changelog_url(repository_url, branch):\n changelog_url = f\"{repository_url}/blob/{branch}/CHANGES.txt\"\n requests_var = requests.get(changelog_url, timeout=30)\n if requests_var.status_code != 200:\n raise RuntimeError(f\"Page at URL {changelog_url} not found\")\n\n return changelog_url", "def git_remote(git_repo):\n github_token = os.getenv(GITHUB_TOKEN_KEY)\n if github_token:\n return 'https://{0}@github.com/{1}'.format(\n github_token, git_repo)\n return '[email protected]:{0}'.format(git_repo)", "def Url(self) -> str:", "def get_submission_remote_url(submission_id=str()):\n\n result = dict(status='info', urls=list(\n ), message=\"Remote identifiers not found or unspecified procedure.\")\n\n # get repository type, and use this to decide what to return\n\n try:\n repository = Submission().get_repository_type(submission_id=submission_id)\n except (IndexError, AttributeError) as error:\n Logger().error(error)\n result['status'] = 'error'\n result['message'] = 'Could not retrieve record'\n return result\n\n # sacrificing an extra call to the db, based on what is needed, than dumping all accessions to memory\n if repository == \"ena\":\n doc = Submission().get_collection_handle().find_one({\"_id\": ObjectId(submission_id)},\n {\"accessions.project\": 1})\n if not doc:\n return result\n\n prj = doc.get('accessions', dict()).get('project', list())\n if prj:\n result[\"urls\"].append(\n \"https://www.ebi.ac.uk/ena/data/view/\" + prj[0].get(\"accession\", str()))\n\n # generate for other repository types here\n\n return result", "def get_hash(repo, ref='HEAD'):\n return subprocess.check_output(['git', 'rev-parse', '--verify', ref],\n cwd=repo).rstrip()", "def test_get_url_on_diff_viewer_revision(self) -> None:\n self.assertEqual(\n self.action.get_url(context=self._create_request_context(\n url_name='view-diff-revision')),\n 'raw/')", "def scm_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scm_url\")", "def get_repo_url(package_name):\n package_info = get_package_info(package_name)\n\n if package_info and package_info.get('links'):\n links = package_info['links']\n\n if 
links.get('repository'):\n return links['repository']", "def svn_client_commit_item2_t_url_get(svn_client_commit_item2_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_project_page(self, name=None):\n project = self.get_project(name)\n url = project.http_url_to_repo\n if url.endswith('.git'):\n url = url[:-4]\n return url", "def test_url_with_access_token():\n assert url_with_access_token(\n \"access\", \"http://github.com/mitodl/release-script.git\"\n ) == \"https://[email protected]/mitodl/release-script.git\"", "def test_computed_url(self):\n t = self.create_request_object()\n self.assertEqual(\"metadata/libraries/Fixitol(Dev)/versions/1234\", t.url_path())", "def svn_client_commit_item_t_url_get(svn_client_commit_item_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def SvnUrl(self):\n return self._module.url", "def __get_repo_url_by_name(self, name, repos_list):\n for repo in repos_list:\n if repo['name'] == name:\n return repo['commits_url'].split('{')[0]", "def test_get_url_on_diff_viewer(self) -> None:\n self.assertEqual(\n self.action.get_url(context=self._create_request_context()),\n 'raw/')", "def build_gitlab_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://gitlab.com/{namespace}/{name}\".format(namespace=namespace, name=name)\n\n if version:\n url = \"{url}/-/tree/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def button_github(args):\n cell_source = args[\"cell_source\"]\n repo = get_arg_or_fail(args[\"user\"], \"repo\", \"<org/name>\")\n branch = args[\"user\"].get(\"branch\", \"master\")\n docs_dir, rel_path = split_doc_path(args[\"path\"])\n\n # Buttons use OSS URLs.\n if str(docs_dir) == \"g3doc/en\":\n docs_dir = pathlib.Path(\"site/en\")\n\n base_url = f\"github.com/{repo}/blob/{branch}\"\n this_url = \"https://\" + str(base_url / docs_dir / rel_path)\n\n if is_button_cell_re.search(cell_source) and cell_source.find(this_url) != -1:\n return True\n else:\n fail(\n f\"GitHub button URL doesn't match: {this_url}\",\n fix=fix.regex_between_groups_replace_all,\n fix_args=[r\"(href.*)http.*?(\\\\\\\".*GitHub-Mark-32px.png)\", this_url])", "def repo_name(git_url):\n name = git_url.split('/')[-1]\n\n if name.endswith('.git'):\n name = name[:-4]\n\n return name.lower()", "def workspace_url(self):\n return os.environ.get('TEAMRAUM_URL', '').strip('/')", "def _get_base_url(self):\n template = config.baseurl_template\n # get distro name and arch\n base_url = template.format(\n host=config.gitbuilder_host,\n proj=self.project,\n pkg_type=self.pkg_type,\n arch=self.arch,\n dist=self.distro,\n flavor=self.flavor,\n uri=self.uri_reference,\n )\n return base_url", "def test_fetch_valid_github_repo(self):\n url = 'https://github.com/ivacf/archi'\n repo = GitHubRepoFetcher().fetch(url)\n self.assertEqual('archi', repo['name'])", "def get_open_source_link(self):\n return self.bot_data_file[\"open_source_link\"]", "def get_repository(post):\n pattern = re.compile(constants.REPOSITORY_REGEX)\n if \"links\" in post.json_metadata.keys():\n for link in post.json_metadata[\"links\"]:\n if link.startswith(\"/exit?url=\"):\n link = link[len(\"/exit?url=\"):]\n\n try:\n result = pattern.search(link).group(0)\n return result\n except AttributeError:\n 
continue\n else:\n for line in post.body.split():\n try:\n result = pattern.search(line).group(0)\n return result\n except AttributeError:\n continue\n\n return \"\"", "def build_url_long(self, obj):\n if obj.slug:\n url = self.request.build_absolute_uri(reverse('build_repo', args=(obj.slug,)))\n return '<a href=\"%s\" target=\"_blank\">%s<a>' % (url, url)\n else:\n return ''", "def build_bitbucket_homepage_url(purl):\n purl_data = PackageURL.from_string(purl)\n\n namespace = purl_data.namespace\n name = purl_data.name\n version = purl_data.version\n subpath = purl_data.subpath\n\n if not (name and namespace):\n return\n\n url = \"https://bitbucket.org/{namespace}/{name}\".format(\n namespace=namespace, name=name\n )\n if version:\n url = \"{url}/src/{version}\".format(url=url, version=version)\n\n if subpath:\n url = \"{url}/{subpath}\".format(url=url, subpath=subpath)\n\n return url", "def pr_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pr_url\")", "def GetChangePageUrl(host, change_number):\n return 'https://%s/#/c/%d/' % (host, change_number)", "def fetch_remote_hashcode(self, path):\n\t\treturn self.fetch_repo_file(\"/hash/\" + path.replace(\"packages/\", \"\")).decode('utf-8').strip()", "def get_redmine_issue_url():\n win32clipboard.OpenClipboard()\n url = win32clipboard.GetClipboardData()\n win32clipboard.CloseClipboard()\n # check the URL http:xxx.xxx.xxx.xx:8080/redmine/issues/1234\n path, issue_num = os.path.split(url)\n path, issue = os.path.split(path)\n path, redmine = os.path.split(path)\n # check the URL is redmine issues\n if validators.url(url) and issue_num.isdigit() and issue=='issues' and redmine=='redmine' :\n URL = url;\n print(f'URL for redmine issue (get from Clipboard)')\n print(f'{URL}')\n else:\n print(f'Clipboard contents :')\n if validators.url(url):\n print(f'\\t{url}')\n else:\n print(f'\\tnon-url data')\n print(f'\\n')\n print(f'Usage: Copy (e.g. Ctrl+C) redmine issue URL (e.g. 
http://192.168.0.1:8080/redmine/issues/1234 ) to clipboard and execute script')\n URL = \"\"\n\n return URL", "def get_git_hash(revname):\n try:\n return check_output([\"git\", \"rev-parse\", revname],\n cwd=get_repo_dir()).strip()\n except:\n revname = \"origin/\" + revname\n return check_output([\"git\", \"rev-parse\", revname],\n cwd=get_repo_dir()).strip()", "def get_commit_hash(reference, directory=None):\n # Track remote branch\n if branch_exists(reference, local_only=False, directory=directory):\n if not branch_exists(reference, local_only=True, directory=directory):\n track_branches(reference, directory)\n cmd = 'git show-branch --sha1-name ' + reference\n out = check_output(cmd, shell=True, cwd=directory)\n return out.split('[')[1].split(']')[0]", "def reference_url(self):\n return self.get(\"reference_url\", decode=True)", "def getURLForThing(thing):", "def retrieve_author_url(name):\n response = requests.get('https://api.github.com/search/users', {'q': name})\n data = json.loads(response.text)\n if data.get('total_count', 0) > 0:\n return data['items'][0]['html_url']\n else:\n print \"--- ERROR: no author URL retrieved for '{0}' ---\".format(\n response.url)\n return name", "def remote_origin_url(self):\n if self._remote_origin_url:\n return self._remote_origin_url\n\n topleveldata = self.git(\"config\", \"--get\", \"remote.origin.url\")\n self._remote_origin_url = topleveldata[0]\n return self._remote_origin_url", "def git_ref_from_eups_version(version: str) -> str:\n return version.split(\"+\")[0]", "def get_url() -> str:\n parser = ArgumentParser()\n\n parser.add_argument('--url',\n type=str,\n help='Url to download log file')\n\n args = parser.parse_args()\n url = args.url\n return url", "def test_github_without_url(self):\n url = reverse_lazy('authenticate:github')\n response = self.client.get(url)\n\n data = response.data\n details = data['details']\n status_code = data['status_code']\n\n self.assertEqual(2, len(data))\n self.assertEqual(details, 'No callback URL specified')\n self.assertEqual(status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def get_git_repo_url(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. 
Error: %s\",\n e,\n )\n return None\n\n try:\n repo = Repo(path, search_parent_directories=True)\n return next((remote.url for remote in repo.remotes), None)\n except Exception:\n return None", "def _format_url(s):\n return u'%s%s\\n' % (BASE_URL, s.get_absolute_url())", "def get_sha_from_ref(repo_url, reference):\n # Using subprocess instead of convoluted git libraries.\n # Any rc != 0 will be throwing an exception, so we don't have to care\n out = subprocess.check_output(\n [\"git\", \"ls-remote\", \"--exit-code\", repo_url, reference]\n )\n # out is a b'' type string always finishing up with a newline\n # construct list of (ref,sha)\n refs = [\n (line.split(b\"\\t\")[1], line.split(b\"\\t\")[0])\n for line in out.split(b\"\\n\")\n if line != b\"\" and b\"^{}\" not in line\n ]\n if len(refs) > 1:\n raise ValueError(\n \"More than one ref for reference %s, please be more explicit %s\"\n % (reference, refs)\n )\n return refs[0][1].decode(\"utf-8\")", "def test_giturl_missing(self):\r\n response = self.client.get(self.test_url)\r\n self.assertEqual(200, response.status_code)\r\n self.assertIn(\r\n ('giturl must be defined in your '\r\n 'course settings before you can export to git.'),\r\n response.content\r\n )\r\n\r\n response = self.client.get('{}?action=push'.format(self.test_url))\r\n self.assertEqual(200, response.status_code)\r\n self.assertIn(\r\n ('giturl must be defined in your '\r\n 'course settings before you can export to git.'),\r\n response.content\r\n )", "def url(self):\n return (urljoin(self.lodgeit.address, self.relative_url)\n if self.relative_url else None)", "def test_computed_url(self):\n t = BuildVersionRequest()\n self.assertEqual(\"version/build\", t.url_path())", "def repo_full_name_from_remote(remote_url):\n # Check whether we have a https or ssh url\n if remote_url.startswith(\"https\"):\n path = urllib.parse.urlparse(remote_url)\n path = path.path\n # Remove the intial '/'\n path = path[1:]\n # Remove extension\n path = os.path.splitext(path)[0]\n else:\n # Remove the initial `git@``\n path = remote_url.split(\"@\")\n path = path[-1] if len(path) > 1 else path[0]\n path = urllib.parse.urlparse(path)\n path = path.path\n # Remove extension\n path = os.path.splitext(path)[0]\n return path" ]
[ "0.7844604", "0.739291", "0.73613286", "0.73068553", "0.69562215", "0.6901379", "0.6821557", "0.6768966", "0.671453", "0.6567272", "0.65653217", "0.6540621", "0.6515569", "0.6500115", "0.6459063", "0.64307237", "0.63972664", "0.63898844", "0.63744414", "0.6369827", "0.63008016", "0.6276019", "0.626647", "0.62618524", "0.6246135", "0.6228535", "0.62244457", "0.6220689", "0.61677015", "0.6161917", "0.61511534", "0.6150355", "0.6116467", "0.6106337", "0.60690147", "0.60677487", "0.60413086", "0.60168576", "0.59981924", "0.5980133", "0.5979051", "0.5975187", "0.5958323", "0.5952768", "0.5946524", "0.59336185", "0.5930169", "0.5882556", "0.5868011", "0.5867119", "0.5860174", "0.58520067", "0.5850292", "0.58466417", "0.58424234", "0.5827072", "0.58100027", "0.58031195", "0.5792485", "0.578675", "0.5780658", "0.57721597", "0.5767471", "0.57516843", "0.5743556", "0.5735496", "0.57338506", "0.5727109", "0.5723197", "0.5710507", "0.56902", "0.56883216", "0.5687023", "0.5677785", "0.56758916", "0.5669938", "0.56492585", "0.56477284", "0.5639177", "0.5636661", "0.56138015", "0.56086487", "0.56067246", "0.560525", "0.55997163", "0.55977577", "0.5592162", "0.557015", "0.5567111", "0.5558389", "0.5558287", "0.55559206", "0.5549121", "0.55454147", "0.554478", "0.55345577", "0.55342335", "0.5523065", "0.5522127", "0.55182004" ]
0.701876
4
Parse CHANGELOG for latest tag.
def get_changelog(self, commit_sha): url = 'https://{}/{}/{}/' + commit_sha + '/CHANGELOG' url = url.format(HOST_GITHUB_RAW, self.repo, self.product) req = requests.get(url) lines = req.text first = self.latest_tags[self.num_comparisons - 1][VERS] last = self.latest_tags[self.num_comparisons - 2][VERS] flag = False log = '' for line in lines.splitlines(): if first in line: flag = True if last in line: flag = False if flag: log += line + '\n' return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_latest_update(self, resp: Dict[str, Any], latest_version: str) -> str:\n latest_release = resp.get(\"releases\", {}).get(latest_version)\n if latest_release is not None and isinstance(latest_release, list):\n release_artifact_dates = []\n for artifact in latest_release:\n try:\n upload_time = artifact.get(\"upload_time_iso_8601\")\n parsed_upload_time = dateutil.parser.isoparse(upload_time)\n release_artifact_dates.append(parsed_upload_time)\n except Exception:\n pass\n latest_artifact_timestamp = max(release_artifact_dates)\n return latest_artifact_timestamp.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n return \"\"", "def parse(self):\n i = 0\n while i < len(self.__lines):\n line = self.__lines[i]\n dt = re.match(r\"(\\d{4}-\\d{1,2}-\\d{1,2}\\s\\d{1,2}:\\d{1,2}:\\d{1,2})\", line)\n if not dt:\n i += 1\n continue\n log = {\n \"datetime\": dt.group()\n }\n line = line[dt.end()+1:].rstrip(\"\\n\")[::-1]\n qq_flag = line.find(\"(\")\n log[\"qq\"] = line[qq_flag-1:0:-1]\n log[\"name\"] = line[:qq_flag:-1].strip(\" \")\n i += 1\n log[\"content\"] = self.__lines[i].rstrip(\"\\n\")\n while self.__lines[i+1] != \"\\n\":\n i += 1\n log[\"content\"] += \" \" + self.__lines[i].rstrip(\"\\n\")\n self.__logs.append(log)\n i += 2", "def semver_from(changelog: Path) -> Version:\n with open(changelog) as f:\n matches = SEMVER_RE.finditer(f.read())\n versions: List[Version] = []\n is_unreleased = False\n for match in matches:\n version = match.groupdict()[\"version\"]\n if version.lower() == \"unreleased\":\n is_unreleased = True\n else:\n versions.append(Version.parse(version))\n\n versions.sort()\n latest = versions[-1]\n print(latest)\n return latest.bump_prerelease() if is_unreleased else latest", "def parse_changelog(filename):\n with open(filename, 'r') as changelog:\n for line in changelog.readlines():\n if re.match(r'^ .*<.*@.*> [A-Z][a-z][a-z], [0-9][0-9]', line):\n return re.split(r'^ .*<.*@.*>', line)[1].strip()", "def parse_tag(self, tag):\n \n mytag = \"latest\"\n mydigest = None\n\n regex = \"([\\w\\d\\.\\-]+)@?([\\w\\d\\.\\-]*)$\"\n\n regex_matched = re.match(regex, tag)\n mytag = regex_matched.group(1)\n mydigest = regex_matched.group(2)\n \n if regex_matched is None:\n mytag = \"latest\"\n\n return (mytag, mydigest)", "def parse(self, text):\n \n self.clear()\n lines = text.split(\"\\n\")\n self.logger.info(\"Parsing Git history\")\n \n for line in lines:\n if len(line) == 0:\n # Line is a spacer\n pass\n \n elif line[0] == ' ':\n # Line is part of a commit message\n pass\n \n else:\n # Line is part of a commit header\n spaceIdx = line.find(' ')\n if spaceIdx == -1:\n self.logger.warn(\"Skipping unrecognizable history line: \" + line)\n continue\n \n keyword = line[:spaceIdx]\n content = line[spaceIdx+1:]\n self.logger.debug(\"Found key-value pair: {0} {1}\".format(keyword, content))\n \n self._handleKeyValue(keyword, content)\n \n # Grab the last commit\n self._commits[self._currentCommit.hashKey] = self._currentCommit\n self._currentCommit = None\n \n # Finalize the commit tree\n self._resolveCommits()", "def parseLog(self, log):\n return 0", "def gettime(self, tag):\n cmd = ['git', 'log', '--pretty=format:\"%ct\"', \"-1\", tag]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n if data == b'':\n return [], []\n time_stamp = []\n this_tag = []\n for seconds in data.decode(\"utf-8\").split(\"\\n\"):\n month = round((int(seconds.strip('\"')) - ReleaseTime.base) / ReleaseTime.month_time)\n if month not in time_stamp:\n time_stamp.append(month)\n 
this_tag.append(tag[0:4])\n else:\n pass\n return time_stamp, this_tag", "def parse_svn_log_xml(xml_string):\r\n l = []\r\n tree = ET.fromstring(xml_string)\r\n for entry in tree.findall('logentry'):\r\n d = {}\r\n d['revision'] = int(entry.get('revision'))\r\n # Some revisions don't have authors, most notably\r\n # the first revision in a repository.\r\n author = entry.find('author')\r\n d['author'] = author is not None and author.text or None\r\n d['date'] = svn_date_to_timestamp(entry.find('date').text)\r\n # Some revisions may have empty commit message\r\n message = entry.find('msg')\r\n message = message is not None and message.text is not None \\\r\n and message.text.strip() or \"\"\r\n # Replace DOS return '\\r\\n' and MacOS return '\\r' with unix return '\\n'\r\n d['message'] = message.replace('\\r\\n', '\\n').replace('\\n\\r', '\\n'). \\\r\n replace('\\r', '\\n')\r\n paths = d['changed_paths'] = []\r\n for path in entry.findall('.//path'):\r\n copyfrom_rev = path.get('copyfrom-rev')\r\n if copyfrom_rev:\r\n copyfrom_rev = int(copyfrom_rev)\r\n paths.append({\r\n 'path': path.text,\r\n 'action': path.get('action'),\r\n 'copyfrom_path': path.get('copyfrom-path'),\r\n 'copyfrom_revision': copyfrom_rev,\r\n })\r\n l.append(d)\r\n return l", "def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']", "def get_latest_version():\n found_version = \"unknown\"\n version_re = r\"^## \\[(\\d+\\.\\d+\\.\\d+)\\]\"\n\n with open(os.path.join(__repo_root__, \"CHANGELOG.md\")) as changelog_file:\n for line in changelog_file:\n found = re.search(version_re, line)\n if found:\n found_version = found.group(1)\n break\n\n return found_version", "def process_git_tag(regex, inputtag):\n\ttry: \n\t\tgitre = re.compile(regex)\n\t\tmatch = gitre.search(inputtag)\n\t\tgroups = match.groupdict()\n\t\tversion = groups.get('version', '.unknown')\n\t\tdate = groups.get('date', '')\n\t\tgitmeta = groups.get('gitmeta', '')\n\t\tif date:\n\t\t\tversion = '.'.join([version, ''.join(date.split('-'))])\n\texcept (AttributeError, EnvironmentError, OSError):\n\t\tversion, gitmeta = '.unknown', ''\n\n\treturn version, gitmeta", "def _parse_commit_log(base_commit, tip_commit):\n\n class LogState(object):\n SEPARATOR_LINE = 0\n COMMIT_SHA1_LINE = 1\n MERGE_LINE = 2\n AUTHOR_LINE = 3\n COMMITTER_LINE = 4\n MIDDLE_SEPARATOR_LINE = 5\n TITLE_LINE = 6\n BLANK_LINE = 7\n BODY_LINES = 8\n\n commit_info = {}\n check_churn = True\n check_move = True\n\n git_log_cmd = shlex.split(\n 'git log --format=full --reverse {base_commit}..{tip_commit}'.format(\n base_commit=base_commit, 
tip_commit=tip_commit))\n git_log_output = subprocess.check_output(git_log_cmd)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n git_log_output_lines = git_log_output.splitlines()\n for idx, line in enumerate(git_log_output_lines, 1):\n # commit line\n if (\n log_line_state == LogState.SEPARATOR_LINE and\n line.startswith('commit ')):\n commit_sha1 = line.split(' ')[1]\n log_line_state = LogState.COMMIT_SHA1_LINE\n continue\n\n # Merge: line\n if (\n log_line_state == LogState.COMMIT_SHA1_LINE and\n line.startswith('Merge: ')):\n merge = line.split(' ', 1)[1]\n log_line_state = LogState.MERGE_LINE\n continue\n\n # Author: line\n if (\n log_line_state in [\n LogState.COMMIT_SHA1_LINE, LogState.MERGE_LINE] and\n line.startswith('Author: ')):\n author = line.split(' ', 1)[1]\n log_line_state = LogState.AUTHOR_LINE\n continue\n\n # Commit: line\n if log_line_state == LogState.AUTHOR_LINE and line.startswith('Commit: '):\n committer = line.split(' ', 1)[1]\n log_line_state = LogState.COMMITTER_LINE\n continue\n\n # empty line after Commit: line\n if log_line_state == LogState.COMMITTER_LINE and line == '':\n log_line_state = LogState.MIDDLE_SEPARATOR_LINE\n continue\n\n # Title line of commit message\n if (\n log_line_state == LogState.MIDDLE_SEPARATOR_LINE and\n line.startswith(' ')):\n title = line.lstrip(' ')\n log_line_state = LogState.TITLE_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Blank line between title and body (still contains 4 space prefix)\n if log_line_state == LogState.TITLE_LINE and line.startswith(' '):\n separator = line.lstrip(' ')\n log_line_state = LogState.BLANK_LINE\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = 
bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # Body lines\n if (\n log_line_state in [LogState.BLANK_LINE, LogState.BODY_LINES] and\n line.startswith(' ')):\n body.append(line.lstrip(' '))\n log_line_state = LogState.BODY_LINES\n\n if idx < len(git_log_output_lines):\n continue\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n break\n\n # End of commit message\n if (\n log_line_state in [\n LogState.TITLE_LINE, LogState.BLANK_LINE,\n LogState.BODY_LINES] and\n line == ''):\n\n commit_status = _validate_commit(\n commit_sha1, merge, author, committer, title, separator, body)\n\n if commit_sha1 not in commit_info.keys():\n commit_info[commit_sha1] = commit_status\n else:\n commit_info[commit_sha1].extend(commit_status)\n\n if check_churn:\n commit_churn_info, branch_churn_sha1s = _check_diff_add_delete(\n commit_sha1, tip_commit)\n\n for commit_churn_sha1 in commit_churn_info.keys():\n if commit_churn_sha1 not in commit_info.keys():\n commit_info[commit_churn_sha1] = commit_churn_info[\n commit_churn_sha1]\n else:\n commit_info[commit_churn_sha1].extend(\n commit_churn_info[commit_churn_sha1])\n\n check_churn = bool(branch_churn_sha1s)\n\n if check_move:\n commit_move_info, branch_move_sha1s = _check_diff_move(\n commit_sha1, tip_commit)\n\n for commit_move_sha1 in commit_move_info.keys():\n if commit_move_sha1 not in commit_info.keys():\n commit_info[commit_move_sha1] = commit_move_info[\n commit_move_sha1]\n else:\n commit_info[commit_move_sha1].extend(\n commit_move_info[commit_move_sha1])\n\n check_move = bool(branch_move_sha1s)\n\n log_line_state = LogState.SEPARATOR_LINE\n commit_sha1 = None\n merge = None\n author = None\n committer = None\n title = None\n separator = None\n body = []\n\n return commit_info", "def parseMergeChangeLogNodes( stdout ):\n result = []\n for l in stdout.split('\\n'):\n m = CHANGESET.match( l )\n if m:\n result.append( m.group(1) )\n return result", "def parseLog(self, log_lines):\n abstract", "def parse(file):\n logger.info('parsing DL7 dive log data')\n log = Log()\n content = file.readline()\n while not content == '':\n __parse_line(log, content)\n 
content = file.readline()\n return log", "def get_latest_rev(changesfile):\n if os.path.exists(changesfile):\n with open(changesfile) as chlog:\n line = chlog.readline()\n return line.strip().split(\" \")[-1].split(\"@\")[-1]\n return ''", "def get_last_tag_by_version(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n tags = []\n versions = []\n for line in output.splitlines():\n tags.append(line.strip())\n ver = re.match(r\"[0-9]+\\.[0-9]+\\.[0-9]+\", line)\n if ver:\n versions.append(ver)\n return tags[versions.index(max(versions))] if versions else ''", "def get_last_tag_by_date(directory=None):\n cmd = \"git for-each-ref --sort='*authordate' \" \\\n \"--format='%(refname:short)' refs/tags/upstream\"\n output = check_output(cmd, shell=True, cwd=directory, stderr=PIPE)\n output = output.splitlines()\n if len(output) == 0:\n return ''\n return output[-1]", "def call_change_log(input_filter):\n try:\n if input_filter is None:\n latest = _find_latest()\n service_endpoint = _find_filter(\"change_log\")\n else:\n keyword = input_filter.split(\" \")[0]\n if \"release\" == keyword or \"build\" == keyword:\n service_endpoint = _find_filter(input_filter.split(\";\")[2])\n else:\n service_endpoint = _find_filter(keyword)\n\n rel_build = input_filter.replace(\"_\", \".\").split(\" \")[1].split(\";\")\n\n if \"build\" == keyword:\n latest_rel = rel_build[1]\n latest_bui = rel_build[0]\n else:\n latest_rel = rel_build[0]\n latest_bui = rel_build[1]\n\n latest = {\"latest_val\": latest_rel + \"_\" + latest_bui,\n \"second_latest_val\": latest_rel + \"_\" + str(int(latest_bui)-1)}\n\n latest_query = latest[\"second_latest_val\"] + \"..\" + latest[\"latest_val\"]\n data = _call_rest_api(service_endpoint + \"/\" + latest_query, None)\n except Exception as e:\n logger.error(str(e))\n data = {\"success\": \"\", \"data\": {}, \"error\": {\"Message\": str(e)}}\n data = jsonify(data)\n return data", "def _load_changelog(self):\n\n changelog_json_file = self._project.get_changelog_path()\n if not os.path.isfile(changelog_json_file):\n logger.warning('Changelog File \"{}\" does not exists!'.format(changelog_json_file))\n return\n\n logger.warning('Loading Changelog from: \"{}\"'.format(changelog_json_file))\n\n with open(changelog_json_file, 'r') as f:\n if changelog_json_file.endswith('.json'):\n changelog_data = json.load(f, object_pairs_hook=OrderedDict)\n else:\n changelog_data = yaml.load(f, Loader=yamlordereddictloader.Loader)\n if not changelog_data:\n return\n\n changelog_versions = [key for key in changelog_data.keys()]\n ordered_versions = self._order_changelog_versions(changelog_versions)\n\n for version in reversed(ordered_versions):\n self._create_version(str(version), changelog_data[str(version)])\n\n last_version_item = self.version_accordion.item_at(0)\n last_version_item.set_collapsed(False)", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def 
get_newest_changefile_info(changefile_type):\n url = get_url(changefile_type) + \"/state.txt\"\n changefile_timestamp = None\n file_sequence_number = 0\n for result in urllib.urlopen(url):\n # get sequence number\n sequence_number_p = result.find(\"sequenceNumber=\")\n if sequence_number_p != -1:\n file_sequence_number = int(result[sequence_number_p + 15:])\n # get timestamp\n timestamp_p = result.find(\"timestamp=\")\n if timestamp_p != -1:\n # found timestamp line\n timestamp_p += 10 # jump over text\n result = result[timestamp_p:].replace(\"\\\\\", \"\").strip()\n changefile_timestamp = strtodatetime(result)\n\n if not changefile_timestamp:\n logging.info(\"(no timestamp)\")\n else:\n logging.info(\"newest %s timestamp: %s\" % \\\n (changefile_type, changefile_timestamp.isoformat()))\n return (changefile_timestamp, file_sequence_number)", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def get_latest_tags(self):\n\n start = len(self.tags) - self.num_comparisons\n tags = self.tags\n latest = []\n for i in xrange(len(tags)):\n if i >= start:\n parts = tags[i]['ref'].split('/')\n release_num = parts[2]\n sha = tags[i]['object']['sha']\n tag = [release_num, sha]\n latest.append(tag)\n return latest", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def aggregate_git_log(path, progress_callback=lambda progress: None):\n versions = list()\n\n current_version, current_commits = None, list()\n\n log_data = git_log_hash(path)\n log_length = len(log_data)\n progress_step = max(1, log_length / 100)\n \n for idx, (rev_hash, date, msg) in enumerate(log_data):\n if idx % progress_step == 0:\n progress_callback(float(idx) / log_length)\n \n current_commits.append(msg)\n if git_checkout(path=path, revision_hash=rev_hash):\n version = get_package_metadata(path=path, field_name='Version')\n if version != current_version:\n # memorize it\n versions.insert(0,\n dict(version=version,\n date=datetime.strptime(date.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S'),\n sections=[dict(notes='',\n items=list(reversed(current_commits)))]))\n\n current_version, current_commits = version, list()\n\n if current_commits:\n versions.insert(0,\n dict(version='newest',\n date=None,\n sections=[dict(notes='',\n items=list(reversed(current_commits)))]))\n\n return versions", "def get_latest_build(tag, package):\n proc = Popen([\"osg-koji\", \"-q\", \"list-tagged\", \"--latest\", tag, package],\n stdout=PIPE)\n out = proc.communicate()[0] or b''\n ret = proc.returncode\n\n latest_build_line = out.decode(\"latin-1\").strip()\n\n if ret != 0 or not latest_build_line:\n return\n\n return latest_build_line.split()[0]", "def get_changelog(no):\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n\n return changelog[0:no]", "def parse_log_file(self):\n # Open log file\n log_file_data = utils.open_file(self.log_file)\n for line in log_file_data:\n algo = 
line.strip(\"\\n\").split(\":\")[1]\n if len(algo) > 3:\n hash_algo = algo.split(\"$\")[1]\n if hash_algo not in self.used_algo:\n self.used_algo.append(hash_algo)", "def flatten_log(self, log):\n # TODO take out flat_log by reference\n flat_log = []\n try:\n self.parser.parse_snapshot(log['chunkedSnapshot'], flat_log)\n self.parser.parse_log(log['changelog'], flat_log)\n except KeyError:\n self.logger.exception('Missing chunkedSnapshot or changelog keys in log')\n raise\n\n return flat_log", "def _get_top_changes(cls, upload_changes):\n result = \"\"\n header_found = False\n for line in upload_changes.get(\"Changes\", \"\").splitlines(True):\n if re.match(r\"^ [a-z0-9]+\", line):\n if header_found:\n break\n header_found = True\n result += line\n return result", "def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')", "def get_latest_items(parser, token):\n bits = token.split_contents()\n\n if len(bits) != 4:\n raise TemplateSyntaxError, \"get_latest_item tag takes exactly three arguments\"\n if bits[2] != 'as':\n raise TemplateSyntaxError, \"second argument to get_latest_item tag must be 'as'\"\n return LatestItemNode(bits[1], bits[3])", "def changelog(self, branch, since=None):\n walker = Walker(self.repo, [self.latest_branch_revision(branch)])\n for entry in walker:\n if since is not None and entry.commit.id == since:\n break\n commit = entry.commit\n files = Command(\n 'git show --pretty=\"format:\" --name-only %s' % commit.id,\n cwd=self.path,\n ).out.split()\n yield Commit(\n commit.id,\n commit.committer,\n datetime.fromtimestamp(commit.commit_time),\n commit.message,\n files,\n )", "def test_get_parse_not_empty(self):\n \n self.assertEqual(bitchangesets.parse_changeset(self.changeset), {'timestamp': '2013-07-27 01:56:46', 'parsed_author': 'David Leonard'})", "def _get_recent_feed(cls, target):\n response = feedparser.parse(\n target.link, modified=target.last_modified, etag=target.etag\n )\n\n # Some of the feeds offer one of these two tags and others none of them.\n modified = cls._time_to_date(response.get(\"modified_parsed\"))\n etag = response.get(\"etag\")\n\n # In case RSS feed doesn't support modified tag, we compute it artificially.\n if not modified:\n response.entries, modified = cls._entries_after_date(\n response.entries, target.last_modified\n )\n\n return response, modified, etag", "def test_parse_diff_revision(self):\n self.assertEqual(\n self.tool.parse_diff_revision(filename=b'doc/readme',\n revision=b'bf544ea'),\n (b'doc/readme', b'bf544ea'))\n self.assertEqual(\n self.tool.parse_diff_revision(filename=b'/dev/null',\n revision=b'bf544ea'),\n (b'/dev/null', PRE_CREATION))\n self.assertEqual(\n self.tool.parse_diff_revision(filename=b'/dev/null',\n revision=b'0000000'),\n (b'/dev/null', PRE_CREATION))", "def process_entry(self,\n log_entry: str):\n elem = ET.fromstring(log_entry)\n rev = elem.attrib['revision']\n values = {}\n for sub in ['author', 'date', 'msg']:\n try:\n values[sub] = elem.find(f'./{sub}').text\n except (AttributeError, SyntaxError):\n log.warning('failed to retrieve 
%s in %s', sub, log_entry)\n values[sub] = None\n if values['msg']:\n values['msg'] = values['msg'].replace('\\n', ' ')\n rel_url_slash = self.relative_url + '/'\n for path_elem in elem.findall('*/path'):\n other = {}\n for sub in ['text-mods', 'kind', 'action', 'prop-mods',\n 'copyfrom-rev', 'copyfrom-path']:\n try:\n other[sub] = path_elem.attrib[sub]\n except (AttributeError, SyntaxError, KeyError):\n other[sub] = np.nan\n try:\n path = path_elem.text.replace(rel_url_slash, '')\n except (AttributeError, SyntaxError, ValueError) as err:\n log.warning(f'{err} processing rev {rev}')\n path = None\n entry = scm.LogEntry(rev, values['author'], to_date(values['date']),\n path=path, message=values['msg'],\n textmods=to_bool(other['text-mods']),\n kind=other['kind'], action=other['action'],\n propmods=to_bool(other['prop-mods']),\n copyfromrev=other['copyfrom-rev'],\n copyfrompath=other['copyfrom-path'],\n added=np.nan, removed=np.nan)\n yield entry", "def parse_event_attlog(self):\n uid = ''\n ver_type = -1\n date_str = ''\n if self.last_event_code == DEFS.EF_ATTLOG:\n uid = self.last_payload_data[0:9].decode('ascii').\\\n replace('\\x00', '')\n ver_type = struct.unpack('<H', self.last_payload_data[24:26])[0]\n date_str = \"20%i/%i/%i %i:%i:%i\" %\\\n tuple(self.last_payload_data[26:32])\n\n return [uid, ver_type, date_str]", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def parse_log_file(filename, job_name):\n\n time_re = \"(\\d{4}/\\d{2}/\\d{2} \\d{2}:\\d{2}:\\d{2})\"\n time_pat = re.compile(time_re)\n pat = re.compile(time_re + \".*RUM\\.Workflow.*(START|FINISH)\\s+(.*)\")\n\n time_fmt = \"%Y/%m/%d %H:%M:%S\"\n\n first_time = None\n \n with open(filename) as f:\n for line in f:\n if first_time is None:\n m = time_pat.match(line)\n if m is None:\n raise Exception(\"Couldn't parse time from \" + line)\n tm = m.group(1)\n print \"TM is \" + str(tm)\n first_time = time.strptime(tm, time_fmt)\n print \"First time is \" + str(first_time)\n\n yield Event(first_time, 'START', 'log', job_name, filename)\n m = pat.match(line)\n if (m is not None):\n (tm, type, step) = m.groups()\n t = time.strptime(tm, time_fmt)\n e = Event(t, type, step, job_name, filename)\n yield e", "def handle_endtag(self, tag):\n if verbose(): print(\"TIParser.handle_endtag(self, %s)\" % (tag))\n if tag == 'head':\n self.head = 'closed'\n if tag == 'body':\n self.body = 'closed'\n (line, offset) = self.getpos()\n etag = self.text[line-1][offset:]\n if tag not in self.nostack:\n pop = self.stack.pop()\n if tag != pop:\n self.errmsg(\"</%s> does not match <%s>\" % (tag, pop))", "def do_latest_tag(args, image_name_tag, image_name):\n if args.latest is True:\n if tag(image_name_tag, image_name+':latest'):\n push(args, image_name+':latest')", "def parse_log():\n error_dict = {}\n user_dict = {}\n pattern = r': (INFO|ERROR) (.*) \\((.*)\\)'\n\n with open('syslog.log', 'r') as log_file:\n for line in log_file.readlines():\n capture_groups = re.findall(pattern, line)\n main = capture_groups[0][0]\n detail = capture_groups[0][1]\n user = capture_groups[0][2]\n if user not in user_dict:\n user_dict[user] = {}\n if main == 'ERROR':\n error_dict[detail] = error_dict.get(detail, 0) + 1\n user_dict[user]['ERROR'] = user_dict[user].get('ERROR', 0) + 1\n else:\n user_dict[user]['INFO'] = user_dict[user].get('INFO', 0) 
+ 1\n\n # cover use cases where users never have 'ERRORS' or 'INFO' events in their usage history.\n for user in user_dict:\n if 'INFO' not in user_dict[user]:\n user_dict[user]['INFO'] = user_dict[user].get('INFO', 0)\n if 'ERROR' not in user_dict[user]:\n user_dict[user]['ERROR'] = user_dict[user].get('ERROR', 0)\n\n sorted_errors = sorted(error_dict.items(), key=operator.itemgetter(1), reverse=True)\n sorted_users = sorted(user_dict.items())\n logging.debug(sorted_errors)\n logging.debug(sorted_users)\n\n return sorted_errors, sorted_users", "def parse_log_file(filename='log_file.txt'):\n # Example to read the last line of a file and split it in half with separator sep\n sep = '='\n\n with open(filename, 'r') as f:\n last_line = f.readlines()[-1]\n s1, s2 = last_line.rsplit(sep)\n return s1, s2", "def _parse_logs_path(path):\n tokens = [token for token in path.split('/') if len(token) > 0]\n artifact = {}\n job = {}\n\n if tokens[0].startswith('periodic'):\n if len(tokens) >= 3:\n artifact['pipeline'] = tokens[0]\n job['name'] = tokens[1]\n else:\n if len(tokens) >= 2:\n artifact['change_id'] = int(tokens[1])\n\n if len(tokens) >= 3:\n artifact['revision'] = int(tokens[2])\n\n if len(tokens) >= 4:\n artifact['pipeline'] = tokens[3]\n\n if len(tokens) >= 5:\n job['name'] = tokens[4]\n\n return artifact, job", "def parseGithubFeed(data):\n\tgitResult = []\n\tif data:\n\t\tfor entries in data:\n\t\t\ttext = entries['commit']['message']\n\t\t\tauthor = entries['commit']['author']['name']\n\t\t\ttime = entries['commit']['author']['date']\n\t\t\ttime = dateutil.parser.parse(time).isoformat(' ').split('+')[0] \n\t\t\ttime = datetime.datetime.strptime(time, \"%Y-%m-%d %H:%M:%S\" )\n\t\t\titem = copy.deepcopy(templateResult)\n\t\t\titem['message'] = text \n\t\t\titem['author'] = author\n\t\t\titem['datetime'] = time\n\t\t\titem['source'] = 'Github'\n\t\t\tgitResult.append(item)\n\treturn gitResult", "def parse_log_entry(self, logstring):\n\n splitLogInfo = logstring.partition(self.LOGFILE_PREFIX)\n if len(splitLogInfo[1]) == 0:\n raise errorhandler.LogDatabaseError(\"separator {} not found in log entry\".format(self.LOGFILE_PREFIX))\n str2 = splitLogInfo[2]\n\n entrytype = None\n for k, v in self.validpostfixes.items():\n if splitLogInfo[2][0:len(k)] == k:\n entrytype = v\n break\n if entrytype is None:\n raise errorhandler.LogDatabaseError(\"Invalid log type: {}\".format(splitLogInfo[2][0:10]))\n\n try:\n timestringtrimmed = logstring.partition(\".\")[0]\n timestamp = datetime.datetime(*time.strptime(timestringtrimmed, \"%Y-%m-%dT%H:%M:%S\")[:6])\n except ValueError:\n raise errorhandler.LogDatabaseError(\"Value error parsing timestamp out of log entry\")\n\n mactokens = {\n \"MAC source\": \"MAC source = \",\n \"MAC dest\": \"MAC dest = \",\n }\n indices = []\n lastidx = 0\n for k, v in mactokens.items():\n nextidx = str2.find(v, lastidx)\n if nextidx < 0:\n raise errorhandler.LogDatabaseError(\"{} not found in log entry\".format(k))\n indices.append(nextidx + len(v))\n lastidx = nextidx\n srcMAC = str2[indices[0] : indices[0] + 17]\n dstMAC = str2[indices[1] : indices[1] + 17]\n\n iptokens = {\n \"IP source\": \"IP SRC=\",\n \"IP dest\": \"IP DST=\",\n \"IP source port\": \"SPT=\",\n \"IP dest port\": \"DPT=\"\n }\n if entrytype == LogEntryType.UNKNOWN_IP or entrytype == LogEntryType.IP_TRAFFIC_IN \\\n or entrytype == LogEntryType.IP_TRAFFIC_OUT or entrytype == LogEntryType.DROP:\n for k, v in iptokens.items():\n nextidx = str2.find(v, lastidx)\n if nextidx < 0:\n raise 
errorhandler.LogDatabaseError(\"{} not found in log entry\".format(k))\n indices.append(nextidx + len(v))\n lastidx = nextidx\n\n srcIP = extract_ip(str2, indices[2])\n dstIP = extract_ip(str2, indices[3])\n srcPort = str2[indices[4]:].partition(\" \")[0]\n dstPort = str2[indices[5]:]\n else:\n srcIP = \"\"\n dstIP = \"\"\n srcPort = \"\"\n dstPort = \"\"\n\n logdataentry = LogDataEntry(entry_type=entrytype, timestamp=timestamp, srcMAC=srcMAC, dstMAC=dstMAC, srcIP=srcIP, dstIP=dstIP,\n srcPort=srcPort, dstPort=dstPort)\n return logdataentry", "def parse(self, filename):\n def invalid_line(line, reason):\n stats.count_lines_invalid.increment()\n if config.options.debug >= 2:\n logging.debug('Invalid line detected (%s): %s' % (reason, line))\n\n if filename == '-':\n filename = '(stdin)'\n file = sys.stdin\n else:\n if not os.path.exists(filename):\n print >> sys.stderr, \"\\n=====> Warning: File %s does not exist <=====\" % filename\n return\n else:\n if filename.endswith('.bz2'):\n open_func = bz2.BZ2File\n elif filename.endswith('.gz'):\n open_func = gzip.open\n else:\n open_func = open\n file = open_func(filename, 'r')\n\n if config.options.show_progress:\n print 'Parsing log %s...' % filename\n\n if config.format:\n # The format was explicitely specified.\n format = config.format\n\n if isinstance(format, W3cExtendedFormat):\n format.create_regex(file)\n\n if format.regex is None:\n return fatal_error(\n \"File is not in the correct format, is there a '#Fields:' line? \"\n \"If not, use the --w3c-fields option.\"\n )\n else:\n # If the file is empty, don't bother.\n data = file.read(100)\n if len(data.strip()) == 0:\n return\n try:\n file.seek(0)\n except IOError:\n pass\n\n format = self.detect_format(file)\n if format is None:\n return fatal_error(\n 'Cannot guess the logs format. 
Please give one using '\n 'either the --log-format-name or --log-format-regex option'\n )\n # Make sure the format is compatible with the resolver.\n\n if config.options.dump_log_regex:\n logging.info(\"Using format '%s'.\" % format.name)\n if format.regex:\n logging.info(\"Regex being used: %s\" % format.regex.pattern)\n else:\n logging.info(\"Format %s does not use a regex to parse log lines.\" % format.name)\n logging.info(\"--dump-log-regex option used, aborting log import.\")\n os._exit(0)\n\n hits = []\n for lineno, line in enumerate(file):\n try:\n line = line.decode(config.options.encoding)\n except UnicodeDecodeError:\n invalid_line(line, 'invalid encoding')\n continue\n\n stats.count_lines_parsed.increment()\n if stats.count_lines_parsed.value <= config.options.skip:\n continue\n\n match = format.match(line)\n if not match:\n invalid_line(line, 'line did not match')\n continue\n\n hit = Hit(\n filename=filename,\n lineno=lineno,\n status=format.get('status'),\n full_path=format.get('path'),\n is_download=False,\n is_robot=False,\n is_error=False,\n is_redirect=False,\n args={},\n )\n\n if config.options.regex_group_to_page_cvars_map:\n self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_page_cvars_map, True)\n\n if config.options.regex_group_to_visit_cvars_map:\n self._add_custom_vars_from_regex_groups(hit, format, config.options.regex_group_to_visit_cvars_map, False)\n\n if config.options.regex_groups_to_ignore:\n format.remove_ignored_groups(config.options.regex_groups_to_ignore)\n\n try:\n hit.query_string = format.get('query_string')\n hit.path = hit.full_path\n except BaseFormatException:\n hit.path, _, hit.query_string = hit.full_path.partition(config.options.query_string_delimiter)\n\n # W3cExtendedFormat detaults to - when there is no query string, but we want empty string\n if hit.query_string == '-':\n hit.query_string = ''\n\n hit.extension = hit.path.rsplit('.')[-1].lower()\n\n try:\n hit.referrer = format.get('referrer')\n\n if hit.referrer.startswith('\"'):\n hit.referrer = hit.referrer[1:-1]\n except BaseFormatException:\n hit.referrer = ''\n if hit.referrer == '-':\n hit.referrer = ''\n\n try:\n hit.user_agent = format.get('user_agent')\n\n # in case a format parser included enclosing quotes, remove them so they are not\n # sent to Piwik\n if hit.user_agent.startswith('\"'):\n hit.user_agent = hit.user_agent[1:-1]\n except BaseFormatException:\n hit.user_agent = ''\n\n hit.ip = format.get('ip')\n try:\n hit.length = int(format.get('length'))\n except (ValueError, BaseFormatException):\n # Some lines or formats don't have a length (e.g. 
304 redirects, W3C logs)\n hit.length = 0\n\n try:\n hit.generation_time_milli = float(format.get('generation_time_milli'))\n except BaseFormatException:\n try:\n hit.generation_time_milli = float(format.get('generation_time_micro')) / 1000\n except BaseFormatException:\n try:\n hit.generation_time_milli = float(format.get('generation_time_secs')) * 1000\n except BaseFormatException:\n hit.generation_time_milli = 0\n\n if config.options.log_hostname:\n hit.host = config.options.log_hostname\n else:\n try:\n hit.host = format.get('host').lower().strip('.')\n\n if hit.host.startswith('\"'):\n hit.host = hit.host[1:-1]\n except BaseFormatException:\n # Some formats have no host.\n pass\n\n # Add userid\n try:\n hit.userid = None\n\n userid = format.get('userid')\n if userid != '-':\n hit.args['uid'] = hit.userid = userid\n except:\n pass\n\n # add event info\n try:\n hit.event_category = hit.event_action = hit.event_name = None\n\n hit.event_category = format.get('event_category')\n hit.event_action = format.get('event_action')\n\n hit.event_name = format.get('event_name')\n if hit.event_name == '-':\n hit.event_name = None\n except:\n pass\n\n # add session time\n try:\n hit.session_time = None\n\n session_time = format.get('session_time')\n hit.session_time = int(session_time)\n except:\n pass\n\n # Check if the hit must be excluded.\n if not all((method(hit) for method in self.check_methods)):\n continue\n\n # Parse date.\n # We parse it after calling check_methods as it's quite CPU hungry, and\n # we want to avoid that cost for excluded hits.\n date_string = format.get('date')\n try:\n hit.date = datetime.datetime.strptime(date_string, format.date_format)\n except ValueError:\n invalid_line(line, 'invalid date')\n continue\n\n # Parse timezone and substract its value from the date\n try:\n timezone = float(format.get('timezone'))\n except BaseFormatException:\n timezone = 0\n except ValueError:\n invalid_line(line, 'invalid timezone')\n continue\n\n if timezone:\n hit.date -= datetime.timedelta(hours=timezone/100)\n\n if config.options.replay_tracking:\n # we need a query string and we only consider requests with piwik.php\n if not hit.query_string or not hit.path.lower().endswith(config.options.replay_tracking_expected_tracker_file):\n invalid_line(line, 'no query string, or ' + hit.path.lower() + ' does not end with piwik.php')\n continue\n\n query_arguments = urlparse.parse_qs(hit.query_string)\n if not \"idsite\" in query_arguments:\n invalid_line(line, 'missing idsite')\n continue\n\n try:\n hit.args.update((k, v.pop().encode('raw_unicode_escape').decode(config.options.encoding)) for k, v in query_arguments.iteritems())\n except UnicodeDecodeError:\n invalid_line(line, 'invalid encoding')\n continue\n\n hits.append(hit)\n if len(hits) >= config.options.recorder_max_payload_size * len(Recorder.recorders):\n Recorder.add_hits(hits)\n hits = []\n if len(hits) > 0:\n Recorder.add_hits(hits)", "def _sort_latest_tag(self, versions: List[dict], tag_key: str) -> Dict:\n return next(\n iter(\n sorted(\n versions,\n reverse=True,\n key=lambda s: list(\n map(\n int,\n filter(None, re.sub(r\"[^0-9.]+\", \"\", s.get(tag_key), re.I).split(\".\")),\n )\n )\n if \".\" in s.get(tag_key)\n else [-1],\n )\n )\n )", "def parse_feed(feed, last_update, entry, get_updated = lambda e: e.updated_parsed[:6]):\n\n entries = []\n for e in feed.entries:\n if datetime(*get_updated(e)) > last_update:\n new = entry(e)\n if new != None:\n entries.append(new)\n return entries", "def log_decode(log_data):\n data = 
json.loads(log_data.data)\n timestamp = log_data.timestamp\n return (data, timestamp)", "def make_semver(repo_root, build_number):\n branch_name, sha, tags = parse_describe(repo_root)\n if tags:\n # There are git tags to consider. Parse them all then choose the one that is latest (sorted by semver rules)\n return sorted([make_version_number(branch_name, build_number, tag, sha) for tag in tags])[-1]\n else:\n return make_version_number(branch_name, build_number, None, sha)", "def parse_mjlog(root_node, tags=None):\n parsed = []\n for node in root_node:\n if tags is None or node.tag in tags:\n parsed.append(parse_node(node.tag, node.attrib))\n if tags is None:\n return _structure_parsed_result(parsed)\n return parsed", "def parse_date(self) -> str:\r\n for line in self.lines:\r\n line = ''.join(line)\r\n if 'updated' in line:\r\n index = line.find('Last updated')\r\n if index != -1:\r\n substring = line[index + 10: index + 50].split('.')[0][-13:]\r\n print(substring)\r\n return pd.to_datetime(substring)\r\n if 'Scottish test n' in line:\r\n index_date = line.find('h test n')\r\n print(index_date)\r\n if index_date != -1:\r\n return pd.to_datetime(line[index_date+15:index_date+29])", "def parse_commit_message(message):\n # ['closes', 'close', 'fix', ...]\n keywords = []\n [keywords.extend(val) for val in KEYWORDS.values()]\n # we need to sort to match longuest command possible\n keywords.sort(lambda x, y: cmp(len(y), len(x)))\n # 'closes|close|fix...'\n keywords_re = '|'.join(keywords)\n\n # [('refs', 'affinitic', '#1'), ('refs', 'affinitic', '#2')]\n refs = re.findall('(%s)[ ]*([a-z]+)[ ]*([# \\d]*)' % keywords_re,\n message,\n re.IGNORECASE)\n\n parseds = []\n for ref in refs:\n if len(ref) != 3:\n # XXX envoi de mail si 1 < ref < 3 ?\n continue\n\n command = _word_to_command(ref[0])\n trac = ref[1].lower()\n tickets = ref[2]\n\n tickets_split = re.findall('\\d+', tickets)\n for ticket in tickets_split:\n parsed = {}\n parsed[\"command\"] = command\n parsed[\"ticket\"] = ticket\n parsed[\"trac\"] = trac\n parseds.append(parsed)\n\n return parseds", "def iter_tag_pairs(tags,\n format='rst',\n heading_char='^',\n heading_level=2,\n include_cmd=True,\n encoding='utf-8'\n ):\n tagdates = collections.OrderedDict()\n tagpairsiter = _izip(tags,\n itertools.islice(tags, 1, None))\n tagpairs = list(tagpairsiter)\n logging.debug(('tagpairs', tagpairs))\n\n _format = format.lower()\n if _format not in ['rst', 'md']:\n raise ValueError(('format unsupported', _format))\n\n def iter_release_data(tagpairs, git_cmd):\n for (tag1, tag2) in tagpairs[::-1]:\n data = {}\n tag1 = tag1.decode(encoding) if hasattr(tag1, 'decode') else tag1\n #tag1date = tagdates.setdefault(tag1, git_get_rev_date(tag1))\n tag2date = tagdates.setdefault(tag2, git_get_rev_date(tag2))\n data['tag2date'] = tag2date\n heading = rst_escape(\"%s (%s)\" % (tag2, tag2date.decode(encoding))) # TODO: date\n data['heading'] = heading\n logpath = \"%s..%s\" % (tag1, tag2)\n data['logpath'] = logpath\n changelog_cmd = ['log', '--reverse', '--pretty=format:* %s [%h]', logpath]\n data['changelog_cmd'] = changelog_cmd\n changelog_cmdstr = \"log --reverse --pretty=format:'* %s [%h]' \" + logpath\n data['changelog_cmdstr'] = changelog_cmdstr\n cmd = git_cmd + changelog_cmd\n data['cmd'] = cmd\n logging.debug(cmd)\n logging.debug(('cmdstr*', ' '.join(cmd)))\n output = subprocess.check_output(cmd)\n data['_output'] = output\n data['output_rst'] = rst_escape(output)\n yield data\n\n #\n tag1 = tag2\n\n def template_as_rst(tagpairs,\n git_cmd,\n 
heading_char=heading_char,\n include_cmd=True):\n for data in iter_release_data(tagpairs, git_cmd):\n # RST heading\n yield ''\n yield ''\n yield data['heading']\n yield heading_char * len(data['heading'])\n if include_cmd:\n yield \"::\"\n yield \"\"\n yield \" git %s\" % (data['changelog_cmdstr'])\n yield \"\"\n yield data['output_rst']\n\n def template_as_md(tagpairs,\n git_cmd,\n heading_char='#',\n heading_level=2,\n include_cmd=True):\n for data in iter_release_data(tagpairs, git_cmd):\n # RST heading\n yield ''\n yield ''\n if heading_level:\n yield \"%s %s\" % ((heading_level * heading_char), data['heading'])\n if include_cmd:\n yield \"```bash\"\n yield \"$ git %s\" % (data['changelog_cmdstr'])\n yield \"```\"\n yield \"\"\n yield data['output_rst']\n\n if _format == 'rst':\n return template_as_rst(\n tagpairs,\n git_cmd,\n heading_char=heading_char,\n include_cmd=include_cmd)\n elif _format == 'md':\n return template_as_md(\n tagpairs,\n git_cmd,\n heading_char=heading_char,\n heading_level=heading_level,\n include_cmd=include_cmd)", "def _get_pkg_changelog_contents(ctx: Context, version: str):\n changes = _get_changelog_contents(ctx, version)\n changes = \"\\n\".join(changes.split(\"\\n\")[2:])\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Removed\n -------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Deprecated\n ----------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Changed\n -------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Fixed\n -----\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Added\n -----\n\n \"\"\"\n ),\n \"\",\n )\n return changes", "def _parse_diff(commit_sha1):\n class DiffState(object):\n START = 0\n DIFF_BLOCK_LINE = 1\n INDEX_LINE = 2\n A_LINE = 3\n B_LINE = 4\n AT_LINE = 5\n DIFF_LINES = 6\n\n diff_cmd = shlex.split('git show {commit_sha1}'.format(\n commit_sha1=commit_sha1))\n diff_output = subprocess.check_output(diff_cmd)\n\n diff_lines = set()\n state = DiffState.START\n for line in diff_output.splitlines():\n if state in [DiffState.START, DiffState.DIFF_LINES] and line.startswith('diff '):\n state = DiffState.DIFF_BLOCK_LINE\n continue\n\n if state == DiffState.DIFF_BLOCK_LINE and line.startswith('index '):\n state = DiffState.INDEX_LINE\n continue\n\n if state == DiffState.INDEX_LINE and line.startswith('--- '):\n state = DiffState.A_LINE\n continue\n\n if state == DiffState.A_LINE and line.startswith('+++ '):\n state = DiffState.B_LINE\n continue\n\n if state in [DiffState.B_LINE, DiffState.DIFF_LINES] and line.startswith('@@ '):\n state = DiffState.AT_LINE\n continue\n\n if state in [DiffState.AT_LINE, DiffState.DIFF_LINES] and (\n line.startswith(('+', '-', ' '))):\n state = DiffState.DIFF_LINES\n\n if line.startswith(' '):\n continue\n diff_lines.add(line)\n continue\n\n state = DiffState.START\n return diff_lines", "def parse_event(event):\n attrs = event.get('Records')[0].get('Sns').get('MessageAttributes')\n\n try:\n if attrs.get('X-Github-Event') and attrs.get('X-Github-Event').get('Value') == 'push':\n event_obj = json.loads(event.get('Records')[0].get('Sns').get('Message'))\n found_branch = event_obj['ref'].split('/')[-1]\n if found_branch == os.environ['branch']:\n return event_obj['head_commit']['id']\n return False\n return False\n except AttributeError:\n raise", "def git_changelog(\n path=None,\n tags=None,\n append_tags=None,\n git_bin=None,\n 
format='rst',\n heading_char=None,\n heading_level=2,\n include_cmd=True):\n\n git_bin = (\n distutils.spawn.find_executable('git') if git_bin is None else git_bin)\n git_cmd = [git_bin]\n if path:\n git_cmd.extend(['-R', path])\n\n _format = format.lower()\n if heading_char is None:\n if _format == 'rst':\n heading_char = '^'\n elif _format == 'md':\n heading_char = '#'\n\n def git_list_tags(tags=None,\n tagrgx=TAGRGX_DEFAULT,\n append_tags=None,\n git_cmd=git_cmd,\n heading_level=heading_level,\n include_cmd=include_cmd,\n ):\n \"\"\"List git tag pairs which match a regex\n\n Keyword Arguments:\n tags (list): empty list of addition tags\n tagrgx (``rawstr``): default: ``'v?\\d+.*'``\n append_tags (list or None): additional tags to append\n git_cmd (list): list of command strings\n heading_level (int): heading level 2 = '##'\n include_cmd=True,\n\n Yields:\n str: tag name\n\n \"\"\"\n git_list_tags_cmd = git_cmd[:] + ['tag', '-l']\n\n if tags is None:\n\n if True:\n git_get_first_rev_cmd = [\n 'rev-list', '--all', '--reverse', '--abbrev-commit'] #|head -n 1\n cmd = git_cmd + git_get_first_rev_cmd\n first_rev_output = subprocess.check_output(cmd).splitlines()\n if not first_rev_output:\n raise Exception(('no first revision found:',\n ('cmd', cmd),\n ('output', first_rev_output)))\n else:\n yield first_rev_output[0].rstrip()\n\n tag_output = subprocess.check_output(git_list_tags_cmd).splitlines()\n logging.debug(('tag_output', tag_output))\n\n # import semantic_version\n versiontags = []\n for x in tag_output:\n x = str(x)\n if re.match(tagrgx, x):\n if x.startswith('v'):\n _x = x[1:]\n elif x.startswith('release/'):\n _x = x[7:]\n else:\n _x = x\n ver = semantic_version.Version(_x.rstrip())\n versiontags.append((ver, x))\n for version, _tag in sorted(versiontags):\n yield _tag\n if append_tags:\n for _tag in append_tags:\n yield _tag\n\n tagsiter = git_list_tags(tags=tags,\n append_tags=append_tags,\n git_cmd=git_cmd)\n tags = list(tagsiter)\n logging.debug(('tags', tags))\n\n def git_get_rev_date(revstr, git_cmd=git_cmd):\n git_get_rev_date_cmd = ['log', '-n1', revstr, '--format=%ci']\n cmd = git_cmd + git_get_rev_date_cmd\n return subprocess.check_output(cmd).strip()\n\n def iter_tag_pairs(tags,\n format='rst',\n heading_char='^',\n heading_level=2,\n include_cmd=True,\n encoding='utf-8'\n ):\n \"\"\"Iterate over 2-tuple tag pairs e.g. 
``[(tag1, tag2), ]``\n\n Args:\n tags (list\n \"\"\"\n tagdates = collections.OrderedDict()\n tagpairsiter = _izip(tags,\n itertools.islice(tags, 1, None))\n tagpairs = list(tagpairsiter)\n logging.debug(('tagpairs', tagpairs))\n\n _format = format.lower()\n if _format not in ['rst', 'md']:\n raise ValueError(('format unsupported', _format))\n\n def iter_release_data(tagpairs, git_cmd):\n for (tag1, tag2) in tagpairs[::-1]:\n data = {}\n tag1 = tag1.decode(encoding) if hasattr(tag1, 'decode') else tag1\n #tag1date = tagdates.setdefault(tag1, git_get_rev_date(tag1))\n tag2date = tagdates.setdefault(tag2, git_get_rev_date(tag2))\n data['tag2date'] = tag2date\n heading = rst_escape(\"%s (%s)\" % (tag2, tag2date.decode(encoding))) # TODO: date\n data['heading'] = heading\n logpath = \"%s..%s\" % (tag1, tag2)\n data['logpath'] = logpath\n changelog_cmd = ['log', '--reverse', '--pretty=format:* %s [%h]', logpath]\n data['changelog_cmd'] = changelog_cmd\n changelog_cmdstr = \"log --reverse --pretty=format:'* %s [%h]' \" + logpath\n data['changelog_cmdstr'] = changelog_cmdstr\n cmd = git_cmd + changelog_cmd\n data['cmd'] = cmd\n logging.debug(cmd)\n logging.debug(('cmdstr*', ' '.join(cmd)))\n output = subprocess.check_output(cmd)\n data['_output'] = output\n data['output_rst'] = rst_escape(output)\n yield data\n\n #\n tag1 = tag2\n\n def template_as_rst(tagpairs,\n git_cmd,\n heading_char=heading_char,\n include_cmd=True):\n for data in iter_release_data(tagpairs, git_cmd):\n # RST heading\n yield ''\n yield ''\n yield data['heading']\n yield heading_char * len(data['heading'])\n if include_cmd:\n yield \"::\"\n yield \"\"\n yield \" git %s\" % (data['changelog_cmdstr'])\n yield \"\"\n yield data['output_rst']\n\n def template_as_md(tagpairs,\n git_cmd,\n heading_char='#',\n heading_level=2,\n include_cmd=True):\n for data in iter_release_data(tagpairs, git_cmd):\n # RST heading\n yield ''\n yield ''\n if heading_level:\n yield \"%s %s\" % ((heading_level * heading_char), data['heading'])\n if include_cmd:\n yield \"```bash\"\n yield \"$ git %s\" % (data['changelog_cmdstr'])\n yield \"```\"\n yield \"\"\n yield data['output_rst']\n\n if _format == 'rst':\n return template_as_rst(\n tagpairs,\n git_cmd,\n heading_char=heading_char,\n include_cmd=include_cmd)\n elif _format == 'md':\n return template_as_md(\n tagpairs,\n git_cmd,\n heading_char=heading_char,\n heading_level=heading_level,\n include_cmd=include_cmd)\n\n\n for line in iter_tag_pairs(tags,\n format=format,\n heading_char=heading_char,\n heading_level=heading_level,\n include_cmd=include_cmd):\n yield line", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return data['tag_name']", "def parse_logs(log_paths):\n from psclient.file_util import read as _read\n\n import datetime, json\n result = []\n for log_path in log_paths:\n data = _read(log_path).split('\\n')\n for row in data:\n if not row: continue\n timestamp, log = row.split(',', 1)\n result.append((\n datetime.datetime.strptime(timestamp, 
\"%Y-%m-%dT%H:%M:%S\"),\n json.loads(log)))\n\n # sort by timestamp\n result.sort()\n\n return result", "def get_latest_posts(parser, token):\n\ttry:\n\t\ttag_name, arg = token.contents.split(None, 1)\n\texcept ValueError:\n\t\traise template.TemplateSyntaxError, \"%s tag requires arguments\" % token.contents.split()[0]\n\t\n\tm = re.search(r'(.*?) as (\\w+)', arg)\n\t\n\tif not m:\n\t\traise template.TemplateSyntaxError, \"%s tag had invalid arguments\" % tag_name\n\t\n\tformat_string, var_name = m.groups()\n\t\n\treturn LatestPosts(format_string[0], var_name)", "def _fetch_latest_for_tag(self, tag, today):\n result = []\n url = Fetch163.search_link % urllib2.quote(tag.name.encode('utf8'))\n try:\n resp = urllib2.urlopen(url)\n except urllib2.URLError as e:\n urllib_error(e)\n else:\n doc = eval(resp.read())\n if doc and type(doc) is list:\n if today:\n news_today = self._today_filter(doc, delta=2)\n else:\n news_today = doc\n for d in news_today:\n docid = d.get('docid', '')\n #title = u'%s' % d.get('title', '')\n # the d.get('title') is a unicode string represent by\n # python str, so use unicode-escape to decode it.\n title = d.get('title', '')\n #print type(title)\n news_title = self._trans_title(title)\n if docid and title:\n news_exits = News.objects.filter(\n Q(docid=docid) | Q(title=news_title)\n )\n #print docid, news_title, news_exits\n intro, body, c_num, ptime, pic = self._fetch_news(docid)\n if not news_exits:\n print 'new news', news_title, docid\n news = News()\n news.docid = docid\n news.title = news_title\n news.content = body\n news.tag = tag\n news.comment_num = c_num\n news.list_pic = pic\n news.abstract = intro\n news.update_time = ptime\n news.save()\n import time\n time.sleep(2)\n if news:\n result.append(news)\n else:\n print 'update news', news_title\n n = news_exits[0]\n print 'old:', n.comment_num, 'new:', c_num\n n.comment_num = c_num\n n.save()\n else:\n print 'Fetch news for tag: %s, Error' % tag.name\n\n return result", "def parse_git_log(cls, repo_path, commit=None, pkgs=False, verbosity=-1):\n cmd = shlex.split(cls._git_cmd)\n # custom git log format, see the \"PRETTY FORMATS\" section of the git\n # log man page for details\n format_lines = [\n '# BEGIN COMMIT',\n '%h', # abbreviated commit hash\n '%cd', # commit date\n '%an <%ae>', # Author Name <[email protected]>\n '%cn <%ce>', # Committer Name <[email protected]>\n '%B', # commit message\n '# END MESSAGE BODY',\n ]\n format_str = '%n'.join(format_lines)\n cmd.append(f'--pretty=tformat:{format_str}')\n\n if commit:\n if '..' 
in commit:\n cmd.append(commit)\n else:\n cmd.append(f'{commit}..origin/HEAD')\n else:\n cmd.append('origin/HEAD')\n\n git_log = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=repo_path)\n line = git_log.stdout.readline().decode().strip()\n if git_log.poll():\n error = git_log.stderr.read().decode().strip()\n logger.warning('skipping git checks: %s', error)\n return\n\n count = 1\n with base.ProgressManager(verbosity=verbosity) as progress:\n while line:\n hash = git_log.stdout.readline().decode().strip()\n commit_date = git_log.stdout.readline().decode().strip()\n author = git_log.stdout.readline().decode('utf-8', 'replace').strip()\n committer = git_log.stdout.readline().decode('utf-8', 'replace').strip()\n\n message = []\n while True:\n line = git_log.stdout.readline().decode('utf-8', 'replace').strip('\\n')\n if line == '# END MESSAGE BODY':\n # drop trailing newline if it exists\n if not message[-1]:\n message.pop()\n break\n message.append(line)\n\n # update progress output\n progress(f'{hash} commit #{count}, {commit_date}')\n count += 1\n\n commit = GitCommit(hash, commit_date, author, committer, message)\n if not pkgs:\n yield commit\n\n # file changes\n while True:\n line = git_log.stdout.readline().decode()\n if line == '# BEGIN COMMIT\\n' or not line:\n break\n if pkgs:\n parsed = cls._parse_file_line(line.strip())\n if parsed is not None:\n atom, status = parsed\n yield GitPkgChange(atom, status, commit)", "def parse_logs(node, logs):\n entries = []\n lines = logs.splitlines(False)\n while lines:\n line = lines.pop(0)\n while lines and not ((Log.log_juba.match(lines[0]) or Log.log_zk.match(lines[0]))):\n line += '\\n' + lines.pop(0)\n try:\n entries.append(Log(node, line))\n except JubaTestAssertionError as e:\n log.warning('failed to parse log line: %s', line)\n return entries", "def parse(self) :\n self._curname = None\n self._curattributes = None\n \n self.setVersion((ord(self._data[0]), ord(self._data[1])))\n self.setOperationId(unpack(\">H\", self._data[2:4])[0])\n self.setRequestId(unpack(\">I\", self._data[4:8])[0])\n self.position = 8\n endofattributes = self.tagvalues[\"end-of-attributes-tag\"]\n maxdelimiter = self.tagvalues[\"event_notification-attributes-tag\"]\n nulloffset = lambda : 0\n #try :\n if 1:\n tag = ord(self._data[self.position])\n while tag != endofattributes :\n self.position += 1\n name = self.tags[tag]\n if name is not None :\n func = getattr(self, name.replace(\"-\", \"_\"), nulloffset)\n self.position += func()\n if ord(self._data[self.position]) > maxdelimiter :\n self.position -= 1\n continue\n oldtag = tag\n tag = ord(self._data[self.position])\n if tag == oldtag :\n self._curattributes.append([])\n #except IndexError :\n # raise IPPError, \"Unexpected end of IPP message.\"\n \n self.data = self._data[self.position+1:]\n self.parsed = True", "def get_release_date ():\n fname = os.path.join(\"doc\", \"changelog.txt\")\n release_date = \"unknown\"\n with open(fname) as fd:\n # the release date is on the first line\n line = fd.readline()\n mo = release_ro.search(line)\n if mo:\n release_date = mo.groups(1)\n return release_date", "def commit_history(cli):\n result = []\n record = OrderedDict()\n for line in cli.splitlines():\n r = re.search(' ([A-Z][a-z]+(?: ID)?): (.*?) 
+([A-Z][a-z]+): (.*)', line)\n if not r:\n continue\n record[r.group(1)] = r.group(2)\n record[r.group(3)] = r.group(4)\n if r.group(3) == 'Comment':\n result.append(record)\n record = OrderedDict()\n return result", "def _get_changes_metadata(document):\n return ((el.get(author_attrib),\n datetime.datetime.strptime(el.get(date_attrib), date_format))\n for el in _get_comments(document))", "def get_changelog(self, when=0, db=None):\r\n if not db:\r\n db = self.env.get_db_cnx()\r\n cursor = db.cursor()\r\n if when:\r\n cursor.execute(\"SELECT time,author,field,oldvalue,newvalue \"\r\n \"FROM ticket_change WHERE ticket=%s AND time=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'attachment',null,filename \"\r\n \"FROM attachment WHERE id=%s AND time=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'comment',null,description \"\r\n \"FROM attachment WHERE id=%s AND time=%s \"\r\n \"ORDER BY time\",\r\n (self.id, when, str(self.id), when, self.id, when))\r\n else:\r\n cursor.execute(\"SELECT time,author,field,oldvalue,newvalue \"\r\n \"FROM ticket_change WHERE ticket=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'attachment',null,filename \"\r\n \"FROM attachment WHERE id=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'comment',null,description \"\r\n \"FROM attachment WHERE id=%s \"\r\n \"ORDER BY time\", (self.id, str(self.id), self.id))\r\n log = []\r\n for t, author, field, oldvalue, newvalue in cursor:\r\n log.append((int(t), author, field, oldvalue or '', newvalue or ''))\r\n return log", "def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"", "def get_version():\n import subprocess\n proc = subprocess.Popen(\n 'hg log -r tip --template \"{latesttagdistance}\"',\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n pending, _ = proc.communicate()\n return \"%(tag)sd%(pending)s\" % dict(tag=config.TAG, pending=pending)", "def getversion_nightly(path=None): # pragma: no cover\n if not path:\n path = _get_program_dir()\n\n with open(os.path.join(path, 'version')) as data:\n (tag, rev, date, hsh) = data.readlines()\n\n date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')\n\n if not date or not tag or not rev:\n raise VersionParseError\n return (tag, rev, date, hsh)", "def svn_client_invoke_get_commit_log2(svn_client_get_commit_log2_t__obj, char_log_msg, char_tmp_file, apr_array_header_t_commit_items, void_baton, apr_pool_t_pool): # real signature unknown; 
restored from __doc__\n pass", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def _most_recent_log_matching(self, grep_str):\n return self.run(['git', 'log', '-1', '--grep', grep_str])", "def parse(self, line):\n try:\n (year, month, day, hour, minute, second, microseconds, offset_hour, offset_minute, source, process, logentry) = re.match('^(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d)\\.([\\d]+)\\+(\\d\\d):(\\d\\d) ([a-z]+)\\[([a-zA-Z0-9_.]+)\\]: ([0-9a-z-A-Z\\-_\\.\\[\\]:\\?\\#\\\",/\\ ={}\\'\\(\\)<>]+)$', line).groups()\n except:\n pass\n \n try:\n parsed_data = dict()\n parsed_data['timestamp'] = \" \".join([\"-\".join([year, month, day]), \":\".join([hour, minute, second])])\n parsed_data['log_time'] = datetime.datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n parsed_data['log_source'] = source\n parsed_data['log_type'] = process\n except (AttributeError, UnboundLocalError):\n PARSE_ERRORS.append(line)\n return False\n\n #TODO: This still needs work on spaces in values surrounded by \" \" \n if parsed_data['log_source'] == \"heroku\":\n if logentry.__len__() > 1:\n logentry = re.sub(', ', ',', logentry)\n line_chunks = re.split(' ', logentry)\n for chunk in line_chunks:\n line_chunks = re.split('=', chunk)\n if line_chunks.__len__() > 2:\n #fwd and path are a little clunky to parse\n pass\n elif line_chunks.__len__() > 1:\n parsed_data[line_chunks[0]] = line_chunks[1]\n else:\n pass\n else:\n return False\n else:\n # TODO: [app] \n # Needs parsing. Do that here.\n return False\n\n return parsed_data", "def last_update(self):\r\n request = http.Request('GET', '/metadata/last_update.json')\r\n return request, parsers.parse_json", "def _grab_history(self):\n self.data['history_lines'] = []\n self.data['history_file'] = None\n self.data['history_encoding'] = None\n self.data['headings'] = []\n self.data['history_last_release'] = ''\n self.data['history_insert_line_here'] = 0\n default_location = None\n config = self.setup_cfg.config\n if config and config.has_option('zest.releaser', 'history_file'):\n default_location = config.get('zest.releaser', 'history_file')\n history_file = self.vcs.history_file(location=default_location)\n self.data['history_file'] = history_file\n if not history_file:\n logger.warn(\"No history file found\")\n return\n logger.debug(\"Checking %s\", history_file)\n history_lines, history_encoding = read_text_file(history_file)\n history_lines = history_lines.split('\\n')\n headings = utils.extract_headings_from_history(history_lines)\n if not headings:\n logger.warn(\"No detectable version heading in the history \"\n \"file %s\", history_file)\n return\n self.data['history_lines'] = history_lines\n self.data['history_encoding'] = history_encoding\n self.data['headings'] = headings\n\n # Grab last header.\n start = headings[0]['line']\n if len(headings) > 1:\n # Include the next header plus underline, as this is nice\n # to show in the history_last_release.\n end = headings[1]['line'] + 2\n else:\n end = len(history_lines)\n history_last_release = '\\n'.join(history_lines[start:end])\n self.data['history_last_release'] = history_last_release\n\n # Add line number where an extra changelog entry can be inserted. Can\n # be useful for entry points. 
'start' is the header, +1 is the\n # underline, +2 is probably an empty line, so then we should take +3.\n # Or rather: the first non-empty line.\n insert = start + 2\n while insert < end:\n if history_lines[insert].strip():\n break\n insert += 1\n self.data['history_insert_line_here'] = insert", "def _generate_changelog_for_form(form: PostMetaForm) -> List[str]:\n changelog = []\n changed_fields = form.changed_data.copy()\n if \"wp_id\" in changed_fields:\n changed_fields.remove(\"wp_id\")\n\n __ = lambda form, field: (\n \", \".join([str(i) for i in form.initial.get(field)]),\n \", \".join([str(i) for i in form.cleaned_data.get(field)]),\n )\n _ = lambda form, field: (form.initial.get(field), form.cleaned_data.get(field))\n for changed_field in changed_fields:\n log = None\n\n if changed_field == \"issues\":\n log = '* выпуски сменились с \"{0}\" на \"{1}\"'.format(\n *__(form, changed_field)\n )\n elif changed_field == \"editor\":\n # Initial ForeignKey value is stored as int. Populate it\n args = _(form, changed_field)\n init_editor = str(User.objects.get(id=args[0]))\n new_args = (init_editor, args[1])\n log = '* редактор cменился с \"{0}\" на \"{1}\"'.format(*new_args)\n elif changed_field == \"finished_at\":\n log = '* дедлайн этапа cменился с \"{0}\" на \"{1}\"'.format(\n *_(form, changed_field)\n )\n elif changed_field == \"published_at\":\n log = '* дата публикации сменилась с \"{0}\" на \"{1}\"'.format(\n *_(form, changed_field)\n )\n\n if log:\n changelog.append(log)\n\n return changelog", "def test_none_version_return_if_all_excluded(self): # pylint: disable=invalid-name\n version_prefix = 'v'\n tags = [_TagInfo('v1.0.1', 'commit1', version_prefix),\n _TagInfo('notsemver', 'commit2', version_prefix),\n _TagInfo('v1.0.v2', 'commit2', version_prefix)]\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.1'), None)", "def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n # Extract bug type\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n # Get whether or not the bug was reproduced\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n # Skip the 'Attempted to reproduce' line if exists\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n # Populate the sequence of requests that made the bug\n while line and not line.startswith(BUG_START):\n seq += self._get_request(line)\n line = file.readline()\n # Add the bug sequence to the bug list\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(\"Failed to read bug log. 
Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException", "def _parse_logs_for_results(self, logs):\n results = {}\n for line in logs.split(\"\\n\"):\n split_line = line.split(\":\")\n if len(split_line) == 2:\n results[split_line[0].strip()] = split_line[1].strip()\n if results == {}:\n results = None\n return results", "def _parseRawInfo(self, rawinfo: Optional[Sequence[Any]] = None) -> None:\n if rawinfo is not None:\n for version in rawinfo:\n cversion = ChartVersionInfo(self, version)\n if self.latest is None or cversion.version_info > self.latest.version_info:\n self.latest = cversion\n self.versions.append(cversion)\n sorted(self.versions, key=lambda x: x.version_info, reverse=True)", "def read_commits(repo_path, from_tag):\n\n commit_list = CommitList()\n\n child = Popen(\n CommitList.create_log_command(from_tag),\n cwd=os.path.expanduser(repo_path),\n stdout=PIPE)\n\n for line in child.stdout:\n if line:\n commit_list.add(line)\n\n return commit_list", "def parseModelHistory(*args):\n return _libsbml.RDFAnnotationParser_parseModelHistory(*args)", "def retrieve_git_log(self):\n result = [str(entry).split(\"\\t\")[1]\n for entry in self.repo.head.log()]\n\n return result", "def latest_tagged_video(tag):\n if not isinstance(tag, Tag):\n try:\n tag = Tag.objects.get(text=tag)\n except Tag.DoesNotExist:\n return mark_safe('')\n video = first_or_none(Video.objects.filter(tags=tag)\n .order_by('-issue__issue_date'))\n if video:\n return mark_safe(video.key)\n return mark_safe('')", "def finish(c):\n files_to_commit = [os.path.relpath(path, start=os.curdir) for path in [CHANGELOG_ABSPATH, SETTINGS_PATH]]\n version: VersionStructure = VersionStructure.from_settings()\n\n c.run(f\"git add %s\" % \" \".join(files_to_commit))\n c.run(f'git commit -m \"version {version}\" --no-verify')\n c.run(f\"git tag {version}\")", "def svn_rev_info(path): # pragma: no cover\n if not os.path.isdir(os.path.join(path, '.svn')):\n path = os.path.join(path, '..')\n\n _program_dir = path\n filename = os.path.join(_program_dir, '.svn/entries')\n if os.path.isfile(filename):\n with open(filename) as entries:\n version = entries.readline().strip()\n if version != '12':\n for _ in range(3):\n entries.readline()\n tag = entries.readline().strip()\n t = tag.split('://', 1)\n t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',\n '')\n tag = '[{}] {}'.format(*t)\n for _ in range(4):\n entries.readline()\n date = time.strptime(entries.readline()[:19],\n '%Y-%m-%dT%H:%M:%S')\n rev = entries.readline()[:-1]\n return tag, rev, date\n\n # We haven't found the information in entries file.\n # Use sqlite table for new entries format\n from sqlite3 import dbapi2 as sqlite\n with closing(\n sqlite.connect(os.path.join(_program_dir, '.svn/wc.db'))) as con:\n cur = con.cursor()\n cur.execute(\"\"\"select\nlocal_relpath, repos_path, revision, changed_date, checksum from nodes\norder by revision desc, changed_date desc\"\"\")\n _name, tag, rev, date, _checksum = cur.fetchone()\n cur.execute('select root from repository')\n tag, = cur.fetchone()\n\n tag = os.path.split(tag)[1]\n date = time.gmtime(date / 1_000_000)\n return tag, rev, date", "def get_latest_tag(self, repo: git.Repo) -> Tuple[Optional[\n git.refs.tag.TagReference], Optional[semantic_version.Version]]:\n raw_tag = self._search_strategy(\n repo=repo, branch=self._branch)\n if raw_tag is None:\n return None, None\n sem_tag = semantic_version.Version(\n tag_search_strategy.clean_tag_name(str(raw_tag)))\n return raw_tag, sem_tag", "async 
def _update_addon_changelog(self, slug):\n try:\n changelog = await self.hassio.get_addon_changelog(slug)\n return (slug, changelog)\n except HassioAPIError as err:\n _LOGGER.warning(\"Could not fetch changelog for %s: %s\", slug, err)\n return (slug, None)", "def get_last_svn_log_entry(svn_url, rev_start, rev_end):\r\n return get_one_svn_log_entry(svn_url, rev_end, rev_start, stop_on_copy=True)", "def _GetXMLChangeLogByModule(module_name, module_src_dir,\n last_revision, current_revision):\n if (last_revision and current_revision > last_revision):\n command = [slave_utils.SubversionExe(), 'log', module_src_dir,\n '--xml', '-r', '%d:%d' % (last_revision + 1, current_revision)]\n changelog = chromium_utils.GetCommandOutput(command)\n changelog_description = '%s changeLogs from ]%d to %d]' % (\n module_name, last_revision, current_revision)\n else:\n changelog = ''\n changelog_description = 'No new ChangeLogs on %s' % (module_name)\n return (changelog, changelog_description)", "def test_none_return_if_all_excluded(self): # pylint: disable=invalid-name\n tags = [_TagInfo('1.0.1', 'commit1', ''),\n _TagInfo('notsemver', 'commit2', '')]\n self.assertEqual(_seek_last_semver_tag(tags, '1.0.1'), None)", "def rev_parse(commit_ish, short=False):\n args = [\"--short\"] if short else []\n return (\n subprocess.check_output([\"git\", \"rev-parse\"] + args + [commit_ish])\n .decode()\n .strip()\n )", "def detailed_log(self, selected_hash, current_path):\n p = Popen(\n [\"git\", \"log\", \"-1\", \"--stat\", \"--numstat\", \"--oneline\", selected_hash],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n note = [0] * 3\n count = 0\n temp = \"\"\n line_array = my_output.decode(\"utf-8\").splitlines()\n length = len(line_array)\n INSERTION_INDEX = 0\n DELETION_INDEX = 1\n MODIFIED_FILE_PATH_INDEX = 2\n if length > 1:\n temp = line_array[length - 1]\n words = temp.split()\n for i in range(0, len(words)):\n if words[i].isdigit():\n note[count] = words[i]\n count += 1\n for num in range(1, int(length / 2)):\n line_info = line_array[num].split()\n words = line_info[2].split(\"/\")\n length = len(words)\n result.append(\n {\n \"modified_file_path\": line_info[MODIFIED_FILE_PATH_INDEX],\n \"modified_file_name\": words[length - 1],\n \"insertion\": line_info[INSERTION_INDEX],\n \"deletion\": line_info[DELETION_INDEX],\n }\n )\n\n if note[2] == 0 and length > 1:\n if \"-\" in temp:\n exchange = note[1]\n note[1] = note[2]\n note[2] = exchange\n\n return {\n \"code\": p.returncode,\n \"modified_file_note\": temp,\n \"modified_files_count\": note[0],\n \"number_of_insertions\": note[1],\n \"number_of_deletions\": note[2],\n \"modified_files\": result,\n }\n else:\n return {\n \"code\": p.returncode,\n \"command\": \"git log_1\",\n \"message\": my_error.decode(\"utf-8\"),\n }" ]
[ "0.616343", "0.6080337", "0.6000073", "0.59245783", "0.58972937", "0.5849398", "0.58468693", "0.57212126", "0.569988", "0.56685996", "0.5651651", "0.5609761", "0.5598691", "0.5501291", "0.549835", "0.54694337", "0.5457894", "0.5446662", "0.5412487", "0.53381616", "0.53150606", "0.53147554", "0.5304559", "0.52862513", "0.5274378", "0.52742696", "0.52488697", "0.52463675", "0.5225317", "0.5218753", "0.52187395", "0.52163255", "0.5213564", "0.5209749", "0.5204587", "0.51352173", "0.51088506", "0.51061785", "0.5095868", "0.50940025", "0.508064", "0.50766945", "0.50547314", "0.50276506", "0.5015213", "0.5001551", "0.49720523", "0.4960515", "0.49570763", "0.49535528", "0.49494562", "0.49418792", "0.4938889", "0.49385956", "0.49372816", "0.49204296", "0.49144617", "0.48982424", "0.48864827", "0.4883411", "0.48701715", "0.4869574", "0.48653355", "0.48493457", "0.4841904", "0.48345715", "0.48208156", "0.48148698", "0.48121256", "0.4810188", "0.47941312", "0.47890547", "0.47888255", "0.4786807", "0.47857198", "0.4784182", "0.47748876", "0.47729832", "0.47717473", "0.4763436", "0.4760867", "0.47587952", "0.47574908", "0.4750244", "0.47493076", "0.474909", "0.47419953", "0.47348452", "0.47338638", "0.4727644", "0.4707482", "0.4705695", "0.47023216", "0.4699912", "0.46995735", "0.4692767", "0.46896097", "0.46885204", "0.4687569", "0.46870315" ]
0.6108666
1
Constructs release notes for Bugzilla service deployment ticket.
def get_release_notes(self): notes = self.output.get_header('RELEASE NOTES') notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \ self.repo, self.product) + '\n' notes += self.output.get_sub_header('COMPARISONS') notes += self.get_comparison(self.latest_tags[0][VERS], self.latest_tags[1][VERS]) if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1): notes += self.get_comparison(self.latest_tags[1][VERS], self.latest_tags[2][VERS]) if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW: notes += self.get_comparison(self.latest_tags[2][VERS], self.latest_tags[3][VERS]) tag_data = self.get_tag(self.latest_tags[3][SHA]) notes += self.output.get_sub_header('TAGS') notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\n' notes += self.get_url_tag_commit(tag_data["object"]["sha"]) + '\n' changelog = self.get_changelog(tag_data["object"]["sha"]) if changelog: notes += self.output.get_sub_header('CHANGELOG') notes += changelog return notes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def release_notes(version, author, git_ref_target, git_ref_source, build_type):\n print('generating release notes')\n if git_ref_source:\n if git_ref_source != 'HEAD':\n git_ref_source = 'origin/{}'.format(git_ref_source)\n changelog = run('git log origin/{}..{}'.format(git_ref_target,\n git_ref_source))\n else:\n git_ref_source = 'origin/master'\n changelog = run('git log {}..origin/{}'.format(git_ref_source, git_ref_target))\n notes = {\n 'version': version,\n 'author': author,\n 'build_type': build_type,\n 'date': datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S'),\n 'changelog': changelog.stdout\n }\n return notes", "def main():\n parser = argparse.ArgumentParser(description='Creates tickets for release certification')\n parser.add_argument('-u', '--username', help='jira username', default='admin')\n parser.add_argument('-p', '--password', help='jira password', default='admin')\n parser.add_argument('-c', '--config', help='path to config file', default='./options.ini')\n parser.add_argument('-j', '--jira', help='url of jira server', default='http://localhost:8080')\n\n args = parser.parse_args()\n\n jira_user = args.username\n jira_pass = args.password\n jira_server = args.jira\n config_file_path = args.config\n CONFIG.read(config_file_path)\n\n parent_ticket = config_map('JiraOptions')['parent_ticket']\n apprenda_version = config_map('VersionInfo')['to_version']\n jira_project = config_map('JiraOptions')['project']\n jira_issue_type = config_map('JiraOptions')['issue_type']\n jira = JIRA(jira_server, basic_auth=(jira_user, jira_pass))\n\n parent_issue = jira.issue(parent_ticket)\n ticket_list = []\n\n # create clean install tickets\n clean_strings = config_map('CleanInstallSection')\n for cloud in ['single', 'hybrid']:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(clean_strings['summary'], apprenda_version, cloud)\n ticket_to_add.format_description(clean_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create upgrade tickets\n from_versions = json.loads(config_map('VersionInfo')['from_versions'])\n upgrade_strings = config_map('UpgradeSection')\n\n # single cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"single\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # hybrid cloud\n for version in from_versions:\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(upgrade_strings['summary'], apprenda_version, version,\n \"hybrid\")\n ticket_to_add.format_description(upgrade_strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n # create testing tickets for other tasks\n for section in CONFIG.sections():\n if 'Ticket' in section:\n strings = config_map(section)\n ticket_to_add = ticket.Ticket(jira_project, jira_issue_type)\n ticket_to_add.format_summary(strings['summary'], apprenda_version)\n ticket_to_add.format_description(strings['description'])\n ticket_list.append(ticket_to_add.__dict__)\n\n print 'Created {0} tickets, now sending them to Jira'.format(len(ticket_list))\n # send issues to jira and create tickets and links\n issues = jira.create_issues(field_list=ticket_list)\n\n for item in issues:\n jira.create_issue_link(\n type=\"Task of Story\",\n outwardIssue=item['issue'].key,\n inwardIssue=parent_issue.key,\n )\n\n print 'Finished 
linking issues, exiting.'", "def generate_release_notes(project_id, endstr = ' <br>', **config):\n\n gl = gitlab.Gitlab(**config)\n project = gl.projects.get(project_id)\n\n if not project.mergerequests.list(state='merged'):\n raise ValueError(f\"There is not merged merge request for project {project_id} {project.name}\")\n\n if not project.releases.list():\n log = f\"Changelog of {project.name}:{endstr}\"\n last_date = '0000-01-01T00:00:00Z'\n else:\n last_release = project.releases.list()[0]\n log = f\"Changelog since release {last_release.name} of {project.name}:{endstr}\"\n last_date = last_release.released_at\n\n page = 1\n list_mrs = project.mergerequests.list(state='merged',\n order_by='updated_at',\n updated_after=last_date,\n page=page)\n if not list_mrs:\n log += f\"There is no merged merge request after {last_date}\"\n return log\n\n while list_mrs:\n for mr in list_mrs:\n line = f\" * {mr.title} (@{mr.author['username']}){endstr}\"\n log += line\n\n page += 1\n list_mrs = project.mergerequests.list(state='merged',\n order_by='updated_at',\n updated_after=last_date,\n page=page\n )\n\n return log", "def generate_release_notes(repo, repo_path,\n start_revision, end_revision,\n show_dates, skip_requirement_merges,\n is_stable, series,\n email, email_from,\n email_reply_to, email_tags,\n include_pypi_link,\n changes_only,\n first_release,\n deliverable_file, description,\n publishing_dir_name,\n ):\n repo_name = repo.split('/')[-1]\n # Determine if this is a release candidate or not.\n is_release_candidate = 'rc' in end_revision\n\n # Do not mention the series in independent model since there is none\n if series == 'independent':\n series = ''\n\n if not email_from:\n raise RuntimeError('No email-from specified')\n\n # Get the commits that are in the desired range...\n git_range = \"%s..%s\" % (start_revision, end_revision)\n if show_dates:\n format = \"--format=%h %ci %s\"\n else:\n format = \"--oneline\"\n cmd = [\"git\", \"log\", \"--no-color\", format, \"--no-merges\", git_range]\n stdout = run_cmd(cmd, cwd=repo_path)\n changes = []\n for commit_line in stdout.splitlines():\n commit_line = commit_line.strip()\n if not commit_line or is_skippable_commit(skip_requirement_merges,\n commit_line):\n continue\n else:\n changes.append(commit_line)\n\n # Filter out any requirement file changes...\n requirement_changes = []\n requirement_files = list(glob.glob(os.path.join(repo_path,\n '*requirements*.txt')))\n if requirement_files:\n cmd = ['git', 'diff', '-U0', '--no-color', git_range]\n cmd.extend(requirement_files)\n stdout = run_cmd(cmd, cwd=repo_path)\n requirement_changes = [line.strip()\n for line in stdout.splitlines() if line.strip()]\n\n # Get statistics about the range given...\n cmd = ['git', 'diff', '--stat', '--no-color', git_range]\n stdout = run_cmd(cmd, cwd=repo_path)\n diff_stats = []\n for line in stdout.splitlines():\n line = line.strip()\n if not line or line.find(\"tests\") != -1 or line.startswith(\"doc\"):\n continue\n diff_stats.append(line)\n\n # Extract + valdiate needed sections...\n sections = parse_deliverable(\n series, repo_name, deliverable_file=deliverable_file)\n change_header = [\"Changes in %s %s\" % (repo, git_range)]\n change_header.append(\"-\" * len(change_header[0]))\n\n # Look for reno notes for this version.\n if not changes_only:\n logging.getLogger('reno').setLevel(logging.WARNING)\n cfg = reno_config.Config(\n reporoot=repo_path,\n )\n branch = None\n if is_stable and series:\n branch = 'origin/stable/%s' % series\n 
cfg.override(branch=branch)\n ldr = loader.Loader(conf=cfg, ignore_cache=True)\n if end_revision in ldr.versions:\n rst_notes = formatter.format_report(\n loader=ldr,\n config=cfg,\n versions_to_include=[end_revision],\n )\n reno_notes = rst2txt.convert(rst_notes).decode('utf-8')\n else:\n LOG.warning(\n ('Did not find revision %r in list of versions '\n 'with release notes %r, skipping reno'),\n end_revision, ldr.versions,\n )\n reno_notes = ''\n else:\n reno_notes = ''\n\n # The recipient for announcements should always be the\n # [email protected] ML (except for\n # release-test)\n email_to = '[email protected]'\n if repo_name == 'openstack-release-test':\n email_to = '[email protected]'\n\n params = dict(sections)\n params.update({\n 'project': repo,\n 'description': description,\n 'end_rev': end_revision,\n 'range': git_range,\n 'lib': repo_path,\n 'skip_requirement_merges': skip_requirement_merges,\n 'changes': changes,\n 'requirement_changes': requirement_changes,\n 'diff_stats': diff_stats,\n 'change_header': \"\\n\".join(change_header),\n 'emotion': random.choice(EMOTIONS),\n 'stable_series': is_stable,\n 'series': series,\n 'email': email,\n 'email_from': email_from,\n 'email_to': email_to,\n 'email_reply_to': email_reply_to,\n 'email_tags': email_tags,\n 'reno_notes': reno_notes,\n 'first_release': first_release,\n 'publishing_dir_name': publishing_dir_name,\n })\n if include_pypi_link:\n params['pypi_url'] = PYPI_URL_TPL % repo_name\n else:\n params['pypi_url'] = None\n\n response = []\n if changes_only:\n response.append(expand_template(CHANGES_ONLY_TPL, params))\n else:\n if email:\n email_header = expand_template(EMAIL_HEADER_TPL.strip(), params)\n response.append(email_header.lstrip())\n if is_release_candidate:\n response.append(expand_template(RELEASE_CANDIDATE_TPL, params))\n else:\n header = expand_template(HEADER_RELEASE_TPL.strip(), params)\n response.append(parawrap.fill(header))\n response.append(expand_template(CHANGE_RELEASE_TPL, params))\n return '\\n'.join(response)", "def make_release_notes(src, dst) -> str:\n result = _subprocess(['git', 'log', '--pretty=format:\"%s\"', f\"origin/{src}...origin/{dst}\"])\n commits = \"\\n\".join([f\"- {i[1:-1]}\" for i in result.split(\"\\n\")])\n\n if args.release_notes:\n with open(args.release_notes, 'w') as f:\n f.write(commits)\n\n return commits", "def get_release_note(comments):\n release_note = \"\"\n i = 0\n for comment in comments:\n #pprint.pprint(comment)\n #print \"**** Comment-{0}: {1}\".format(i, comment['body'])\n #print \"**** Comment-{index}: {body}\".format(\n # index=i,\n # body=comment['body']\n # )\n #print \"\\tURL: {0}\".format(comment['html_url'])\n #print \"\\tURL: {url}\".format(url=comment['html_url'])\n #comment['body'].index('Changed make')\n if comment['body'].lower().find('changed make') >= 0:\n #print \"Found 'Release Note'\"\n release_note = comment['body']\n #else:\n #print \"No 'Release Note' found\"\n\n i += 1\n # print \"----------------------------------------------------------\\n\"\n return release_note", "def create_release(release_files, changelog=\"\", output=\"\") -> str:\n release_notes = \"\"\n if 'TRAVIS_TAG' not in os.environ or not os.environ['TRAVIS_TAG']:\n print('No git tag: not deploying anything')\n return release_notes\n elif os.environ['TRAVIS_SECURE_ENV_VARS'] != 'true':\n print('No secure environment variables: not deploying anything')\n return release_notes\n elif len(release_files) == 0:\n print('No file to release')\n return release_notes\n else:\n print('Creating 
release from tag {}'.format(os.environ['TRAVIS_TAG']))\n\n headers = {\n 'User-Agent': 'Deploy-Script',\n 'Authorization': 'token {}'.format(os.environ['GH_TOKEN'])\n }\n\n changelog_content = ''\n if changelog:\n with open(changelog, 'r') as changelog_file:\n changelog_content = changelog_file.read()\n\n create_raw_data = {\n \"tag_name\": os.environ['TRAVIS_TAG'],\n \"body\": \"\\n\\n{}\".format(changelog_content)\n }\n\n # if a release exist with this tag_name delete it first\n # this allows to create the release from github website\n url = '/repos/{repo_slug}/releases/tags/{tag}'.format(\n repo_slug=os.environ['TRAVIS_REPO_SLUG'],\n tag=os.environ['TRAVIS_TAG'])\n conn = http.client.HTTPSConnection('api.github.com')\n conn.request('GET', url, headers=headers)\n response = conn.getresponse()\n release = json.loads(response.read().decode())\n\n if 'upload_url' not in release:\n print('Failed to create release!')\n print('Github API replied:')\n print('{} {}'.format(response.status, response.reason))\n print(repr(release))\n exit(-1)\n\n conn = http.client.HTTPSConnection('uploads.github.com')\n for release_file in release_files:\n _, filename = os.path.split(release_file)\n headers['Content-Type'] = 'application/zip'\n url = '{release_url}?name={filename}'.format(release_url=release['upload_url'][:-13], filename=filename)\n print('Upload to {}'.format(url))\n\n with open(release_file, 'rb') as f:\n data = f.read()\n conn.request('POST', url, data, headers)\n\n response = conn.getresponse()\n result = response.read()\n if response.status != 201:\n print('Failed to upload filename {filename}'.format(filename=filename))\n print('Github API replied:')\n print('{} {}'.format(response.status, response.reason))\n print(repr(json.loads(result.decode())))\n print('File:')\n print(' Size: {}'.format(os.path.getsize(release_file)))\n\n if output:\n with open(output, 'w') as f:\n print(\"Writing release notes\")\n print(release_notes)\n f.write(release_notes)", "def release_notes(self, release_notes):\n self._release_notes = release_notes", "def default_changelog(release_link_format: str, breaking_change_token: str = \"BREAKING\"):\n return Changelog(\n header=\"\"\"# Changelog\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog] and this project adheres to\n[Semantic Versioning].\n\nTypes of changes are:\n* **Security** in case of vulnerabilities.\n* **Deprecated** for soon-to-be removed features.\n* **Added** for new features.\n* **Changed** for changes in existing functionality.\n* **Removed** for now removed features.\n* **Fixed** for any bug fixes.\"\"\",\n config=ChangelogConfig(\n release_link_format=release_link_format,\n breaking_change_token=breaking_change_token,\n ),\n releases=OrderedDict(\n {\n ReleaseTag(\"Unreleased\"): ReleaseSection(entries={}, timestamp=None),\n }\n ),\n links=OrderedDict(\n {\n \"Unreleased\": release_link_format.format(previous_tag=\"initial\", tag=\"HEAD\"),\n \"Keep a Changelog\": \"http://keepachangelog.com/en/1.0.0/\",\n \"Semantic Versioning\": \"http://semver.org/spec/v2.0.0.html\",\n },\n ),\n )", "def create_release_notes(yaml_file, realease_notes_file, application_name):\n try:\n with open(yaml_file) as input_file: # read yaml file AND CONVERT IT INTO DICTIONARY\n release_dict=yaml.load(input_file, Loader=yaml.FullLoader)\n logging.info(\"FILE CONVERTED TO DICTIONARY SUCCESSFULLY\")\n \n \n except (FileNotFoundError,FileExistsError) as error: #file doesn't exist\n logging.warning(\"yaml file 
is not exist or damaged\")\n return None\n \n except yaml.scanner.ScannerError as error: # yaml file syntax error\n logging.warning(\"wrong yaml format\")\n return None\n \n\n with open(realease_notes_file,\"w\") as output_file :# create release note and write on it\n for key,value in release_dict.items():\n output_file.write(f\"{key}: \\n\")\n if type(value) == dict:\n for key2,value2 in value.items():\n output_file.write(f\" {key2}: {value2} \\n\")\n else:\n for value2 in value:\n output_file.write(f\" {value2} \\n\")\n output_file.write(\"\\n\")\n logging.info(\"RELEASE NOTES FILE CREATED SUCCESSFULLY\") \n return release_dict", "def create_release(config, args):\n yield config.repo.create_release(args.tag_name, name=args.name,\n target_commitish=args.get(\"target_commitish\"), body=args.get(\"body\"),\n draft=args.get_bool(\"draft\"), prerelease=args.get_bool(\"prerelease\"))", "def set_note_version_server(cls):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n #Get the list of WebRtc nuget pakcages with prereleases\n packages = NugetUtility.nuget_cli('list', 'Id:WebRtc', '-PreRelease')\n packages = packages.split('\\r\\n')\n webrtcRegex = r\"^WebRtc+\\s\"\n #Search the list of the packages for a WebRtc package and set the version\n for package in packages:\n if re.match(webrtcRegex, package, flags=0):\n version = package\n\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version) \n \n # return to the base directory\n Utility.popd()", "def postreleaser_before(data):\n\n data['dev_version_template'] = '%(new_version)s.dev'", "def _append_descriptions(self, issue, dep_name, dep_latest_version):\n logging.info(\"Updating JIRA issue {0} to track {1} upgrade process\".format(\n issue.key,\n dep_name))\n description = issue.fields.description + \"\"\"\\n\\n{0}\\n\n Please review and upgrade the {1} to the latest version {2} \\n \n cc: \"\"\".format(\n datetime.today(),\n dep_name,\n dep_latest_version\n )\n _, owners = self._find_owners(dep_name)\n for owner in owners:\n description += \"[~{0}], \".format(owner)\n try:\n self.jira.update_issue(issue, description=description)\n except Exception as e:\n traceback.print_exc()\n logging.error(\"Failed updating issue: \"+ str(e))", "def get_changelog(no):\n path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))\n lines = [line.rstrip('\\n').strip() for line in open(path) if len(line.rstrip('\\n').strip()) > 0]\n changelog = []\n title = ''\n body = []\n for l in lines:\n if l.startswith('#'):\n if len(title) > 0:\n changelog.append({'title': title, 'body': body})\n body = []\n title = l.replace('### ', '')\n else:\n body.append(l.replace('- ', ''))\n\n return changelog[0:no]", "def _get_pkg_changelog_contents(ctx: Context, version: str):\n changes = _get_changelog_contents(ctx, version)\n changes = \"\\n\".join(changes.split(\"\\n\")[2:])\n changes = changes.replace(\n 
textwrap.dedent(\n \"\"\"\n Removed\n -------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Deprecated\n ----------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Changed\n -------\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Fixed\n -----\n\n \"\"\"\n ),\n \"\",\n )\n changes = changes.replace(\n textwrap.dedent(\n \"\"\"\n Added\n -----\n\n \"\"\"\n ),\n \"\",\n )\n return changes", "def generateReleaseRunBB(self, job):\n pass", "def make_release():\n parser = OptionParser()\n parser.add_option(\"-d\", \"--destination\", action=\"store\", type=\"string\", \n dest=\"destdir\",\n help=\"directory where distributions and docs will be placed\")\n parser.add_option(\"-v\", \"--version\", action=\"store\", type=\"string\", \n dest=\"version\",\n help=\"version string applied to all openmdao distributions\")\n parser.add_option(\"-m\", action=\"store\", type=\"string\", dest=\"comment\",\n help=\"optional comment for version tag\")\n parser.add_option(\"-b\", \"--basebranch\", action=\"store\", type=\"string\", \n dest=\"base\", default='master', \n help=\"base branch for release. defaults to master\")\n parser.add_option(\"-t\", \"--test\", action=\"store_true\", dest=\"test\",\n help=\"used for testing. A release branch will not be created\")\n parser.add_option(\"-n\", \"--nodocbuild\", action=\"store_true\", \n dest=\"nodocbuild\",\n help=\"used for testing. The docs will not be rebuilt if they already exist\")\n parser.add_option(\"--host\", action='append', dest='hosts', metavar='HOST',\n default=[],\n help=\"host from config file to build bdist_eggs on. \"\n \"Multiple --host args are allowed.\")\n parser.add_option(\"-c\", \"--config\", action='store', dest='cfg', \n metavar='CONFIG', default='~/.openmdao/testhosts.cfg',\n help=\"path of config file where info for hosts is located\")\n (options, args) = parser.parse_args(sys.argv[1:])\n \n if not options.version or not options.destdir:\n parser.print_help()\n sys.exit(-1)\n \n _check_version(options.version)\n\n options.cfg = os.path.expanduser(options.cfg)\n \n config = ConfigParser.ConfigParser()\n config.readfp(open(options.cfg))\n \n haswin = False\n for host in options.hosts:\n if host == 'localhost':\n if sys.platform.startswith('win'):\n haswin = True\n elif config.has_section(host):\n platform = config.get(host, 'platform')\n if platform == 'windows':\n haswin = True\n if not haswin:\n print \"no windows host was specified, so can't build binary eggs for windows\"\n sys.exit(-1)\n \n orig_branch = get_git_branch()\n if not orig_branch:\n print \"You must run mkrelease from within a git repository. aborting\"\n sys.exit(-1)\n\n if not options.test:\n if orig_branch != options.base:\n print \"Your current branch '%s', is not the specified base branch '%s'\" % (orig_branch, options.base)\n sys.exit(-1)\n \n if _has_checkouts():\n print \"There are uncommitted changes. You must run mkrelease.py from a clean branch\"\n sys.exit(-1)\n \n if orig_branch == 'master':\n print \"pulling master\"\n os.system(\"git pull origin master\")\n if _has_checkouts():\n print \"something went wrong during pull. aborting\"\n sys.exit(-1)\n else:\n print \"WARNING: base branch is not 'master' so it has not been\"\n print \"automatically brought up-to-date.\"\n answer = raw_input(\"Proceed? 
(Y/N) \")\n if answer.lower() not in [\"y\", \"yes\"]:\n sys.exit(-1)\n \n relbranch = \"release_%s\" % options.version\n if relbranch in get_git_branches():\n print \"release branch %s already exists in this repo\" % relbranch\n sys.exit(-1)\n\n print \"creating release branch '%s' from base branch '%s'\" % (relbranch, orig_branch)\n check_call(['git', 'branch', relbranch])\n print \"checking out branch '%s'\" % relbranch\n check_call(['git', 'checkout', relbranch])\n \n destdir = os.path.abspath(options.destdir)\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n\n startdir = os.getcwd()\n topdir = repo_top()\n \n cfgpath = os.path.expanduser(options.cfg)\n \n try:\n _update_releaseinfo_files(options.version)\n \n # build the docs\n docdir = os.path.join(topdir, 'docs')\n idxpath = os.path.join(docdir, '_build', 'html', 'index.html')\n \n if not os.path.isfile(idxpath) or not options.nodocbuild:\n build_docs(argv=['-v', options.version])\n shutil.copytree(os.path.join(topdir,'docs','_build', 'html'), \n os.path.join(destdir,'docs'))\n\n if not options.test:\n # commit the changes to the release branch\n print \"committing all changes to branch '%s'\" % relbranch\n check_call(['git', 'commit', '-a', '-m', \n '\"updating releaseinfo files for release %s\"' % \n options.version])\n\n # build openmdao package distributions\n proj_dirs = []\n for project_name, pdir, pkgtype in openmdao_packages:\n pdir = os.path.join(topdir, pdir, project_name)\n if 'src' in os.listdir(pdir):\n os.chdir(os.path.join(pdir, 'src'))\n else:\n os.chdir(pdir)\n print 'building %s' % project_name\n _build_sdist(pdir, destdir, options.version)\n if pkgtype == 'bdist_egg':\n proj_dirs.append(pdir)\n \n os.chdir(startdir)\n _build_bdist_eggs(proj_dirs, destdir, options.hosts, cfgpath)\n \n print 'creating bootstrapping installer script go-openmdao.py'\n installer = os.path.join(os.path.dirname(__file__),\n 'mkinstaller.py')\n \n check_call([sys.executable, installer, '--dest=%s'%destdir])\n\n if options.comment:\n comment = options.comment\n else:\n comment = 'creating release %s' % options.version\n \n if options.test:\n _rollback_releaseinfo_files()\n else:\n # tag the current revision with the release version id\n print \"tagging release with '%s'\" % options.version\n check_call(['git', 'tag', '-f', '-a', options.version, '-m', comment])\n \n check_call(['git', 'checkout', orig_branch])\n print \"\\n*REMEMBER* to push '%s' up to the master branch if this release is official\" % relbranch\n \n print \"new release files have been placed in %s\" % destdir\n \n finally:\n os.chdir(startdir)", "def set_note_version(cls, version):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n cls.init()\n notes_file = 'releases.txt'\n note = cls.get_note(notes_file)\n if note is not False:\n new_note = '---------------------------------------------------------------------\\n' + \\\n 'Version: ' + version + '\\n' + \\\n '---------------------------------------------------------------------\\n'\n if os.path.isfile(notes_file):\n with open(notes_file,\"r\") as src:\n all_notes=src.readlines()\n if '--------------------------------------------' not in all_notes[0]:\n all_notes.insert(0,new_note)\n else:\n all_notes = new_note\n\n with open(notes_file, 'w') as release_notes:\n release_notes.writelines(all_notes)\n cls.logger.info(\"Release notes vesion set: \" + version)\n # return to the base directory\n Utility.popd()", "def printNotes():\n print(\"Generating 
testsuiteNodes.js...\", end=\"\")\n\n suite = reftest.reftestSuite()\n fp = file(MATHJAX_WEB_PATH + \"testsuiteNotes.html\", \"w\")\n stdout = sys.stdout\n sys.stdout = fp\n print('<!doctype>')\n print('<!-- ' + WARNING_GENERATED_FILE + '-->')\n print('<html>')\n print('<head>')\n print(' <meta http-equiv=\"Content-type\" content=\"text/html;charset=UTF-8\">')\n print(' <title>Testsuite Notes</title>')\n print(' <link rel=\"stylesheet\" type=\"text/css\" href=\"default.css\"/>')\n print('</head>')\n print('<body>')\n print('<div class=\"related\">')\n print(' <h3>Navigation</h3>')\n print(' <ul>')\n print(' <li><a href=\"./\">Back to home</a></li> ')\n print(' </ul>')\n print('</div>')\n\n print('<div class=\"body testsuiteNotes\">')\n print(' <h1>Testsuite Notes</h1>')\n\n suite.addReftests(\"printNotes\",\n MATHJAX_TESTSUITE_PATH, \"reftest.list\", -1)\n print('</div>')\n print('</body>')\n print('</html>')\n sys.stdout = stdout\n fp.close()\n\n print(\"done\")", "def _get_changelog_contents(ctx: Context, version: str):\n return ctx.run(\n \"towncrier\",\n \"build\",\n \"--draft\",\n f\"--version={version}\",\n capture=True,\n ).stdout.decode()", "def to_XML(self, targets):\n self.bug = et.Element(\"bug\")\n\n self.__add_subelement(\"creation_time\", \"created\",\n self.__format_time)\n self.__add_subelement(\"title\", \"summary\")\n self.__add_subelement(\"status\", \"status\",\n self.__convert_status)\n self.__add_subelement(\"reporter\", \"reporter\")\n self.__add_subelement(\"reporter\", \"creator\")\n # FIXME\n #self.__add_subelement(\"assignee\", \"assigned\")\n\n # BE will create UUIDs automatically if they are not present\n # in the XML (or if the -p flag is not specified). However,\n # we need the UUIDs to record relationships between bugs\n # and BE targets (corresponding to Ditz releases), so we \n # create our own here.\n bug_uuid = str(uuid.uuid4())\n et.SubElement(self.bug, \"uuid\").text = bug_uuid\n\n if self.desc is not None:\n self.bug.append(make_comment(self.desc,\n self.reporter,\n self.__format_time(\n self.creation_time)))\n\n if self.release is not None:\n if self.release not in targets:\n # There should already be an entry for the target\n # taken from the Ditz project.yaml file, but in case\n # this is missing for any reason we create it here\n # and assume a status of \"open\".\n target_uuid = str(uuid.uuid4())\n targets[self.release] = (target_uuid, \"open\", [])\n et.SubElement(self.bug, \"extra-string\").text = \\\n \"BLOCKS:\" + targets[self.release][0]\n targets[self.release][2].append(bug_uuid)\n\n for date, reporter, action, comment in self.log_events:\n if comment is not None and comment != \"\":\n self.bug.append(make_comment(comment,\n reporter,\n self.__format_time(\n date)))\n\n\n #for comment in get_comments(cnf['git_user'], cnf['git_password'],\n # cnf['repo'], iss[u\"number\"]):\n # self.bug.append(make_comment(comment[u\"body\"],\n # comment[u\"user\"][u\"login\"],\n # format_time(comment[u\"updated_at\"])))\n\n return self.bug", "def deploy(version):\n toolkit.readmegen(version)", "def create_changelog (component):\n vprint (\"Creating ChangeLog entry for \" + component)\n\n old_tag = get_tag (old_comp_versions, 'ACE')\n\n # Generate changelogs per component\n path = get_path(component, \"ChangeLogs\", component + \"-\" + comp_versions[component + \"_version_\"])\n ex (\"cd $DOC_ROOT/ACE_TAO && git log \" + old_tag + \"..HEAD \" + component + \" > \" + path)\n\n return [path]", "def _get_releaseinfo_str(version):\n opts = {}\n f = 
StringIO.StringIO()\n opts['version'] = version\n opts['date'] = get_git_log_info(\"%ci\")\n opts['comments'] = get_git_log_info(\"%b%+s%+N\")\n opts['commit'] = get_git_log_info(\"%H\")\n f.write(relfile_template % opts)\n return f.getvalue()", "def test_create_release(self):\n releases_before = self.hello_world_project.get_releases()\n latest_release = releases_before[0].tag_name\n count_before = len(releases_before)\n increased_release = \".\".join(\n [\n latest_release.rsplit(\".\", 1)[0],\n str(int(latest_release.rsplit(\".\", 1)[1]) + 1),\n ]\n )\n release = self.hello_world_project.create_release(\n tag=increased_release, name=\"test\", message=\"testing release\"\n )\n count_after = len(self.hello_world_project.get_releases())\n assert release.tag_name == increased_release\n assert release.title == \"test\"\n assert release.body == \"testing release\"\n assert count_before + 1 == count_after", "def test_preserveTicketHint(self):\n news = self.project.child('NEWS')\n news.setContent(\n 'Ticket numbers in this file can be looked up by visiting\\n'\n 'http://twistedmatrix.com/trac/ticket/<number>\\n'\n '\\n'\n 'Blah blah other stuff.\\n')\n\n self.builder.build(self.project, news, \"Super Awesometastic 32.16\")\n\n self.assertEquals(\n news.getContent(),\n 'Ticket numbers in this file can be looked up by visiting\\n'\n 'http://twistedmatrix.com/trac/ticket/<number>\\n'\n '\\n'\n 'Super Awesometastic 32.16\\n'\n '=========================\\n'\n '\\n'\n 'Features\\n'\n '--------\\n'\n ' - We now support the web. (#5)\\n'\n ' - The widget is more robust. (#12)\\n'\n ' - A very long feature which takes many words to describe with any\\n'\n ' accuracy was introduced so that the line wrapping behavior of the\\n'\n ' news generating code could be verified. (#15)\\n'\n ' - A simpler feature described on multiple lines was added. (#16)\\n'\n '\\n'\n 'Bugfixes\\n'\n '--------\\n'\n ' - Broken stuff was fixed. (#23)\\n'\n '\\n'\n 'Improved Documentation\\n'\n '----------------------\\n'\n ' - foo.bar.Baz.quux (#40)\\n'\n ' - writing Foo servers (#41)\\n'\n '\\n'\n 'Deprecations and Removals\\n'\n '-------------------------\\n'\n ' - Stupid stuff was deprecated. 
(#25)\\n'\n '\\n'\n 'Other\\n'\n '-----\\n'\n ' - #30, #35\\n'\n '\\n\\n'\n 'Blah blah other stuff.\\n')", "def make_release(self, **kwargs) -> CrossrefEventsRelease:\n\n start_date, end_date, first_release = self.get_release_info(**kwargs)\n\n release = CrossrefEventsRelease(\n self.dag_id, start_date, end_date, first_release, self.mailto, self.max_threads, self.max_processes\n )\n return release", "def gen_build_str_dec():\n\t#Get name of person building firmware\n\t#git config --get-all user.name\n\t#Get repo revision\n\t#git log | head -1 | cut -d \" \" -f 2\n\t#Get branch\n\t#git branch | grep \"\\*\" | cut -d \" \" -f 2\n\t#Get modified status\n\t#Date, time, gcc version (__VERSION__)\n\ts = \"Miniboard Firmware rev \"\n\treturn \"\"", "def create_release(ctx):\n # Get the head of master\n r = _get_repo()\n b = r.get_branch(branch=\"master\")\n head = b.commit\n\n faasm_ver = get_faasm_version()\n\n # Create a tag from the head\n tag_name = _tag_name(faasm_ver)\n r.create_git_tag(\n tag_name,\n \"Release {}\\n\".format(faasm_ver),\n head.sha,\n \"commit\",\n )\n\n r.create_git_release(\n tag_name,\n \"Faasm {}\".format(faasm_ver),\n \"Release {}\\n\".format(faasm_ver),\n draft=True\n )", "def release(c, bump=\"patch\"):\n assert bump in [f.name for f in fields(VersionStructure)], f'\"{bump}\" is not a version part'\n\n old_version = VersionStructure.from_settings()\n new_version = VersionStructure.bump_version(old_version, part=bump)\n\n # collecting changelog\n print(f'Collecting changelog from the last version tag \"{old_version}\"...')\n\n result = c.run(f'git log \"{old_version}\"..HEAD --pretty=format:\"%s\"', hide=\"out\")\n\n commit_messages = filter(bool, result.stdout.splitlines())\n if not commit_messages:\n sys.stderr.write(\"Error: no new commits from last version, sorry\\n\")\n sys.exit(1)\n\n # updating changelog\n with open(CHANGELOG_ABSPATH, \"r+\", encoding=\"utf-8\") as changelog:\n old_changelog = changelog.read().strip()\n\n changelog.seek(0)\n print(\"Inserting this to changelog file:\\n-----\\n\")\n new_version_line = (\n VERSION_TITLE_TMPL.format(version=new_version, day=today().strftime(\"%Y-%m-%d\")) + \"\\n\"\n )\n changelog.write(new_version_line)\n\n print(new_version_line)\n for line in sorted(commit_messages): # sort commit messages for easier edition afterwards\n line: str = line.strip()\n if line.startswith(\"Merge\"):\n continue\n\n message_line = CHANGE_LINE_TMPL.format(line=line) + \"\\n\"\n print(message_line)\n changelog.write(message_line)\n\n changelog.write(\"\\n\")\n changelog.write(old_changelog)\n\n print(\"-----\")\n\n _set_settings_version(c, SETTINGS_PATH, str(new_version))", "def get_template(update, use_template='fedora_errata_template'):\n from bodhi.models import UpdateStatus, UpdateType\n use_template = globals()[use_template]\n line = unicode('-' * 80) + '\\n'\n templates = []\n\n for build in update.builds:\n h = get_rpm_header(build.nvr)\n info = {}\n info['date'] = str(update.date_pushed)\n info['name'] = h['name']\n info['summary'] = h['summary']\n info['version'] = h['version']\n info['release'] = h['release']\n info['url'] = h['url']\n if update.status is UpdateStatus.testing:\n info['testing'] = ' Test'\n info['yum_repository'] = ' --enablerepo=updates-testing'\n else:\n info['testing'] = ''\n info['yum_repository'] = ''\n\n info['subject'] = u\"%s%s%s Update: %s\" % (\n update.type is UpdateType.security and '[SECURITY] ' or '',\n update.release.long_name, info['testing'], build.nvr)\n info['updateid'] = update.alias\n 
info['description'] = h['description']\n info['product'] = update.release.long_name\n info['notes'] = \"\"\n if update.notes and len(update.notes):\n info['notes'] = u\"Update Information:\\n\\n%s\\n\" % \\\n '\\n'.join(wrap(update.notes, width=80))\n info['notes'] += line\n\n # Add this updates referenced Bugzillas and CVEs\n i = 1\n info['references'] = \"\"\n if len(update.bugs) or len(update.cves):\n info['references'] = u\"References:\\n\\n\"\n parent = True in [bug.parent for bug in update.bugs]\n for bug in update.bugs:\n # Don't show any tracker bugs for security updates\n if update.type is UpdateType.security:\n # If there is a parent bug, don't show trackers\n if parent and not bug.parent:\n log.debug(\"Skipping tracker bug %s\" % bug)\n continue\n title = (bug.title != 'Unable to fetch title' and\n bug.title != 'Invalid bug number') and \\\n ' - %s' % bug.title or ''\n info['references'] += u\" [ %d ] Bug #%d%s\\n %s\\n\" % \\\n (i, bug.bug_id, title, bug.url)\n i += 1\n for cve in update.cves:\n info['references'] += u\" [ %d ] %s\\n %s\\n\" % \\\n (i, cve.cve_id, cve.url)\n i += 1\n info['references'] += line\n\n # Find the most recent update for this package, other than this one\n lastpkg = build.get_latest()\n #log.debug(\"lastpkg = %s\" % lastpkg)\n\n # Grab the RPM header of the previous update, and generate a ChangeLog\n info['changelog'] = u\"\"\n if lastpkg:\n oldh = get_rpm_header(lastpkg)\n oldtime = oldh['changelogtime']\n text = oldh['changelogtext']\n del oldh\n if not text:\n oldtime = 0\n elif len(text) != 1:\n oldtime = oldtime[0]\n info['changelog'] = u\"ChangeLog:\\n\\n%s%s\" % \\\n (to_unicode(build.get_changelog(oldtime)), line)\n\n try:\n templates.append((info['subject'], use_template % info))\n except UnicodeDecodeError:\n # We can't trust the strings we get from RPM\n log.debug(\"UnicodeDecodeError! 
Will try again after decoding\")\n for (key, value) in info.items():\n if value:\n info[key] = to_unicode(value)\n templates.append((info['subject'], use_template % info))\n\n return templates", "async def test_release_notes(doof, repo_info, event_loop, mocker):\n old_version = \"0.1.2\"\n update_version_mock = mocker.patch('bot.update_version', autospec=True, return_value=old_version)\n notes = \"some notes\"\n create_release_notes_mock = mocker.patch('bot.create_release_notes', autospec=True, return_value=notes)\n\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=['release', 'notes'],\n loop=event_loop,\n )\n\n update_version_mock.assert_called_once_with(\"9.9.9\")\n create_release_notes_mock.assert_called_once_with(old_version, with_checkboxes=False)\n\n assert doof.said(\"Release notes since {}\".format(old_version))\n assert doof.said(notes)", "def _create_tag_message(commits: List[git.objects.commit.Commit],\n tag: semantic_version.Version) -> str:\n\n tag_message = 'Release {} \\n\\n'.format(str(tag))\n\n for message in [c.message for c in commits]:\n tag_message += ' * {}\\n'.format(message.split('\\n')[0].strip())\n return tag_message", "def notes_file_for_version(self, version: Version) -> str:\n branch_name = self._branch_name(version)\n notes_file = self._release_notes.get(branch_name)\n if notes_file is None:\n raise ValueError(\n f\"Version {version} lives in branch {branch_name}, which is not configured in \"\n f\"{self._release_notes}.\"\n )\n return notes_file", "def __init__(__self__,\n resource_name: str,\n args: ReleaseArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "async def cmd_bugger(self, args: Args, src: Src, **_):\n if self.config.get(\"trello\") is None:\n raise CommandOperationError(\n \"Sorry, the bot maintainer has not enabled Trello bug reports.\"\n )\n try:\n url = f\"https://api.trello.com/1/lists/{self.config.get('trello/list_id')}/cards\"\n params = {\n \"key\": self.config.get(\"trello/app_key\"),\n \"token\": self.config.get(\"trello/token\"),\n }\n response = requests.request(\"GET\", url, params=params)\n\n except KeyError:\n raise CommandOperationError(\n \"The Trello keys are misconfigured, check your config file\"\n )\n\n if not response:\n raise CommandOperationError(\n \"Could not get cards for the list ID provided. Talk to your bot\"\n \" owner.\"\n )\n\n ticketnumber = str(\n max(\n (\n int(card[\"name\"])\n for card in (response.json())\n if card[\"name\"].isnumeric()\n )\n )\n + 1\n )\n\n params.update(\n {\n \"name\": ticketnumber.zfill(3),\n \"desc\": (\n \"{message}\\n\\n\\n\\n\\n\"\n \"Submitted by: {author.name} ({author.id})\\n\"\n \"Timestamp: {time}\\n\"\n \"Guild: {guild.name} ({guild.id})\\n\"\n \"Channel: {channel.name} ({channel.id})\".format(\n message=\" \".join(args),\n author=src.author,\n channel=src.channel,\n guild=src.guild,\n time=dt.utcnow(),\n )\n ),\n \"pos\": \"bottom\",\n \"idList\": self.config.get(\"trello/list_id\"),\n \"username\": self.config.get(\"trello/username\"),\n }\n )\n\n response = requests.request(\n \"POST\", \"https://api.trello.com/1/cards\", params=params\n )\n\n if not response:\n raise CommandOperationError(\n \"Could not create bug report. 
Talk to your bot owner.\"\n )\n\n return f\"Created bug report with ID `{ticketnumber}`\"", "def write_debversion(self, latest_branch_version, src_path):\n kube_git_version_fn = src_path / \"DEBVERSION\"\n kube_git_version = textwrap.dedent(\n \"\"\"KUBE_GIT_TREE_STATE=archive\n KUBE_GIT_VERSION={}\n KUBE_GIT_MAJOR={}\n KUBE_GIT_MINOR={}\n \"\"\".format(\n f\"v{str(latest_branch_version)}\",\n latest_branch_version.major,\n latest_branch_version.minor,\n )\n )\n kube_git_version_fn.write_text(kube_git_version)", "def get_msg() -> None:\n\n changelog = [\n \"\"\"\n version 0.0.2\n -- Jan 20, 2019 --\n * Huge Bug fixed with gradually increasing nT\n * Reorganize utils.py\n \"\"\",\n\n \"\"\"\n version 0.0.3\n -- Jan 21, 2019 --\n * Adding test of convergece\n \"\"\",\n\n \"\"\"\n version 0.0.3.1\n -- Jan 23, 2019 --\n * Roll back to x_0 = 1\n \"\"\",\n\n \"\"\"\n version 0.0.3\n -- Jan 21, 2019 --\n * Roll back 0.0.3\n \"\"\",\n\n \"\"\"\n version 0.0.3.2\n -- Jan 26, 2019 --\n * Adding outputs for test_convergence()\n \"\"\",\n\n \"\"\"\n version 0.0.3.3\n -- Jan 30, 2019 --\n * use last 20 time step for test_convergence()\n \"\"\",\n\n \"\"\"\n version 0.0.3.4\n -- Jan 31, 2019 --\n * use 0.1 as initial values for alpha and eps variable\n \"\"\",\n\n \"\"\"\n version 0.0.4\n -- Feb 11, 2019 --\n * Roll back 0.0.3.3\n * Add constraints on direct regulation from drug nodes to phenotypic nodes\n \"\"\",\n\n \"\"\"\n version 0.0.5\n -- Feb 21, 2019 --\n * Add function to normalize mse loss to different nodes.\n \"\"\",\n\n \"\"\"\n version 0.1.0\n -- Aug 21, 2019 --\n * Re-structure codes for publish.\n \"\"\",\n\n \"\"\"\n version 0.1.1\n -- Oct 4, 2019 --\n * Add new kinetics\n * Add new ODE solvers\n * Add new envelop forms\n \"\"\",\n\n \"\"\" \n version 0.2.0\n -- Feb 26, 2020 --\n * Add support of matrix operation rather than function mapping\n * Roughly 5x faster\n \"\"\",\n\n \"\"\" \n version 0.2.1\n -- Apr 5, 2020 --\n * Reformat for better code style\n * Revise docs\n \"\"\",\n\n \"\"\"\n version 0.2.2\n -- Apr 23, 2020 --\n * Add support to tf.Datasets\n * Add support to tf.sparse\n * Prepare for sparse single-cell data\n \"\"\",\n\n \"\"\" \n version 0.2.3\n -- Jun 8, 2020 --\n * Add support to L2 loss (alone or together with L1, i.e. 
elastic net)\n * Clean the example configs folder\n \"\"\",\n\n \"\"\"\n version 0.3.0\n -- Jun 8, 2020 --\n Add support for alternative form of perturbation\n * Previous: add u on activity nodes\n * New: fix activity nodes directly\n - 1) changing the x_0 from zeros to u\n - 2) adding mask on dxdt\n - 3) the previous format should work fine due to numpy broadcast\n * Revised printing log\n \"\"\",\n\n \"\"\"\n version 0.3.1\n -- Sep 25, 2020 --\n * Release version for publication\n * Add documentation \n * Rename package to 'cellbox' \n \"\"\",\n \n \"\"\"\n version 0.3.2\n -- Feb 10, 2023 --\n * Modify CellBox to support TF2 \n \"\"\"\n ]\n print(\n \"=\" * 80 + '\\n'\n \" _____ _ _ ____ \\n\"\n \" / ____| | | | _ \\ \\n\"\n \" | | ___| | | |_) | _____ __ \\n\"\n \" | | / _ \\ | | _ < / _ \\ \\/ / \\n\"\n \" | |___| __/ | | |_) | (_) > < \\n\"\n \" \\_____\\___|_|_|____/ \\___/_/\\_\\ \\n\"\n \"Running CellBox scripts developed in Sander lab\\n\"\n \"Maintained by Bo Yuan, Judy Shen, and Augustin Luna; contributions by Daniel Ritter\"\n )\n\n print(changelog[-1])\n print(\n \"Tutorials and documentations are available at https://github.com/sanderlab/CellBox\\n\"\n \"If you want to discuss the usage or to report a bug, please use the 'Issues' function at GitHub.\\n\"\n \"If you find CellBox useful for your research, please consider citing the corresponding publication.\\n\"\n \"For more information, please email us at [email protected] and [email protected], \"\n \"[email protected]\\n\",\n \"-\" * 80\n )", "def prepare_ticket(self, req, ticket, fields, actions):", "def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")", "def package_software(self, version: str) -> None:\n logger.info(f\"Generating a release package [{version}]\")\n pass", "def release_notes(self):\n return self._release_notes", "def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)", "def _get_description(actor, commit, run_id):\n return (\"Requested by @%s on commit %s\\n\" % (actor, commit) +\n \"Last updated: %s \\n\" % _get_datetime() +\n \"**[View integration test log & download artifacts](https://github.com/firebase/firebase-cpp-sdk/actions/runs/%s)**\\n\" % run_id)", "def bugreport(app):\n ...", "def about():\n# return about string\n about = (\"\"\"\nJADM %s\n-------------\ncreator: Nikolay Georgiev Dachev, <[email protected]>\nsupport: [email protected] (only for bugs report and jadm issues)\n\nJadm is FreeBSD jail administration framework with jail.conf, vnet and zfs support.\n\n---------------- JADM is BSD 3-Clause Licensed ---------------------\n\nCopyright (c) <2014>, <Nikolay Georgiev Dachev> <[email protected]>\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distr\n\n3. 
Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\nINCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\nIN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\nOR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\") % (jadm_version)\n return about", "def to_release_brach_name(self) -> str:\n return f\"release/{self.major}.{self.minor}\"", "def create_release(\n repository, tag, release_version, changelog_url, *, additional_assets=None\n):\n api_key = os.environ.get(\"GITHUB_API_KEY\")\n if api_key is None:\n api_key = getpass(\"API key: \")\n\n git = Github(api_key)\n\n repo = git.get_repo(repository.path.strip(\"/\"))\n\n release = repo.create_git_release(\n tag,\n f\"Version {release_version}\",\n f\"See {changelog_url}\",\n draft=True,\n )\n\n for asset in [] if additional_assets is None else additional_assets:\n release.upload_asset(asset, label=asset)\n\n return release", "def display_for_triage(bugs):\n # bug title is like:\n # '\n # Bug #1724025 in openstack-ansible:\n # invalid regular expression...\"\n # '\n for bug in bugs:\n bug_name = u\"\".join(bug.title.split(\":\")[1:])\n print(u\"#link {link}\\n\\t{name}\".format(link=bug.web_link, name=bug_name))", "def releases():\n r = run('ls -x %(releases_path)s' % env)\n env.releases = sorted(r.split(\"\\t\"))\n if len(env.releases) >= 1:\n env.current_revision = env.releases[-1]\n env.current_release = '%(releases_path)s/%(current_revision)s' % env\n if len(env.releases) > 1:\n env.previous_revision = env.releases[-2]\n env.previous_release = '%(releases_path)s/%(previous_revision)s' % env\n\n #cleanup old releases. 
max 3 allowed.\n cleanup()", "def create_releases(name):\n data = get_object(name)\n if not data:\n return None\n\n data = copy.deepcopy(data)\n\n # pop out the works\n works = data.pop('work_version', None)\n\n # create a dictionary of the object parameters with release as the key\n dd = {}\n dd[data.get('release', 'DR15')] = data\n\n if works:\n # add any other work objects found\n for work in works:\n work_object = create_work_version(work, data)\n dd[f\"WORK-{work_object['version_info']['number']}\"] = work_object\n\n # expand the path envvars\n for k, v in dd.items():\n release = 'SDSSWORK' if 'WORK' in k else k\n tree.replant_tree(release.lower())\n dd[k]['path'] = os.path.expandvars(v.get('path', ''))\n\n return dd", "def getnotes():", "def extract_release_data(self):\r\n data = None\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n if self._config.get_boolean('releasable', False):\r\n if 'baseline.release' in self._config:\r\n data = {}\r\n _logger.info(\"Releasing: '%s'\" % project)\r\n data['name'] = project.objectname\r\n data['database'] = session.database()\r\n data['role'] = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n data['dir'] = os.path.normpath(self._config['dir'])\r\n data['pst'] = project.name\r\n data['release'] = self._config['baseline.release']\r\n else:\r\n _logger.warning(\"Could not release \" + project + \" because the 'baseline.release' property is missing.\")\r\n return data", "def __create_ticket(user, subject, description, topic):\n\n target = settings.SLACK_TARGET_TFED\n if topic == 'Database':\n target = settings.SLACK_TARGET_TFED_DB\n user_email = user['user']['profile'].get('email', '[email protected]')\n display_name = user['user']['profile']['real_name']\n resp = rt_api.create_ticket(topic, user_email, subject, description + \"\\n\\n- \" + display_name)\n ticket_id = resp.get('id', None)\n if ticket_id:\n ticket_info = {\n \"url\": 'https://lnl-rt.wpi.edu/rt/Ticket/Display.html?id=' + ticket_id,\n \"id\": ticket_id,\n \"subject\": subject,\n \"description\": description,\n \"status\": \"New\",\n \"assignee\": None,\n \"reporter\": user['user']['name']\n }\n ticket = views.tfed_ticket(ticket_info)\n slack_post(target, text=description, content=ticket, username='Request Tracker')\n return\n error_message = \"Whoops! It appears something went wrong while attempting to submit your request. \" \\\n \"Please wait a few minutes then try again. 
If the problem persists, please email \" \\\n \"us directly at [email protected].\"\n post_ephemeral(target, error_message, user['user']['id'], username=\"Request Tracker\")", "def changelog_entries():\n changelog_entries = comments or []\n for o in options or self._DEFAULT_PORT_OPTIONS:\n changelog_entries.append(\"{keyword}: {option}\".format(keyword=mini_buildd.changes.Changes.Options.KEYWORD, option=o))\n return changelog_entries", "def build(ctx):\n if 'cicd' in run('hostname').stdout.strip():\n # Check if we are executing the task from an aws instance\n if requests.get('http://169.254.169.254/latest/meta-data/').status_code == 200:\n git_ref_source = os.environ.get('GIT_SOURCE_BRANCH')\n git_ref_target = os.environ.get('GIT_TARGET_BRANCH')\n run('git fetch --all')\n run('git checkout {}'.format(git_ref_target))\n\n \n tar_name = \"Frontend\"\n #'wordpress-{}-en_CA.tar.gz'.format(WORDPRESS_VERSION)\n #tar_file = open(tar_name, 'wb')\n #tar_file.write(wp_tar.content)\n #tar_file.close()\n\n #run('tar -xzf {}'.format(tar_name))\n \n # Download the postmedia source-code and patches/config\n #clone(git_ref_target, git_ref_source)\n\n # merge (if applicable) and create the release\n if git_ref_source:\n git_pr_id = os.getenv('GIT_PR_ID')\n github_util.put('repos/{}/{}/pulls/{}/merge'.format(GIT_ORG, GIT_REPO, git_pr_id), params={'merge_method': 'squash'})\n version = github_util.get_next_rc()\n github_util.set_release(target_commitish='master', tag=version, prerelease=True)\n build_type = 'release candidate'\n else:\n version = github_util.get_next_hf()\n github_util.set_release(git_ref_target, version)\n build_type = 'hotfix'\n\n # package and upload to S3\n author = os.environ.get('GIT_AUTHOR')\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n tarball = package(notes, version)\n print(\"No upload to S3\")\n #upload(tarball, S3_BUCKET_STAGE)\n else:\n author = input('please enter your name for the release notes: ')\n\n valid_snapshot_name = False\n while not valid_snapshot_name:\n snapshot_name = input('please enter a name for your snapshot: ')\n snapshot_name = snapshot_name.lower()\n snapshot_name = re.sub('-', '_', snapshot_name)\n\n # domain sections cannot be longer than 63 characters, so snapshot\n # name cannot be longer than 26 (63 minus snapshot-20190128-1713-homesanddesign - 37)\n if (len(snapshot_name) <= 26):\n valid_snapshot_name = True\n else:\n print(\"{} is too long. 
Please enter a new snapshot name of 28 characters or less.\".format(snapshot_name))\n\n build_type = 'snapshot'\n \n version = '{}_{}_{}'.format(build_type, snapshot_name,\n datetime.datetime.now().strftime(\"%Y%m%d_%H%M\"))\n print(\"Building snapshot {}\".format(version))\n git_ref_target = 'master'\n git_ref_source = 'HEAD'\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n os.chdir('/opt/')\n if os.path.exists(WORK_DIR):\n os.system('rm -rf {}'.format(WORK_DIR))\n os.mkdir(WORK_DIR)\n tarball = package(notes, version)\n print (\"No upload to S3\")\n #upload(tarball, S3_BUCKET_DEV)", "def release(context):\n print(f\"Starting a release of v{IMAGE_VER} on GitHub!\")\n run_cmd(context, exec_cmd=\"git checkout main\", pty=False, error_message=\"Failed to checkout main!\")\n\n run_cmd(context, exec_cmd=\"git pull origin main\", pty=False, error_message=\"Failed to pull from origin/main\")\n\n run_cmd(\n context, exec_cmd=f\"git tag v{IMAGE_VER}\", pty=False, error_message=f\"Failed to create the tag 'v{IMAGE_VER}'!\"\n )\n\n run_cmd(context, exec_cmd=\"git push --tags\", pty=False, error_message=f\"Failed to push the tag 'v{IMAGE_VER}'!\")", "def update_release_notes(\n provider_package_id: str,\n version_suffix: str,\n force: bool,\n verbose: bool,\n answer: str | None,\n base_branch: str,\n regenerate_missing_docs: bool,\n) -> bool:\n verify_provider_package(provider_package_id)\n proceed, latest_change, changes = get_all_changes_for_package(\n provider_package_id, verbose, base_branch, force\n )\n if not force:\n if proceed:\n if not confirm(\"Provider marked for release. Proceed\", answer=answer):\n return False\n elif not latest_change:\n console.print()\n console.print(\n f\"[yellow]Provider: {provider_package_id} - skipping documentation generation. No changes![/]\"\n )\n console.print()\n return False\n else:\n type_of_change = get_type_of_changes(answer=answer)\n if type_of_change == TypeOfChange.DOCUMENTATION:\n if isinstance(latest_change, Change):\n mark_latest_changes_as_documentation_only(provider_package_id, latest_change)\n else:\n raise ValueError(\n \"Expected only one change to be present to mark changes \"\n f\"in provider {provider_package_id} as docs-only. 
\"\n f\"Received {len(latest_change)}.\"\n )\n elif type_of_change == TypeOfChange.SKIP:\n return False\n elif type_of_change in [TypeOfChange.BUGFIX, TypeOfChange.FEATURE, TypeOfChange.BREAKING_CHANGE]:\n add_new_version(type_of_change, provider_package_id)\n proceed, latest_change, changes = get_all_changes_for_package(\n provider_package_id, verbose, base_branch, force\n )\n provider_details = get_provider_details(provider_package_id)\n provider_info = get_provider_info_from_provider_yaml(provider_package_id)\n jinja_context = get_provider_jinja_context(\n provider_info=provider_info,\n provider_details=provider_details,\n current_release_version=provider_details.versions[0],\n version_suffix=version_suffix,\n )\n jinja_context[\"DETAILED_CHANGES_RST\"] = changes\n jinja_context[\"DETAILED_CHANGES_PRESENT\"] = bool(changes)\n update_changelog_rst(\n jinja_context,\n provider_package_id,\n provider_details.documentation_provider_package_path,\n regenerate_missing_docs,\n )\n update_security_rst(\n jinja_context,\n provider_package_id,\n provider_details.documentation_provider_package_path,\n regenerate_missing_docs,\n )\n if not force:\n update_commits_rst(\n jinja_context,\n provider_package_id,\n provider_details.documentation_provider_package_path,\n regenerate_missing_docs,\n )\n return True", "def package(notes, version):\n print('creating tarball')\n archive_name = '{}.tgz'.format(version.replace('.', '-'))\n run('tar --exclude=\".git\"'\n ' --exclude=\"{}/wp-content/themes/tmp\"'\n ' --exclude=\"{}/wp-content/uploads\" -zcf ../tmp/{} *'\n .format(DOC_ROOT, DOC_ROOT, archive_name))\n os.chdir(previous_dir)\n return archive_name", "def create_deb_for_target(ctx, target=target):\n pass", "def run(self):\n env = cast(\"BuildEnvironment\", self.state.document.settings.env)\n foot_old_refs = env.temp_data.setdefault(\"bibtex_foot_old_refs\", set())\n foot_new_refs = env.temp_data.setdefault(\"bibtex_foot_new_refs\", set())\n footbibliography_count = env.temp_data[\"bibtex_footbibliography_count\"] = (\n env.temp_data.get(\"bibtex_footbibliography_count\", 0) + 1\n )\n if not foot_new_refs:\n return []\n else:\n foot_old_refs |= foot_new_refs\n foot_new_refs.clear()\n # bibliography stored in env.temp_data[\"bibtex_foot_bibliography\"]\n foot_domain = cast(\"BibtexFootDomain\", env.get_domain(\"footcite\"))\n foot_bibliography, env.temp_data[\"bibtex_foot_bibliography\"] = (\n env.temp_data[\"bibtex_foot_bibliography\"],\n foot_domain.bibliography_header.deepcopy(),\n )\n domain = cast(\"BibtexDomain\", env.get_domain(\"cite\"))\n for bibfile in domain.bibdata.bibfiles:\n env.note_dependency(bibfile)\n foot_bibliography[\"ids\"] += _make_ids(\n docname=env.docname,\n lineno=self.lineno,\n ids=set(self.state.document.ids.keys()),\n raw_id=env.app.config.bibtex_footbibliography_id.format(\n footbibliography_count=footbibliography_count\n ),\n )\n self.state.document.note_explicit_target(\n foot_bibliography, foot_bibliography\n )\n return [foot_bibliography]", "def run_build_pipeline_november_comments():\n build_pipeline_november_comments('politics', 100)", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def run(self):\n report_info = self.api_client.create_task(self.host_id,\n CompatibilityReport.Spec(self.targetRelease))\n print(\"Compatibility Report API Task ID : \", report_info.get_task_id())", "def debian_description(self):\n text = [\"Python package\", self.python_name, \"converted by py2deb on\"]\n # The %e directive (not documented in the Python standard library 
but\n # definitely available on Linux which is the only platform that py2deb\n # targets, for obvious reasons :-) includes a leading space for single\n # digit day-of-month numbers. I don't like that, fixed width fields are\n # an artefact of 30 years ago and have no place in my software\n # (generally speaking :-). This explains the split/compact duo.\n text.extend(time.strftime('%B %e, %Y at %H:%M').split())\n return ' '.join(text)", "def create_tag(path, name, version, notes, test=False):\n\n tag_name = \"{}-{}\".format(name, version)\n tag_contents = \"Release %s for %s\\n\\n%s\" % (version, name, notes)\n\n if test:\n tag_name = \"test@\" + tag_name\n tag_contents = \"Test \" + tag_contents\n\n print(\"Creating annotated release tag: %s\" % tag_name)\n run_in_component(path, ['git', 'tag', '-a', '-F', '-', tag_name], stdin=tag_contents)", "def createChangeLog(outputType: str = 'json', isExposeEmail: bool = False, remSignedOff = True, isExtended: bool = False):\r\n tags = {}\r\n commits = {}\r\n references = {}\r\n out = \"\"\r\n print(\"Starting...\")\r\n if isWin:\r\n hasTags = Popen('git for-each-ref --sort=\"*authordate\" --format=\"%(refname:short)\" refs/tags', shell=True, stdout=PIPE).stdout.read().decode()\r\n else:\r\n hasTags = Popen('git for-each-ref --sort=\"*authordate\" --format=\"%(refname:short)\" refs/tags | grep -v \"^$\"#', shell=True, stdout=PIPE).stdout.read().decode()\r\n\r\n if hasTags.strip() == '':\r\n hasTags = None\r\n if hasTags is not None:\r\n hasTags = hasTags.strip().split('\\n')\r\n print(\"Found: \" + str(len(hasTags)) + \" tag(s).\")\r\n\r\n for i in hasTags:\r\n tags[len(tags)] = i\r\n tags = collections.OrderedDict(reversed(sorted(tags.items())))\r\n\r\n if len(tags) > 0:\r\n\r\n for key, tag in tags.items():\r\n if key - 1 < 0:\r\n search = '\"' + tag + '\"'\r\n else:\r\n search = '\"' + tags[key - 1] + '\"..\"' + tag + '\"'\r\n\r\n\r\n if isWin:\r\n partialCommit = Popen('git log --pretty=format:\" %h,;|,%H,;|,%cn,;|,%ce,;|,%cD,;|,%ct,;|,%s%n%n%-b,|;,\" ' + search + ' | findstr /v /C:\"Merge branch\"', shell=True, stdout=PIPE).stdout.read().decode()\r\n else:\r\n partialCommit = Popen('git log --pretty=format:\" %h,;|,%H,;|,%cn,;|,%ce,;|,%cD,;|,%ct,;|,%s%n%n%-b,|;,\" ' + search + ' | grep -v \"Merge branch\"', shell=True, stdout=PIPE).stdout.read().decode()\r\n partialCommit = partialCommit.strip().split(',|;,')\r\n k = 0\r\n for i in partialCommit:\r\n i = i.strip()\r\n if not i or i.strip().strip('\\n') == '':\r\n continue\r\n\r\n if remSignedOff is True:\r\n i = i.split(\"Signed-off-by:\")\r\n i = i[0].strip().strip('\\n')\r\n\r\n i = i.split(',;|,')\r\n if len(i) <= 3:\r\n continue\r\n if not isExposeEmail:\r\n i[3] = None\r\n else:\r\n references[i[2]] = i[3]\r\n if not tag in commits:\r\n commits[tag] = {}\r\n out += \"\\n####Version \" + tag.strip('v') + \" (\" + datetime.datetime.fromtimestamp(int(i[5])).strftime('%d.%m.%Y') + \")\\n\"\r\n if isExtended >= 3:\r\n comment = \"* [\" + i[0] + \"](../../commit/\" + i[0] + \") - [[\" + i[2] + \"]]: \" + i[6] + \"\\n\"\r\n commits[tag][\"commit_h\"] = i[1]\r\n commits[tag][\"by\"] = i[2]\r\n commits[tag][\"date\"] = i[4]\r\n commits[tag][\"date_unix\"] = i[5]\r\n elif isExtended == 2:\r\n comment = \"* **\" + i[0] + \"**: \" + i[6] + \"\\n\"\r\n commits[tag][\"commit_h\"] = i[1]\r\n else:\r\n comment = \"* \" + i[6] + \"\\n\"\r\n commits[tag][int(k)] = {\r\n 'commit': i[0],\r\n 'email': i[3],\r\n 'comment': i[6]\r\n }\r\n for r in needToBold:\r\n if r in comment:\r\n comment = comment.replace(r, 
'**' + r + '**')\r\n out += comment\r\n k += 1\r\n else:\r\n print(\"Error on tags\")\r\n exit(2)\r\n else:\r\n print(\"No Tags found. Switching to commit mode...\")\r\n if isWin:\r\n partialCommit = Popen('git log --pretty=format:\" %h,;|,%H,;|,%cn,;|,%ce,;|,%cD,;|,%ct,;|,%s%n%n%-b,|;,\" | findstr /v /C:\"Merge branch\"', shell=True, stdout=PIPE).stdout.read().decode()\r\n else:\r\n partialCommit = Popen('git log --pretty=format:\" %h,;|,%H,;|,%cn,;|,%ce,;|,%cD,;|,%ct,;|,%s%n%n%-b,|;,\" | grep -v \"Merge branch\"', shell=True, stdout=PIPE).stdout.read().decode()\r\n\r\n partialCommit = partialCommit.strip().split(',|;,')\r\n k = 0\r\n for i in partialCommit:\r\n i = i.strip()\r\n if not i or i.strip().strip('\\n') == '':\r\n continue\r\n\r\n if remSignedOff is True:\r\n i = i.split(\"Signed-off-by:\")\r\n i = i[0].strip().strip('\\n')\r\n\r\n i = i.split(',;|,')\r\n if not isExposeEmail:\r\n i[3] = None\r\n else:\r\n references[i[2]] = i[3]\r\n\r\n print(\"COMMIT: \" + i[0])\r\n out += \"\\n####\" + i[0] + \" (\" + datetime.datetime.fromtimestamp(int(i[5])).strftime('%d.%m.%Y') + \")\\n\"\r\n\r\n commits[i[0]] = {\r\n 'commit': i[0],\r\n 'email': i[3],\r\n 'comment': i[6]\r\n }\r\n if isExtended >= 3:\r\n comment = \"* [\" + i[0] + \"](../../commit/\" + i[0] + \") - [[\" + i[2] + \"]]: \" + i[6] + \"\\n\"\r\n commits[i[0]][\"commit_h\"] = i[1]\r\n commits[i[0]][\"by\"] = i[2]\r\n commits[i[0]][\"date\"] = i[4]\r\n commits[i[0]][\"date_unix\"] = i[5]\r\n elif isExtended == 2:\r\n comment = \"* **\" + i[0] + \"**: \" + i[6] + \"\\n\"\r\n\r\n commits[i[0]][\"commit_h\"] = i[1]\r\n else:\r\n comment = \"* \" + i[6] + \"\\n\"\r\n\r\n for r in needToBold:\r\n if r in comment:\r\n comment = comment.replace(r, '**' + r + '**')\r\n out += comment\r\n k += 1\r\n js = None\r\n if outputType == 'json':\r\n js = json.dumps(commits, indent=4, separators=(',', ': '))\r\n #sort_keys=True,\r\n else:\r\n out += \"\\n\\n\"\r\n if isExtended >= 3:\r\n for k, v in references.items():\r\n out += \"[\" + k + \"]:mailto://\" + v + \"\\n\"\r\n\r\n filename = \"CHANGELOG.md\"\r\n if js is not None:\r\n filename = \"version.json\"\r\n out = js\r\n\r\n file = codecs.open(filename, \"w\", \"utf-8\")\r\n file.write(out)\r\n file.close()\r\n print('Done.')", "def print_push_info(ctx, patches, sha1s, ticket_numbers, tickets):\n remote = ctx.config['remote']\n branches = sha1s.keys()\n\n ctx.push_info = {}\n pagure_log = []\n bugzilla_log = ['Fixed upstream']\n for branch in branches:\n pagure_log.append('%s:\\n' % branch) # we need extra newline for pagure\n bugzilla_log.append('%s:' % branch)\n log_result = ctx.runprocess(\n ['git', 'log', '--graph', '--oneline', '--abbrev=99',\n '--color=never', '%s/%s..%s' % (remote, branch, sha1s[branch])])\n pagure_log.extend(\n line.rstrip()\n for line in reversed(log_result.stdout.splitlines()))\n pagure_log.append('\\n') # add newline to fix github/pagure formatting\n\n log_result = ctx.runprocess(\n ['git', 'log', '--pretty=format:%H',\n '%s/%s..%s' % (remote, branch, sha1s[branch])])\n bugzilla_log.extend(\n ctx.config['commit-url'] + line.strip()\n for line in reversed(log_result.stdout.splitlines()))\n\n bugzilla_urls = []\n bugzilla_re = re.compile('(%s\\d+)' %\n re.escape(ctx.config['bugzilla-bug-url']))\n jira_urls = []\n jira_re = re.compile('(%s\\d+)' % re.escape(ctx.config['jira-ticket-url']))\n\n for ticket in tickets:\n if ticket.rhbz:\n for match in bugzilla_re.finditer(ticket.rhbz):\n bugzilla_urls.append(match.group(0))\n for match in 
jira_re.finditer(ticket.rhbz):\n jira_urls.append(match.group(0))\n\n for branch in branches:\n print(ctx.term.cyan('=== Diffstat for %s ===' % branch))\n log_result = ctx.runprocess(\n ['git', 'diff', '--stat', '--color=%s' % ctx.color_arg,\n '%s/%s..%s' % (remote, branch, sha1s[branch])],\n verbosity=2)\n print(ctx.term.cyan('=== Log for %s ===' % branch))\n log_result = ctx.runprocess(\n ['git', 'log', '--reverse', '--color=%s' % ctx.color_arg,\n '%s/%s..%s' % (remote, branch, sha1s[branch])],\n verbosity=2)\n\n print(ctx.term.cyan('=== Patches pushed ==='))\n for patch in patches:\n print(patch.filename)\n\n print(ctx.term.cyan('=== Mail summary ==='))\n if len(branches) == 1:\n print('Pushed to ', end='')\n else:\n print('Pushed to:')\n for branch in branches:\n print('%s: %s' % (branch, sha1s[branch]))\n\n print(ctx.term.cyan('=== Ticket comment ==='))\n pagure_msg = '\\n'.join(pagure_log)\n print(pagure_msg)\n ctx.push_info['pagure_comment'] = pagure_msg\n\n print(ctx.term.cyan('=== Bugzilla/JIRA comment ==='))\n bugzilla_msg = '\\n'.join(bugzilla_log)\n print(bugzilla_msg)\n ctx.push_info['bugzilla_comment'] = bugzilla_msg\n\n if ticket_numbers:\n print(ctx.term.cyan('=== Tickets fixed ==='))\n for number in sorted(ticket_numbers):\n print('%s%s' % (ctx.config['ticket-url'], number))\n\n if bugzilla_urls:\n print(ctx.term.cyan('=== Bugzillas fixed ==='))\n print('\\n'.join(bugzilla_urls))\n \n if jira_urls:\n print(ctx.term.cyan('=== Jira tickets fixed ==='))\n print('\\n'.join(jira_urls))\n\n print(ctx.term.cyan('=== Ready to push ==='))", "def publish_release(ctx):\n rel = _get_release()\n rel.update_release(rel.title, rel.raw_data[\"body\"], draft=False)", "def ticket_created(self, ticket):", "def test_unreleased_version_label_string(self):\n\n gitchangelog.file_put_contents(\n \".gitchangelog.rc\",\n \"unreleased_version_label = 'bar'\")\n changelog = w('$tprog \"HEAD^..HEAD\"')\n self.assertNoDiff(\n textwrap.dedent(\"\"\"\\\n bar\n ---\n\n New\n ~~~\n - Begin. 
[Bob]\n\n\n \"\"\"),\n changelog)", "def generate_pr_link(pr_num):\n return (\n '[PR #{0}](https://github.com/sendgrid/smtpapi-python/pulls/{0})'\n ).format(pr_num)", "def test_issue_create_comment(self):\n pass", "def construct_email_content(self):\n # Construct header of the message\n content = MAIL_HEAD_CONTENT.replace(\"TITLE_HOLDER\", self.title).replace('FAIL_JOB_HOLDER',\n self.fail_job_content).replace(\n \"TIME_HOLDER\", os.getenv(\"START_TIME\")).replace(\"GRAPH_HOLDER\", os.getenv(\"BENCHMARK_GRAPH\")).replace(\n \"JOB_HOLDER\", os.getenv(\"BENCHMARK_TYPE\")).replace(\"DEVICE_HOLDER\", os.getenv(\"DEVICE_TYPE\")).replace(\"CUDA_HOLDER\", os.getenv(\"VERSION_CUDA\")).replace('DISPLAY', self.job_display)\n\n if not self.alarm_info:\n return\n # Construct alarm content\n content += self.alarm_info\n # Construct the tail of the message\n content += MAIL_TAIL_CONTENT.replace(\"BENCHMARK_WEBSITE1\", os.getenv(\"BENCHMARK_WEBSITE1\", \"\")).strip().replace(\n 'RUN_ENV_HOLDER', self.env_content).replace(\"BENCHMARK_WEBSITE2\", os.getenv(\"BENCHMARK_WEBSITE2\"))\n\n with open(os.path.join(self.log_path, \"mail.html\"), \"w\") as f_object:\n f_object.write(content)", "def Name(self):\n return 'Release Authorised SBL Security Settlement STP Hook'", "def generateNotifyMessage(self):\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n today = date.today()\n current_date = today.strftime(\"%B %d, %Y\")\n\n subject = \"Progam operating warning - Not Running\"\n body = \"Since \" + current_date + \" at \" + current_time \n msg = f'Subject: {subject} \\n\\n{body}'\n return msg", "def changelog(count, name):\n for n in name:\n print(f\"\\n \\033[1m{n} changelog\\033[0m\")\n print(head(rpm(\"-q\", \"--changelog\", n), \"-n\", count))", "def PublishIt(name, path, comments, task=os.getenv('TASK'), status=\"WORK IN PROGRESS\"):\n\n db = get_connection()\n\n PubCollections = db['submissions']\n\n # creation of the dailies submission entry\n publishDict = dict()\n publishDict['date'] = now\n publishDict['type'] = \"publish\"\n publishDict['user_name'] = main_user\n publishDict['task'] = task\n publishDict['status'] = status\n publishDict['asset'] = name\n publishDict['path'] = path\n publishDict['comment'] = comments\n PubCollections.save(publishDict)\n notifications.push_notifications({\"name\": main_user, \"email\": os.getenv('USER_EMAIL')}, users_list, \"publish\", shot, now)", "def releases(releaser, count):\n releases = sorted(\n releaser.get_releases().values(),\n key=lambda rel: rel[\"end_timestamp\"],\n reverse=True,\n )\n click.echo(f\"Latest {count} releases:\")\n for release in releases[:count]:\n click.echo(f'{release[\"end_timestamp\"]} {release[\"commit\"]}')", "def notes_setup(self):\n pass", "def __init__(self,\n project_id='issue-label-bot-dev',\n topic_name='event_queue',\n subscription_name='subscription_for_event_queue',\n embedding_api_endpoint='https://embeddings.gh-issue-labeler.com/text'):\n # TODO(chunhsiang): change the embedding microservice to be an internal DNS of k8s service.\n # see: https://v1-12.docs.kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services\n self.project_id = project_id\n self.topic_name = topic_name\n self.subscription_name = subscription_name\n self.embedding_api_endpoint = embedding_api_endpoint\n self.embedding_api_key = os.environ['GH_ISSUE_API_KEY']\n self.app_url = os.environ['APP_URL']\n\n # init GitHub app\n github_init()\n # init pubsub subscription\n self.create_subscription_if_not_exists()", "def 
makeReleaseFileName(cls, version: str) -> str:\n\n from peek_platform import PeekPlatformConfig\n\n return os.path.join(\n PeekPlatformConfig.config.platformSoftwarePath,\n 'peek-release-%s.tar.gz' % version)", "def _generate_changelog_for_form(form: PostMetaForm) -> List[str]:\n changelog = []\n changed_fields = form.changed_data.copy()\n if \"wp_id\" in changed_fields:\n changed_fields.remove(\"wp_id\")\n\n __ = lambda form, field: (\n \", \".join([str(i) for i in form.initial.get(field)]),\n \", \".join([str(i) for i in form.cleaned_data.get(field)]),\n )\n _ = lambda form, field: (form.initial.get(field), form.cleaned_data.get(field))\n for changed_field in changed_fields:\n log = None\n\n if changed_field == \"issues\":\n log = '* выпуски сменились с \"{0}\" на \"{1}\"'.format(\n *__(form, changed_field)\n )\n elif changed_field == \"editor\":\n # Initial ForeignKey value is stored as int. Populate it\n args = _(form, changed_field)\n init_editor = str(User.objects.get(id=args[0]))\n new_args = (init_editor, args[1])\n log = '* редактор cменился с \"{0}\" на \"{1}\"'.format(*new_args)\n elif changed_field == \"finished_at\":\n log = '* дедлайн этапа cменился с \"{0}\" на \"{1}\"'.format(\n *_(form, changed_field)\n )\n elif changed_field == \"published_at\":\n log = '* дата публикации сменилась с \"{0}\" на \"{1}\"'.format(\n *_(form, changed_field)\n )\n\n if log:\n changelog.append(log)\n\n return changelog", "def process_release(vb, options):\n if options.release_type:\n vb.set_release(type=options.release_type)\n\n if options.release_stack:\n vb.set_release(stack=options.release_stack)\n\n if options.release_version:\n vb.set_release(version=options.release_version)\n\n if options.release_build:\n vb.set_release(build=options.release_build)\n\n if options.release_compatible:\n vb.set_release(compatible=options.release_compatible)\n\n if options.release_notes:\n vb.set_release(notes=options.release_notes)\n\n if options.release_display:\n vb.set_release(display=options.release_display)\n\n if options.release_package_version:\n vb.set_release(package_version=options.release_package_version)", "def deb(ctx):\n pass", "def test_changeVersionsWithPrerelease(self):\n self._testVersionChanging(9, 2, 7, 38)", "def parse_buginfo(entry):\n bugname = entry['bug']['name'].replace(' ','').replace('/','_')\n bugsrc = entry['ent']['policyContentLocation']\n bugpattern = entry['bug']['pattern']\n try :\n bugaffiliation = entry['bug']['affiliation']\n except KeyError:\n bugaffiliation = \"\"\n bugtype = entry['bug']['type']\n bugpathname = entry['ent']['pathname']\n return WebBug(name=bugname, src=bugsrc, affiliation=bugaffiliation,\n bug_type=bugtype, matched_pattern=bugpattern, pathname=bugpathname)", "def generate_email(start_ref, end_ref, release_date=None):\r\n if release_date is None:\r\n release_date = default_release_date()\r\n prbe = prs_by_email(start_ref, end_ref)\r\n\r\n email = \"\"\"\r\n To: {emails}\r\n\r\n You merged at least one pull request for edx-platform that is going out\r\n in this upcoming release, and you are responsible for verifying those\r\n changes on the staging servers before the code is released. Please go\r\n to the release page to do so:\r\n\r\n https://edx-wiki.atlassian.net/wiki/display/ENG/Release+Page%3A+{date}\r\n\r\n The staging servers are:\r\n\r\n https://www.stage.edx.org\r\n https://stage-edge.edx.org\r\n\r\n Note that you are responsible for verifying any pull requests that you\r\n merged, whether you wrote the code or not. 
(If you didn't write the code,\r\n you can and should try to get the person who wrote the code to help\r\n verify the changes -- but even if you can't, you're still responsible!)\r\n If you find any bugs, please notify me and record the bugs on the\r\n release page. Thanks!\r\n \"\"\".format(\r\n emails=\", \".join(prbe.keys()),\r\n date=release_date.isoformat(),\r\n )\r\n return textwrap.dedent(email).strip()", "def report_bug(self) -> str:\n\n width = os.get_terminal_size().columns\n title = \"YIKES! There's a bug!\".center(width, \"-\")\n title += (\n \"If you are seeing this, then there is something wrong with \"\n \"Miroslava. Please report this issue here: 'https://github.com/\"\n \"kaamiki/miroslava/issues/new' so that we can fix it at the \"\n \"earliest. It would be a great help if you provide the steps, \"\n \"traceback information or even a code sample for reproducing this \"\n \"bug while submitting an issue.\"\n )\n return textwrap.fill(title, width)", "def add_ticket_note(ticket_id, due_date):\n url = f\"{BASE_URL}/api/v2/tickets/{ticket_id}/notes\"\n headers = {\"AUTHorization\": f\"Basic {AUTH}\", \"Content-Type\": \"application/json\"}\n data = {\"body\": f\"Past due date: {due_date}\", \"private\": False}\n\n r = requests.post(url, data=json.dumps(data), headers=headers)\n if r.ok:\n print(f\"Added note on ticket - Ticket ID: {ticket_id}\")\n else:\n logging.debug(f\"Error - {r.status_code} - {r.content}\")", "def create_control():\n os.makedirs('build/DEBIAN', exist_ok=True)\n\n # The description needs to be formatted in a special way.\n description = ''\n for line in textwrap.wrap(qastetray.LONG_DESC, 71):\n description += ' '\n if line:\n description += line\n else:\n description += '.'\n description += '\\n'\n\n data = textwrap.dedent('''\\\n Package: qastetray\n Priority: extra\n Section: net\n Installed-Size: {size}\n Maintainer: {maintainer}\n Architecture: all\n Version: {version}\n Depends: {depends}\n Description: {short_desc}\n {long_desc}\n ''')\n data = data.format(\n size=math.ceil(_get_dir_size('build') / 1024),\n maintainer=qastetray.MAINTAINER,\n version=qastetray.VERSION,\n depends=', '.join(qastetray.DEBIAN_DEPENDS),\n short_desc=qastetray.SHORT_DESC.rstrip('.'),\n long_desc=description,\n )\n with open('build/DEBIAN/control', 'w') as f:\n f.write(data)", "def create_readme(case_dict):\n # ---------------------------------------------------------------------\n logger.debug(\"create_readme\")\n os.chdir(case_dict[\"archive_temp_dir\"])\n\n fname = open(\"README.archive\", \"w\")\n fname.write(\"Archived metadata is available for this case at URL:\\n\")\n fname.write(case_dict[\"base_expdb_url\"])\n fname.close()", "def _create_releaseinfo_file(projname, relinfo_str):\n dirs = projname.split('.')\n os.chdir(os.path.join(*dirs))\n print 'updating releaseinfo.py for %s' % projname\n with open('releaseinfo.py', 'w') as f:\n f.write(relinfo_str)", "def _create_issue(self, dep_name, dep_latest_version, is_subtask=False, parent_key=None):\n logging.info(\"Creating a new JIRA issue to track {0} upgrade process\".format(dep_name))\n assignee, owners = self._find_owners(dep_name)\n summary = _ISSUE_SUMMARY_PREFIX + dep_name\n if dep_latest_version:\n summary = summary + \" \" + dep_latest_version\n description = \"\"\"\\n\\n{0}\\n\n Please review and upgrade the {1} to the latest version {2} \\n \n cc: \"\"\".format(\n datetime.today(),\n dep_name,\n dep_latest_version\n )\n for owner in owners:\n description += \"[~{0}], \".format(owner)\n try:\n if not 
is_subtask:\n issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description, assignee=assignee)\n else:\n issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description, assignee=assignee, parent_key=parent_key)\n except Exception as e:\n logging.error(\"Failed creating issue: \"+ str(e))\n raise e\n return issue", "def __get_ticket_print(self, **kwargs):\n # TODO: выяснить используется ли pdf в принципе. В эл.регестратуре он никак не используется\n # TODO: pdf creator based on Flask templates and xhtml2pdf\n return \"\"", "def __init__(self, name: unicode, version: unicode, releaseName: unicode):\n ...", "def description(self):\n publisher = self.parent\n\n shotgun_url = publisher.sgtk.shotgun_url\n\n media_page_url = \"%s/page/media_center\" % (shotgun_url,)\n review_url = \"https://www.shotgunsoftware.com/features/#review\"\n\n return \"\"\"\n Separate layers and upload to Shotgun for review.<br><br>\n\n A <b>Version</b> entry will be created in Shotgun and a transcoded\n copy of the file will be attached to it. The file can then be reviewed\n via the project's <a href='%s'>Media</a> page, <a href='%s'>RV</a>, or\n the <a href='%s'>Shotgun Review</a> mobile app.\n \"\"\" % (media_page_url, review_url, review_url)", "def Releases():\n return releases", "def _create_db_entries(self, qa):\n\n def _package_description(raw):\n return raw[2:].replace(' - ', ' - ')\n\n log.debug('Creating database entries')\n\n\n # Parse component and section from field in changes\n component, section = parse_section(self.changes['files'][0]['section'])\n\n # Check whether package is already in the database\n package_query = meta.session.query(Package).filter_by(name=self.changes['Source'])\n if package_query.count() == 1:\n log.debug('Package %s already exists in the database' % self.changes['Source'])\n package = package_query.one()\n # Update description to make sure it reflects the latest upload\n package.description = _package_description(self.changes['Description'])\n else:\n log.debug('Package %s is new to the system' % self.changes['Source'])\n package = Package(name=self.changes['Source'], user=self.user)\n package.description = _package_description(self.changes['Description'])\n package.needs_sponsor = 0\n meta.session.add(package)\n\n # No need to check whether there is the same source name and same version as an existing\n # entry in the database as the upload controller tested whether similar filenames existed\n # in the repository. 
The only way this would be wrong is if the filename had a different\n # version in than the Version field in changes..\n\n\n try:\n closes = self.changes['Closes']\n except KeyError:\n closes = None\n\n # TODO: fix these magic numbers\n if qa.stop():\n qa_status = 1\n else:\n qa_status = 0\n\n maintainer_matches = re.compile(r'(.*) <(.*)>').match(self.changes['Changed-By'])\n maintainer = maintainer_matches.group(2)\n\n package_version = PackageVersion(package=package, version=self.changes['Version'],\n section=section, distribution=self.changes['Distribution'], qa_status=qa_status,\n component=component, priority=self.changes.get_priority(), closes=closes,\n uploaded=datetime.now(), maintainer=maintainer)\n meta.session.add(package_version)\n\n source_package = SourcePackage(package_version=package_version)\n meta.session.add(source_package)\n\n binary_package = None\n\n # Add PackageFile objects to the database for each uploaded file\n for file in self.files:\n filename = os.path.join(self.changes.get_pool_path(), file)\n # This exception should be never caught.\n # It implies something went wrong before, as we expect a file which does not exist\n try:\n sum = md5sum(os.path.join(pylons.config['debexpo.repository'], filename))\n except AttributeError as e:\n self._fail(\"Could not calculate MD5 sum: %s\" % (e))\n\n size = os.stat(os.path.join(pylons.config['debexpo.repository'], filename))[ST_SIZE]\n\n # Check for binary or source package file\n if file.endswith('.deb'):\n # Only create a BinaryPackage if there actually binary package files\n if binary_package is None:\n binary_package = BinaryPackage(package_version=package_version, arch=file[:-4].split('_')[-1])\n meta.session.add(binary_package)\n\n meta.session.add(PackageFile(filename=filename, binary_package=binary_package, size=size, md5sum=sum))\n else:\n meta.session.add(PackageFile(filename=filename, source_package=source_package, size=size, md5sum=sum))\n\n meta.session.commit()\n log.warning(\"Finished adding PackageFile objects.\")\n\n # Add PackageInfo objects to the database for the package_version\n for result in qa.result:\n meta.session.add(PackageInfo(package_version=package_version, from_plugin=result.from_plugin,\n outcome=result.outcome, rich_data=result.data, severity=result.severity))\n\n # Commit all changes to the database\n meta.session.commit()\n log.debug('Committed package data to the database')\n\n subscribers = meta.session.query(PackageSubscription).filter_by(package=self.changes['Source']).filter(\\\n PackageSubscription.level <= constants.SUBSCRIPTION_LEVEL_UPLOADS).all()\n\n if len(subscribers) > 0:\n email = Email('package_uploaded')\n self.send_email(email, [s.user.email for s in subscribers], package=self.changes['Source'],\n version=self.changes['Version'], user=self.user)\n\n log.debug('Sent out package subscription emails')\n\n # Send success email to uploader\n email = Email('successful_upload')\n dsc_url = pylons.config[\n 'debexpo.server'] + '/debian/' + self.changes.get_pool_path() + '/' + self.changes.get_dsc()\n rfs_url = pylons.config['debexpo.server'] + url('rfs', packagename=self.changes['Source'])\n self.send_email(email, [self.user.email], package=self.changes['Source'],\n dsc_url=dsc_url, rfs_url=rfs_url)" ]
[ "0.7133973", "0.62649", "0.6180011", "0.61652756", "0.57979625", "0.5698693", "0.56655735", "0.56425494", "0.55498487", "0.55108356", "0.5495176", "0.5493605", "0.5473608", "0.54344285", "0.5391342", "0.5365955", "0.5322172", "0.5317257", "0.5311151", "0.52918744", "0.52649474", "0.52396494", "0.5229949", "0.5215989", "0.5195518", "0.51755357", "0.51697934", "0.5121394", "0.5102438", "0.50863516", "0.5083688", "0.5068698", "0.50559604", "0.50148535", "0.5010473", "0.49948615", "0.49858275", "0.49842945", "0.4981544", "0.49751952", "0.49733067", "0.49728417", "0.49606395", "0.4950921", "0.4947838", "0.4939157", "0.49359706", "0.49213618", "0.49181458", "0.49176362", "0.4913224", "0.49032977", "0.4899616", "0.48825887", "0.4866653", "0.48474568", "0.48434836", "0.48243666", "0.48169336", "0.4809209", "0.4808288", "0.47993836", "0.47944382", "0.47896823", "0.47872245", "0.47848615", "0.47793913", "0.47761482", "0.47735417", "0.47720584", "0.47596222", "0.47583675", "0.47386822", "0.4733764", "0.47295117", "0.47216904", "0.47164276", "0.47065312", "0.47056645", "0.47032753", "0.47008252", "0.46968317", "0.4692701", "0.46865776", "0.46845675", "0.46817392", "0.46734166", "0.46681672", "0.4665994", "0.46659082", "0.46639857", "0.46636432", "0.46561942", "0.46555477", "0.46516588", "0.4651296", "0.46493384", "0.46475956", "0.4644518", "0.46433845" ]
0.6374402
1
Gets the confidence of this PcrTestRecordResult.
def confidence(self):
    return self._confidence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence(self) -> float:\n return self._confidence", "def confidence(self) -> float:\n return float(self.class_scores[self.class_num])", "def detection_confidence(self):\n return self._detection_confidence", "def get_medie_confidence(self):\n return self.__medie_confidence", "def get_min_confidence(self):\n return self.__min_confidence", "def confidence_at_tpr(self, tpr):\r\n\r\n assert self.validation_confidences is not None\r\n assert tpr > 0\r\n\r\n # true positives are correctly classified examples\r\n if self.sorted_correct_validation_confidences is None:\r\n correct_validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]\r\n self.sorted_correct_validation_confidences = numpy.sort(numpy.copy(correct_validation_confidences))\r\n # rounding is a hack see tests\r\n cutoff = math.floor(self.sorted_correct_validation_confidences.shape[0] * round((1 - tpr), 2))\r\n assert cutoff >= 0\r\n assert cutoff < self.sorted_correct_validation_confidences.shape[0]\r\n return self.sorted_correct_validation_confidences[cutoff]", "def generate_confidence(self):\n conf_score = np.random.normal(self.speech_conf_mean,\n self.speech_conf_std)\n conf_score = round(conf_score, 2)\n conf_score = max(conf_score, 0.0) # >= 0.\n conf_score = min(conf_score, 1.0) # <= 1.\n return conf_score", "def min_confidence(self) -> float:\n return self._min_confidence", "def confidence_at_99tpr(self):\r\n\r\n return self.confidence_at_tpr(0.99)", "def confidence_at_995tpr(self):\r\n\r\n return self.confidence_at_tpr(0.995)", "def confidence(self):\n\n choices = self.choices\n\n # Get the chi-squared between the top two choices, if more than two choices exist\n if len(choices) >= 2:\n csq = chi_squared(*choices)\n confident = is_confident(csq, len(choices)) if len(choices) <= 10 else None\n else:\n csq = None\n confident = False\n\n return (csq, confident)", "def confidence_at_98tpr(self):\r\n\r\n return self.confidence_at_tpr(0.98)", "def landmarking_confidence(self):\n return self._landmarking_confidence", "def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]", "def confidence_at_95tpr(self):\r\n\r\n return self.confidence_at_tpr(0.95)", "def confidence(self, filename):\n f = open(filename, 'rb')\n content = list(f.read())\n f.close()\n\n file_entropy = self.entropy(content)\n\n return (round(file_entropy / 8 * 100), filename)", "def __determina_media_confidence(self):\n media = 0\n nr = 0\n for el in self.__results['conf']:\n media += int(el)\n nr += 1\n media /= nr\n return media", "def confidence(self, value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n request_data = {'confidence': value}\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )", "def is_successful(self):\n try:\n if self.is_skipped:\n return TestCase.EX_TESTCASE_SKIPPED\n assert self.criteria\n assert self.result is not None\n if (not isinstance(self.result, str) and\n not isinstance(self.criteria, str)):\n if self.result >= self.criteria:\n return TestCase.EX_OK\n else:\n # Backward compatibility\n # It must be removed as soon as TestCase subclasses\n # stop setting result = 'PASS' or 'FAIL'.\n # In this case criteria is unread.\n self.__logger.warning(\n \"Please update result which must be an int!\")\n if self.result == 'PASS':\n 
return TestCase.EX_OK\n except AssertionError:\n self.__logger.error(\"Please run test before checking the results\")\n return TestCase.EX_TESTCASE_FAILED", "def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))", "def fpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[self.test_errors] >= threshold) / float(numpy.sum(self.test_errors))", "def score(self) -> FAIRResultCommonScore:\n return self._score", "def result(self):\n prec_value = self.precision.result()\n recall_value = self.recall.result()\n return 2 * math_ops.div_no_nan(prec_value * recall_value,\n prec_value + recall_value)", "def confidence(self, confidence):\n self._confidence = confidence", "def confidence(self, confidence):\n self._confidence = confidence", "def confidence(self, confidence: float):\n\n self._confidence = confidence", "def confidence_values(self) -> List[Union[int, str]]:\n\n return self._confidence_values", "def confidence(s, p):\r\n p = Basic.sympify(p)\r\n assert p <= 1\r\n\r\n d = (s.b-s.a)*p / 2\r\n return (s.mean - d, s.mean + d)", "def calc_confidence_level(self, z_value):\n\n confidence_level = 0.5 * (1 + math.erf(z_value/2**0.5))\n\n return confidence_level", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0", "def get_result(self):\n config = self.bisect_config\n results_confidence = 0\n if self.culprit:\n results_confidence = self.api.m.math_utils.confidence_score(\n self.lkgr.values, self.fkbr.values)\n\n if self.failed:\n status = 'failed'\n elif self.bisect_over:\n status = 'completed'\n else:\n status = 'started'\n\n aborted_reason = None\n if self.failed_initial_confidence:\n aborted_reason = _FAILED_INITIAL_CONFIDENCE_ABORT_REASON\n elif self.failed_direction:\n aborted_reason = _DIRECTION_OF_IMPROVEMENT_ABORT_REASON\n return {\n 'try_job_id': config.get('try_job_id'),\n 'bug_id': config.get('bug_id'),\n 'status': status,\n 'buildbot_log_url': self._get_build_url(),\n 'bisect_bot': self.get_perf_tester_name(),\n 'command': config['command'],\n 'test_type': config['test_type'],\n 'metric': config['metric'],\n 'change': self.relative_change,\n 'score': results_confidence,\n 'good_revision': self.good_rev.commit_hash,\n 'bad_revision': self.bad_rev.commit_hash,\n 'warnings': self.warnings,\n 'aborted_reason': aborted_reason,\n 'culprit_data': self._culprit_data(),\n 'revision_data': self._revision_data()\n }", "def cov(self):\n return self.cond_proba.cov", "def get(self):\n score = self._evaluate(self.y_true, self.y_pred)\n\n return score", "def cal_confidence(dat):\n\n\talpha = 40.0\n\tconfidence = np.zeros(dat.shape)\n\tconfidence = 1 + alpha * dat\n\treturn np.matrix(confidence)", "def sensitivity(self):\n return self.recall", "def conf(self, success, total):\n try:\n sp = success / total\n conf = binom_conf_interval(success, total, interval='jeffreys')\n uperr = conf[1] - sp # 1 sigma confidence above mean\n loerr = sp - conf[0] # 1 sigma confidence below mean\n return sp, uperr, loerr, 0.5*(uperr+loerr)\n except ValueError as e:\n return 0, 0, 0, 0", "def success_code(self):\n if self._results is None:\n return None\n return self._results.fields['omci_message'].fields['success']", "def score(self):\n return 1 if self.succeeded() else 0", "def get_score(self):\r\n return self.lcp.get_score()", "def get_success_probability(self):\n\t\treturn 
min(self.get_raw_probability(), RunOrder.MAX_PERCENTS)", "def test_confidences(self):\n\n # Add alignments to pipeline\n for hit, aln in zip(self.pipeline[\"templates\"], self.ALIGNMENTS):\n hit[\"alignment\"] = aln\n\n parser = hhsuite.FastaParser()\n results = parser.run(self.pipeline)\n self.assertEqual(\n results[\"templates\"][0][\"sequence_alignments\"][\"confidence\"],\n \"---5-4-----\")\n self.assertEqual(\n results[\"templates\"][1][\"sequence_alignments\"][\"confidence\"],\n \"----3-----\")", "def precision(y_test, y_pred):\n\treturn precision_score(y_test, y_pred)", "def cci(self) -> float:\n return self._cci", "def getConfidence(self,LeftTup,RightTup):\n\n tup=LeftTup+RightTup\n _intersection=self.getSupport(tup)\n _LHS=self.getSupport(LeftTup)\n _confidence=_intersection/_LHS\n return (_confidence)", "def setIncludeConfidence(self, value):\n return self._set(includeConfidence=value)", "def setIncludeConfidence(self, value):\n return self._set(includeConfidence=value)", "def test_error_at_confidence(self, threshold):\r\n\r\n nominator = numpy.sum(numpy.logical_and(self.test_errors, self.test_confidences >= threshold))\r\n denominator = numpy.sum(self.test_confidences >= threshold)\r\n if denominator > 0:\r\n return nominator / float(denominator)\r\n else:\r\n return 0", "def find_confidence(self, chi2, df):\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n # Subtract from one to get confidence.\n confidence = (1.0 - float(col))\n return confidence", "def confidence(self, confidence):\n\n self._confidence = confidence", "def confidence(self, confidence):\n\n self._confidence = confidence", "def get_confidence_metrics(\n prediction_result: Mapping[str, Any]\n) -> Mapping[str, Any]:\n conf_metrics = {}\n conf_metrics['plddt'] = confidence.compute_plddt(\n prediction_result['predicted_lddt']['logits'])\n if 'predicted_aligned_erorr' in prediction_result.keys():\n conf_metrics.update(confidence.compute_predicted_aligned_error(\n prediction_result['predicted_aligned_error']['logits'],\n prediction_result['predicted_aligned_error']['breaks']\n ))\n conf_metrics['ptm'] = confidence.predicted_tm_score(\n prediction_result['predicted_aligned_error']['logits'],\n prediction_result['predicted_aligned_error']['breaks']\n )\n return conf_metrics", "def result(self):\n return (\"Precision@\" + str(self.length) + \": \"), (self.hit / self.test)", "def percent_covered(self):\n out = self.coverage\n return out and out.cover", "def cov(self):\n return self._cov", "def getPredictedResult(self):\n output = self.svclassifier.predict([self.inputData])\n return output[0]", "def _get_result(self):\n try:\n # get test data\n test_id = self._feature_processor.test_data_id\n test_feature = self._feature_processor.test_data_feature\n test_target = self._feature_processor.test_data_target\n\n # process data\n test_feature = test_feature.astype(\"float64\", errors='ignore')\n\n # predict\n predict_res = self._model.predict(test_feature)\n predict_res_df = pd.DataFrame(predict_res, columns=[PredictConstance.PRE])\n proba_res = self._model.predict_proba(test_feature)\n proba_res_df = pd.DataFrame([str(x) for x in proba_res],\n columns=[PredictConstance.PROBA])\n\n res = [test_id, predict_res_df, proba_res_df]\n # get model score\n if test_target is not None:\n res.append(test_target)\n model_auc = 
pre_utils.PredictUtils.get_roc_score(test_target, proba_res)\n model_score = pre_utils.PredictUtils.get_model_score(test_target, predict_res)\n model_score.update(model_auc)\n with open(os.path.join(self._result_path, PredictConstance.TEST_SCORE), \"w\") as ftp:\n ftp.write(str(model_score))\n\n # joint predict result\n self._joint_predict_result(res)\n\n return True\n except Exception as err:\n self.managerlogger.logger.error(\"base ml get result error: %s\" % err)\n self.errorlogger.logger.error(\"base ml get result error:\\n %s\" % traceback.format_exc())\n return False", "def uncertainty(self):\n return self._uncertainty", "def uncertainty(self) -> float:\n return self.__uncertainty", "def uncertainty(self) -> float:\n return self.__uncertainty", "def get_confidence(cls, X, y=None):\n scores = []\n for metric_wrapper, weight in cls.confidence_computation.items():\n scores.append(metric_wrapper.calculate(X) * weight)\n return sum(scores)", "def predict(self, expression=None):\r\n confidence_score = 0\r\n intent = None\r\n\r\n #intent, confidence_score = self.ncf(expression)\r\n intent, confidence_score = \"Test\",100\r\n l_msg = \"model found : {} and score is {}\".format(intent, confidence_score)\r\n log.debug(l_msg)\r\n\r\n return intent, confidence_score", "def success_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"success_threshold\")", "def success_threshold(self) -> Optional[int]:\n return pulumi.get(self, \"success_threshold\")", "def get_failure_rate(self) -> float:\n return self.failurerate", "def corr_coeff(self) -> float:\n correlation_coefficient = np.corrcoef(self.true, self.predicted)[0, 1]\n return float(correlation_coefficient)", "def calcAccuracy(measuredConc, expectedConc):\n accuracy = (numpy.mean(measuredConc) / expectedConc) * 100\n return accuracy", "def score_coefficient(self):\n return self.predictor._score_coefficient", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def compute_confidence_interval(self) -> bool:\n return False", "def alert_sensitivity(self) -> str:\n return pulumi.get(self, \"alert_sensitivity\")", "def _calculate_tp_confidences(images, test_class):\n confidences = []\n for (response_json, class_name) in predict(images, desc=f\"[{test_class}] inference\"):\n if response_json[\"status\"] != \"ok\":\n raise Exception(f\"Not OK response in {class_name}\")\n if class_name == test_class and response_json[\"response\"] == class_name:\n confidences.append(response_json[\"confidence\"])\n return confidences", "def get_precision(self, y_true, y_pred):\n model_entities_filter = (y_pred != 3).astype(\"int\") # of the words our model say has a NER class\n precision_correct_entities = (y_pred[np.where(model_entities_filter)] == y_true[np.where(model_entities_filter)]).astype(\"int\")\n precision = np.sum(precision_correct_entities)/np.sum(model_entities_filter)\n return precision", "def coherence(self):\r\n coherence = np.abs(self.coherency ** 2)\r\n\r\n return coherence", "def baseline(self):\n return self.data[self.data['treatment'] == 'Baseline']", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def contrast(self):\n return self._contrast", "def receiver_operating_characteristic_labels_scores(self):\r\n\r\n return numpy.logical_not(self.test_errors).astype(int), self.test_confidences", "def accuracy(self):\n\t\treturn self.accuracy_", "def test_chao1_confidence(self): \n #NOTE: EstimateS rounds to 2 dp\n self.assertFloatEqual(chao1_confidence(self.TestData), (9.07,17.45), \\\n 
eps=0.01)\n self.assertFloatEqual(chao1_confidence(self.TestData, \\\n bias_corrected=False), (9.17,21.89), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles),\\\n (4, 4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles, \\\n bias_corrected=False), (4,4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles), \\\n (4.08,17.27), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles, \\\n bias_corrected=False), (4.08,17.27), eps=0.01)", "def get_test_status(self) -> str:\n return self.__test_result[Result.__RESULT]", "def pred_sentiment(self):\n return self._pred_sentiment", "def extract_confidence_from_human_detections(self, i):\n self.confidence = self.detections[0, 0, i, 2]", "def cv(self):\n return self.close.std() / self.close.mean()", "def risk_score(self):\n return GoalMetric.objects.filter(group=self.metric_group,\n type=GoalMetric.METRIC_TYPE_RISK_SCORE).values_list('configured_val',\n flat=True).first()", "def get_score(self):\n\n return self._score", "def precision_score(y_true, y_pred):\n return ((y_true == 1) * (y_pred == 1)).sum() / (y_pred == 1).sum()", "def get_sensitivity(self) -> int:\n\n return self._sensitivity", "def get_score(self):\n return float(self._score)", "def estimates_conf(self):\n return self._est_L, self._est_R", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def precision(y_true, y_pred):\n true_positives = bk.sum(bk.round(bk.clip(y_true * y_pred, 0, 1)))\n predicted_positives = bk.sum(bk.round(bk.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + bk.epsilon())\n return precision", "def GetProportion(self):\r\n\r\n return self.proportion", "def _calculate_fp_confidences(images, test_classes):\n confidences = []\n for (response_json, class_name) in predict(images, desc=f\"[{BACKGROUND}] inference\"):\n if response_json[\"status\"] != \"ok\":\n raise Exception(f\"Not OK response in {class_name}\")\n if response_json[\"response\"] in test_classes:\n confidences.append(response_json[\"confidence\"])\n return confidences", "def actualthreshold(self):\n return self._actualthreshold" ]
[ "0.74183095", "0.695079", "0.6752761", "0.62172127", "0.6101365", "0.59268916", "0.59007055", "0.58964837", "0.58102906", "0.5795349", "0.5786901", "0.57829434", "0.5746195", "0.56174123", "0.56115687", "0.5601758", "0.55095553", "0.55019873", "0.545608", "0.5437329", "0.5400275", "0.5359699", "0.53264403", "0.5325444", "0.5325444", "0.5318751", "0.5312392", "0.5309795", "0.53082025", "0.5288559", "0.52741116", "0.52674645", "0.52575874", "0.52511233", "0.5222463", "0.5221529", "0.5218998", "0.51974595", "0.5194397", "0.5187064", "0.51771086", "0.5171609", "0.51621425", "0.51419044", "0.5139576", "0.5139576", "0.51281154", "0.5127179", "0.5119843", "0.5119843", "0.5109918", "0.5077303", "0.5069659", "0.5049414", "0.50346076", "0.5009875", "0.4994154", "0.49900293", "0.49900293", "0.49867824", "0.49761856", "0.49636424", "0.49636424", "0.49629778", "0.49437544", "0.49431685", "0.49139714", "0.49136248", "0.49113816", "0.4911185", "0.490952", "0.4907194", "0.48982406", "0.4884112", "0.48824325", "0.4880344", "0.48462993", "0.4838795", "0.4831662", "0.48312703", "0.48290402", "0.4819049", "0.48109907", "0.48105115", "0.4808781", "0.4797666", "0.47971234", "0.47967064", "0.47953096", "0.47929415", "0.47929415", "0.47929415", "0.4791664", "0.4791664", "0.4791664", "0.47882688", "0.47847393", "0.47847205", "0.47812036" ]
0.7580267
1
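The record above and the one that follows pair the docstring-style queries "Gets/Sets the confidence of this PcrTestRecordResult." with a plain getter and setter over a private `_confidence` attribute. A minimal sketch of how such a pair is conventionally written as a Python property is given below; the `PcrTestRecordResult` class name and the `confidence` attribute are taken from the records themselves, while the surrounding scaffolding (constructor, property decorators) is an illustrative assumption and is not part of the dataset rows.

class PcrTestRecordResult:
    # Illustrative sketch only -- not part of the dataset rows above or below.
    # Class and attribute names follow the adjacent records; the constructor
    # and property form are assumptions added for readability.
    def __init__(self, confidence=None):
        self._confidence = confidence

    @property
    def confidence(self):
        """Gets the confidence of this PcrTestRecordResult."""
        return self._confidence

    @confidence.setter
    def confidence(self, confidence):
        """Sets the confidence of this PcrTestRecordResult."""
        self._confidence = confidence

Routing access through a property keeps the public `confidence` attribute compatible with the plain getter/setter shown in the records while still storing the value on the private `_confidence` backing field.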
Sets the confidence of this PcrTestRecordResult.
def confidence(self, confidence):
    self._confidence = confidence
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence(self, confidence: float):\n\n self._confidence = confidence", "def confidence(self, confidence):\n\n self._confidence = confidence", "def confidence(self, confidence):\n\n self._confidence = confidence", "def confidence(self) -> float:\n return self._confidence", "def confidence(self):\n return self._confidence", "def confidence(self):\n return self._confidence", "def setIncludeConfidence(self, value):\n return self._set(includeConfidence=value)", "def setIncludeConfidence(self, value):\n return self._set(includeConfidence=value)", "def setIncludeConfidence(self, b):\n return self._set(includeConfidence=b)", "def setIncludeConfidence(self, b):\n return self._set(includeConfidence=b)", "def confidence(self, value):\n if not self.can_update():\n self._handle_error(910, [self.type])\n request_data = {'confidence': value}\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )", "def confidence(self) -> float:\n return float(self.class_scores[self.class_num])", "def scanner_confidence(self, scanner_confidence):\n\n self._scanner_confidence = scanner_confidence", "def set_min_confidence(self, new_min):\n self.__min_confidence = new_min", "def confidence_at_tpr(self, tpr):\r\n\r\n assert self.validation_confidences is not None\r\n assert tpr > 0\r\n\r\n # true positives are correctly classified examples\r\n if self.sorted_correct_validation_confidences is None:\r\n correct_validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]\r\n self.sorted_correct_validation_confidences = numpy.sort(numpy.copy(correct_validation_confidences))\r\n # rounding is a hack see tests\r\n cutoff = math.floor(self.sorted_correct_validation_confidences.shape[0] * round((1 - tpr), 2))\r\n assert cutoff >= 0\r\n assert cutoff < self.sorted_correct_validation_confidences.shape[0]\r\n return self.sorted_correct_validation_confidences[cutoff]", "def _process_confidence(self, metadata: MetadataTransformModel | None):\n self.add_confidence(self._transform_value(metadata))", "def generate_confidence(self):\n conf_score = np.random.normal(self.speech_conf_mean,\n self.speech_conf_std)\n conf_score = round(conf_score, 2)\n conf_score = max(conf_score, 0.0) # >= 0.\n conf_score = min(conf_score, 1.0) # <= 1.\n return conf_score", "def min_confidence(self) -> float:\n return self._min_confidence", "def confidence_at_995tpr(self):\r\n\r\n return self.confidence_at_tpr(0.995)", "def test_confidences(self):\n\n # Add alignments to pipeline\n for hit, aln in zip(self.pipeline[\"templates\"], self.ALIGNMENTS):\n hit[\"alignment\"] = aln\n\n parser = hhsuite.FastaParser()\n results = parser.run(self.pipeline)\n self.assertEqual(\n results[\"templates\"][0][\"sequence_alignments\"][\"confidence\"],\n \"---5-4-----\")\n self.assertEqual(\n results[\"templates\"][1][\"sequence_alignments\"][\"confidence\"],\n \"----3-----\")", "def confidence_at_95tpr(self):\r\n\r\n return self.confidence_at_tpr(0.95)", "def confidence_at_99tpr(self):\r\n\r\n return self.confidence_at_tpr(0.99)", "def detection_confidence(self):\n return self._detection_confidence", "def confidence_at_98tpr(self):\r\n\r\n return self.confidence_at_tpr(0.98)", "def get_min_confidence(self):\n return self.__min_confidence", "def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))", "def confidence(s, p):\r\n p = 
Basic.sympify(p)\r\n assert p <= 1\r\n\r\n d = (s.b-s.a)*p / 2\r\n return (s.mean - d, s.mean + d)", "def conf(self, success, total):\n try:\n sp = success / total\n conf = binom_conf_interval(success, total, interval='jeffreys')\n uperr = conf[1] - sp # 1 sigma confidence above mean\n loerr = sp - conf[0] # 1 sigma confidence below mean\n return sp, uperr, loerr, 0.5*(uperr+loerr)\n except ValueError as e:\n return 0, 0, 0, 0", "def get_medie_confidence(self):\n return self.__medie_confidence", "def fpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[self.test_errors] >= threshold) / float(numpy.sum(self.test_errors))", "def __determina_media_confidence(self):\n media = 0\n nr = 0\n for el in self.__results['conf']:\n media += int(el)\n nr += 1\n media /= nr\n return media", "def confidence(self, filename):\n f = open(filename, 'rb')\n content = list(f.read())\n f.close()\n\n file_entropy = self.entropy(content)\n\n return (round(file_entropy / 8 * 100), filename)", "def test_error_at_confidence(self, threshold):\r\n\r\n nominator = numpy.sum(numpy.logical_and(self.test_errors, self.test_confidences >= threshold))\r\n denominator = numpy.sum(self.test_confidences >= threshold)\r\n if denominator > 0:\r\n return nominator / float(denominator)\r\n else:\r\n return 0", "def set_result(self, result):\n self.__test_result[Result.__RESULT] = result", "def test_chao1_confidence(self): \n #NOTE: EstimateS rounds to 2 dp\n self.assertFloatEqual(chao1_confidence(self.TestData), (9.07,17.45), \\\n eps=0.01)\n self.assertFloatEqual(chao1_confidence(self.TestData, \\\n bias_corrected=False), (9.17,21.89), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles),\\\n (4, 4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoSingles, \\\n bias_corrected=False), (4,4.95), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles), \\\n (4.08,17.27), eps=0.01)\n self.assertFloatEqualAbs(chao1_confidence(self.NoDoubles, \\\n bias_corrected=False), (4.08,17.27), eps=0.01)", "def get_confidence_metrics(\n prediction_result: Mapping[str, Any]\n) -> Mapping[str, Any]:\n conf_metrics = {}\n conf_metrics['plddt'] = confidence.compute_plddt(\n prediction_result['predicted_lddt']['logits'])\n if 'predicted_aligned_erorr' in prediction_result.keys():\n conf_metrics.update(confidence.compute_predicted_aligned_error(\n prediction_result['predicted_aligned_error']['logits'],\n prediction_result['predicted_aligned_error']['breaks']\n ))\n conf_metrics['ptm'] = confidence.predicted_tm_score(\n prediction_result['predicted_aligned_error']['logits'],\n prediction_result['predicted_aligned_error']['breaks']\n )\n return conf_metrics", "def landmarking_confidence(self):\n return self._landmarking_confidence", "def set_tolerance(self, tol):\n self.precision = tol\n return", "def confidence(self):\n\n choices = self.choices\n\n # Get the chi-squared between the top two choices, if more than two choices exist\n if len(choices) >= 2:\n csq = chi_squared(*choices)\n confident = is_confident(csq, len(choices)) if len(choices) <= 10 else None\n else:\n csq = None\n confident = False\n\n return (csq, confident)", "def _set_insufficient_confidence_warning(\n self): # pragma: no cover\n self.failed_initial_confidence = True\n self.surface_result('LO_INIT_CONF')\n self.warnings.append(\n 'Bisect failed to reproduce the regression with enough confidence.')", "def confidence_values(self) -> List[Union[int, str]]:\n\n return self._confidence_values", "def 
classification_evaluation(self, test_set, predicted_values, certainty):\r\n\r\n percent_accuracy = self.percent_accuracy(test_set, predicted_values)\r\n one_zero = self.one_zero_loss(test_set, predicted_values)\r\n log_loss = self.log_loss(test_set, predicted_values, certainty)\r\n print(f\"Percent correct:\\t{percent_accuracy * 100:.2f}%\")\r\n print(f\"1/0 Loss:\\t\\t\\t{one_zero:.2f}\")\r\n print(\"Log Loss: \", log_loss)", "def precision(y_test, y_pred):\n\treturn precision_score(y_test, y_pred)", "def is_successful(self):\n try:\n if self.is_skipped:\n return TestCase.EX_TESTCASE_SKIPPED\n assert self.criteria\n assert self.result is not None\n if (not isinstance(self.result, str) and\n not isinstance(self.criteria, str)):\n if self.result >= self.criteria:\n return TestCase.EX_OK\n else:\n # Backward compatibility\n # It must be removed as soon as TestCase subclasses\n # stop setting result = 'PASS' or 'FAIL'.\n # In this case criteria is unread.\n self.__logger.warning(\n \"Please update result which must be an int!\")\n if self.result == 'PASS':\n return TestCase.EX_OK\n except AssertionError:\n self.__logger.error(\"Please run test before checking the results\")\n return TestCase.EX_TESTCASE_FAILED", "def final_result(self, hyp, confidence):\n print(\"Final:\"+hyp)", "def OnSetContrast(self, evt=None):\n\t\twith self.playerLock :\n\t\t\t#self.player.video_set_adjust_int( vlc.VideoAdjustOption.Enable, 1 )\n\t\t\tcontrast = self.contrastSlider.GetValue() * 2\n\t\t\tprint( 'new contrast: {}'.format( contrast ) )\n\t\t\tif self.player.video_set_adjust_float( vlc.VideoAdjustOption.Contrast, contrast/100.0 ) == -1:\n\t\t\t\tself.errorDialog(\"Failed to set contrast\")\n\t\t\telse:\n\t\t\t\tself.State.contrast = contrast/2\n\t\t\t\tself.SaveState()", "def cci(self, cci: float):\n\n self._cci = cci", "def setIncludeAllConfidenceScores(self, value):\n return self._set(includeAllConfidenceScores=value)", "def setIncludeAllConfidenceScores(self, value):\n return self._set(includeAllConfidenceScores=value)", "def setContrast(self, contrast):\n raise NotImplementedError", "def extract_confidence_from_human_detections(self, i):\n self.confidence = self.detections[0, 0, i, 2]", "def test_robbins_confidence(self):\n c = array([1,2,3,0,1])\n r = robbins_confidence(c, 0.05)\n n = 7\n s = 2\n k = sqrt(8/0.05)\n self.assertEqual(r, ((s-k)/(n+1), (s+k)/(n+1)))", "def test_set_score_scores(self, credit_dict, result):\n self.xblock.credit_dict = credit_dict\n self.xblock.set_score()\n self.assertEqual(self.xblock.score, result)", "def cal_confidence(dat):\n\n\talpha = 40.0\n\tconfidence = np.zeros(dat.shape)\n\tconfidence = 1 + alpha * dat\n\treturn np.matrix(confidence)", "def set_completed(self, result: str = None):\n self._has_run = True\n self.exp_metadata.result = result\n self.exp_metadata.status = ExperimentState.COMPLETED\n\n self._finish_exp_run()", "def contrast(self, val):\n self._contrast = max(0, min(val, 0x7F)) # Clamp to values 0-0x7f\n self.write_cmd(self.CMD_SET_VOLUME_FIRST)\n self.write_cmd(self.CMD_SET_VOLUME_SECOND | (self._contrast & 0x3F))", "def set_test_passed(self):\n self.set_result(Status.PASSED)", "def calc_confidence_level(self, z_value):\n\n confidence_level = 0.5 * (1 + math.erf(z_value/2**0.5))\n\n return confidence_level", "def ci(self):\n var_assumptions = self.var_assumptions if self.var_assumptions == \"pooled\" else \"unequal\"\n ci_vals = self.comparison.zconfint_diff(self.alpha, self.hypothesis_sm, var_assumptions)\n\n return [ci_vals, self.ci_percents]", "def 
statistics_on_test(self, predicted_results, result):\n # Print confusion matrix and mean average precision score\n predicted_results_binary = self.predicted_results_to_binary(predicted_results)\n print(\"\\nConfusion matrix : \")\n print(confusion_matrix(result, predicted_results_binary))\n print(\"\\nAverage precision score : \", average_precision_score(result, predicted_results_binary))", "def pe_ratio(self, pe_ratio):\n self._pe_ratio = pe_ratio", "def set_QUALITY(self,newQual):\n\t\tself.QUALITY = newQual", "def success_condition(self, success_condition):\n\n self._success_condition = success_condition", "def score(self) -> FAIRResultCommonScore:\n return self._score", "def SetProportion(self, p):\r\n\r\n self.proportion = p", "def result_code(self, result_code):\n\n self._result_code = result_code", "def setUncertainty(self, uncertainty):\n self.uncertainty = uncertainty", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def compute_confidence_interval(self) -> bool:\n return False", "def setNewResult(self, value):\n return self._set(newResult=value)", "def coverage(self):\n try:\n return self.found * 100 / self.needed\n except ZeroDivisionError:\n return 100.0", "def _record_result(self, result: bool, note: Optional[str]) -> None:\n assert isinstance(result, bool)\n\n if self._passed is not None:\n raise RuntimeError('Attempt to pass/fail a completed test')\n self._passed = result\n\n if isinstance(note, HTTPError):\n self._note = 'An HTTP error occured: ' + str(note.code)\n try:\n self._note += '. 
The API returned the following:\\n'\n self._note += ' ' + note.read().decode('utf-8')\n self._note += '\\n Traceback follows:'\n self._note += '\\n ' + traceback.format_exc()\n except Exception:\n pass\n return\n\n if isinstance(note, Exception):\n self._note = traceback.format_exc()\n return\n\n self._note = note\n\n return", "def set_critical(self, critical=1):\n return m2.x509_extension_set_critical(self.x509_ext, critical)", "def SetPCurveCriterion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivideContinuity_SetPCurveCriterion(self, *args)", "def scatter_chart_score(self, grouped):\n score = np.abs(np.corrcoef(grouped.keys(), grouped.values)[0][1])\n if score > 0.3:\n score = 3\n return score", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def tol(self, value):\n self._tol = value", "def percent_accuracy(self, test_set, predicted_values):\r\n\r\n correct = 0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification == predicted_values[i]:\r\n correct += 1\r\n return correct / len(test_set)", "def result(self, val):\n self._result = val\n\n if val != None:\n Images._display_cv_image(self._result_label, val)", "def validation_tpr_at_confidence(self, threshold):\r\n\r\n validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]\r\n return numpy.sum(validation_confidences >= threshold) / float(validation_confidences.shape[0])", "def accuracyPrecision(self, onlyPrecisionReferences=False):\n #from ..enumerations import AssayRole\n #from ..objects import TargetedDataset\n\n def calcAccuracy(measuredConc, expectedConc):\n \"\"\"\n Calculate the accuracy of measurement for a column of data.\n accuracy = (mean(measuredConcentration)/expectedConcentration)*100\n\n :param numpy.ndarray measuredConc: *n* by 1 numpy array of data, with a single feature in column, and samples in rows\n :param float expectedConc: expected concentration\n :return: accuracy value\n :rtype: float\n \"\"\"\n accuracy = (numpy.mean(measuredConc) / expectedConc) * 100\n return accuracy\n\n def calcPrecision(measuredConc):\n \"\"\"\n Calculate the precision of measurement (percent RSD) for a column of data.\n Allow for -inf, inf values in input.\n\n :param numpy.ndarray measuredConc: *n* by 1 numpy array of data, with a single feature in column, and samples in rows\n :return: precisin value\n :rtype: float\n \"\"\"\n std = numpy.std(measuredConc)\n rsd = (std / numpy.mean(measuredConc)) * 100\n if numpy.isnan(rsd):\n rsd = numpy.inf\n return rsd\n\n #if not isinstance(dataset, TargetedDataset):\n # raise TypeError('dataset must be an instance of TargetedDataset.')\n\n # Init\n accuracy = dict()\n precision = dict()\n\n # Restrict to PrecisionReference if necessary\n if onlyPrecisionReferences:\n startMask = (self.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference)\n else:\n startMask = numpy.squeeze(numpy.ones([self.sampleMetadata.shape[0], 1], dtype=bool), axis=1)\n\n # Unique concentrations\n uniqueConc = pandas.unique(self.expectedConcentration.loc[startMask, :].values.ravel()).tolist()\n uniqueConc = sorted([x for x in uniqueConc if str(x) != 'nan'])\n\n # Each SampleType\n sampleTypes = self.sampleMetadata['SampleType'].unique()\n for sampleType in sampleTypes:\n # init\n acc = pandas.DataFrame(numpy.full([len(uniqueConc), self.featureMetadata.shape[0]], numpy.nan), index=uniqueConc, columns=self.featureMetadata['Feature Name'].values)\n prec = pandas.DataFrame(numpy.full([len(uniqueConc), 
self.featureMetadata.shape[0]], numpy.nan), index=uniqueConc, columns=self.featureMetadata['Feature Name'].values)\n # Restrict to sampleType\n # Allow for the case where sampleType is not defined\n if pandas.isnull(sampleType):\n sampleTypeMask = numpy.logical_and(startMask, self.sampleMetadata['SampleType'].isnull())\n else:\n sampleTypeMask = numpy.logical_and(startMask, self.sampleMetadata['SampleType'].values == sampleType)\n # Each feature\n for feat in self.featureMetadata['Feature Name'].tolist():\n # Each unique concentrations\n for conc in uniqueConc:\n # Restrict to concentration\n mask = numpy.logical_and(sampleTypeMask, self.expectedConcentration[feat].values == conc)\n # minimum of samples\n if sum(mask) < 2:\n continue\n # fill accuracy/precision df\n featID = (self.featureMetadata['Feature Name'] == feat).values\n acc.loc[conc, feat] = calcAccuracy(self.intensityData[mask, featID], conc)\n prec.loc[conc, feat] = calcPrecision(self.intensityData[mask, featID])\n # Store accuracy/precision + clean empty rows\n accuracy[sampleType] = acc.dropna(axis=0, how='all')\n precision[sampleType] = prec.dropna(axis=0, how='all')\n\n # All samples\n acc = pandas.DataFrame(numpy.full([len(uniqueConc), self.featureMetadata.shape[0]], numpy.nan), index=uniqueConc, columns=self.featureMetadata['Feature Name'].values)\n prec = pandas.DataFrame(numpy.full([len(uniqueConc), self.featureMetadata.shape[0]], numpy.nan), index=uniqueConc, columns=self.featureMetadata['Feature Name'].values)\n # Each feature\n for feat in self.featureMetadata['Feature Name'].tolist():\n # Each unique concentrations\n for conc in uniqueConc:\n # Restrict to concentration\n mask = numpy.logical_and(startMask, self.expectedConcentration[feat].values == conc)\n # minimum of samples\n if sum(mask) < 2:\n continue\n # fill accuracy/precision df\n featID = (self.featureMetadata['Feature Name'] == feat).values\n acc.loc[conc, feat] = calcAccuracy(self.intensityData[mask, featID], conc)\n prec.loc[conc, feat] = calcPrecision(self.intensityData[mask, featID])\n # Store accuracy/precision\n accuracy['All Samples'] = acc.dropna(axis=0, how='all')\n precision['All Samples'] = prec.dropna(axis=0, how='all')\n\n # Output\n return {'Accuracy': accuracy, 'Precision': precision}", "def calcAccuracy(measuredConc, expectedConc):\n accuracy = (numpy.mean(measuredConc) / expectedConc) * 100\n return accuracy", "def result(self, result):\n\n self._result = result", "def result(self, result):\n\n self._result = result", "def clan_score(self, clan_score):\n\n self._clan_score = clan_score", "def _test_result(effect, margin, se, dof, coverage, effect_size_constituents):\n t_stat = (effect + margin) / se\n if margin:\n p_value = _one_sided_p_value(t_stat, dof)\n else:\n p_value = _two_sided_p_value(t_stat, dof)\n t_alpha = scipy.stats.t.isf((1 - coverage) / 2.0, dof)\n lower = effect - t_alpha * se\n upper = effect + t_alpha * se\n return TestResult(\n effect=effect,\n ci=(lower, upper),\n statistic=t_stat,\n dof=dof,\n pvalue=p_value,\n effect_size_constituents=effect_size_constituents)", "def pred_sentiment(self, value):\n self._pred_sentiment = value", "def test_predict_ci():\n df = pd.DataFrame(dict(x=[1.0, 2.0, 3.0, 4.0]))\n\n class Model(Poisson):\n dv = \"y\"\n features = dict(x=dict(transformer=lambda x: x.x, prior=dist.Normal(0, 1)))\n\n # ci = yhat when no variation in samples\n config = {\"samples\": {\"x\": onp.ones((2, 10))}}\n model = Model.from_dict(config)\n pred = model.predict(df, ci=True).round(5).astype(\"float32\")\n 
assert pred.y.equals(pred.ci_lower)\n assert pred.y.equals(pred.ci_upper)\n\n # lower < yhat < upper when some variation in samples\n config = {\"samples\": {\"x\": onp.random.normal(size=(2, 100)) * 0.1}}\n model = Model.from_dict(config)\n pred = model.predict(df, ci=True)\n assert (pred.y > pred.ci_lower).all()\n assert (pred.y < pred.ci_upper).all()", "def test_result_code(self):\n result_avp = avp.AVP('Result-Code', avp.ResultCode.DIAMETER_SUCCESS)\n self.assertEqual(result_avp.value, avp.ResultCode.DIAMETER_SUCCESS)\n\n self._compare_avp(\n avp.AVP('Result-Code', avp.ResultCode.DIAMETER_SUCCESS),\n avp.ResultCodeAVP(\n 268, 2001, vendor=avp.VendorId.DEFAULT,\n flags=avp.FLAG_MANDATORY,\n name='Result-Code',\n ),\n )", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._adjust_contrast_img(results, self.factor)\n return results", "def AppliesToResult(self, result: 'BaseResult') -> bool:\n assert isinstance(result, BaseResult)\n return (self._comp(result.test) and self.tags <= result.tags)", "def set_result(self, result):\n self._result = result\n self._set_done()", "def set_contrast(self, contrast):\n if contrast < 0 or contrast > 255:\n raise ValueError('Contrast must be a value from 0 to 255 (inclusive).')\n self.command(SSD1306_SETCONTRAST)\n self.command(contrast)", "def score(self, X, y, predict_results=None, style=\"accuracy\"):\n results = predict_results\n if results is None:\n results = np.reshape(self.predict(X)[0], np.shape(y))\n if style=='accuracy':\n correct = 0\n for scored, expected in zip(results, y):\n if scored == expected:\n correct += 1\n return 0 if len(results) == 0 else (correct / len(results)) * 100.0\n if style=='mse':\n summer = 0\n count = 0\n for scored, expected in zip(results, y):\n summer = summer + ((scored - expected) ** 2)\n count = count + 1\n return summer / count", "def compute_prec(self):\n cond_number = np.linalg.cond(self.cov_)\n if cond_number > 100: # 1/sys.float_info.epsilon:\n print('Bad conditioning! ' +\n 'condition number is {}'.format(cond_number))\n self.prec_ = linalg.inv(self.cov_)\n\n return self" ]
[ "0.73178154", "0.72186536", "0.72186536", "0.64052767", "0.624179", "0.624179", "0.6131234", "0.6131234", "0.60332805", "0.60332805", "0.5945399", "0.59088707", "0.5768737", "0.5661176", "0.51978236", "0.5128352", "0.5126153", "0.5094373", "0.5067891", "0.50525135", "0.5010972", "0.4993349", "0.49844217", "0.4849719", "0.4830913", "0.47913557", "0.4768947", "0.47072694", "0.46972948", "0.4671615", "0.46100256", "0.46081093", "0.46071675", "0.4568349", "0.45658818", "0.45473647", "0.4541649", "0.45085144", "0.44913217", "0.44420385", "0.44417423", "0.44322526", "0.44282585", "0.44130406", "0.43755004", "0.43641788", "0.43633595", "0.43612805", "0.43612805", "0.43199602", "0.42962855", "0.42791313", "0.4277939", "0.42725167", "0.42593586", "0.4247824", "0.4246273", "0.42383343", "0.41806123", "0.4166495", "0.4152302", "0.41464967", "0.41336706", "0.41322476", "0.41275918", "0.41245455", "0.4119344", "0.41096652", "0.41096652", "0.41096652", "0.41096652", "0.41096652", "0.40986252", "0.40976557", "0.408953", "0.40872353", "0.40819284", "0.40750366", "0.40724903", "0.40651292", "0.40638977", "0.40547115", "0.40545762", "0.40515763", "0.404671", "0.40455928", "0.4044338", "0.4044338", "0.40389964", "0.40367818", "0.40269172", "0.4017521", "0.4010473", "0.4006746", "0.4002884", "0.4000649", "0.39983457", "0.39964837", "0.39955246" ]
0.72880876
2
Returns the model properties as a dict
def to_dict(self):
    result = {}

    for attr, _ in six.iteritems(self.openapi_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            if attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value

    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n for prop in self.__properties__\n if prop in vars(self)\n }\n rv.update(self._props)\n return rv", "def get_properties(self):\n return self.properties", "def get_properties():", "def getProperties():", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def properties(self):\r\n if self._properties is None:\r\n res = self._con.get(self._url, {'f':'json'})\r\n self._properties = PropertyMap(res)\r\n return self._properties", "def getProperties(self):\n return self.properties", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def json_properties(self):\n attributes = []\n all = vars(self)\n for var in all:\n if var[:1] != '_':\n attributes.append(var)\n if isinstance(self, db.Model):\n properties = self.properties().keys()\n for property in properties:\n if property[:1] != '_':\n attributes.append(property)\n return attributes", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. 
<modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def as_dict(self):\n result = {}\n for attr in self.__attr:\n result[attr] = getattr(self, attr)\n return result", "def to_dict_model(self) -> dict:\n return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())", "def get_properties():\n properties = dict()\n properties['size'] = list()\n properties['color'] = list()\n properties['quality'] = list()\n u = models.Size.query.all()\n for i in u:\n properties['size'].append(i.size_name)\n u = models.Color.query.all()\n for i in u:\n properties['color'].append(i.color_name)\n u = models.Quality.query.all()\n for i in u:\n properties['quality'].append(i.quality_name)\n return make_response(jsonify(properties))", "def get_modelDict(self):\n return self.__modelDict", "def attributes(self):\n return dict(self.__attributes)", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def to_dict(self):\n result = {}\n for p in self.json_properties():\n value = getattr(self, p)\n if isinstance(value, datetime.datetime):\n value = value.strftime('%s%f')[:-3]\n result[Jsonifiable.transform_to_camelcase(p)] = value\n return result", "def properties(self):\n return self._props", "def properties(self):\n pass", "def to_dict(self):\n d = {}\n for attr in self.__class__.attributes:\n d[attr] = getattr(self, attr)\n return d", "def properties_get(self):\n return self._get('properties')", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def getPropertyDict(self):\n \n d = self.getChild('__properties')\n if d:\n return d.getDict()\n else:\n return {}", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def get_attributes(self):\n return dict(self.attributes) # return the attributes", "def to_dict(self, include=None):\n _MODEL = type(self)\n repr_dict = {}\n if include is None:\n include = []\n for name, prop in _MODEL._properties.iteritems():\n if hasattr(prop, 'public') and getattr(prop, 'public', False):\n include.append(name)\n\n for name in include:\n # check if this property is even allowed to be public\n # or has a value set\n if not hasattr(self, name):\n continue\n\n value = getattr(self, name)\n if type(getattr(_MODEL, name)) == ndb.StructuredProperty:\n if isinstance(value, list):\n items = []\n for item in value:\n items.append(item.to_dict(include=None))\n repr_dict[name] = items\n else:\n repr_dict[name] = value.to_dict(include=None)\n elif isinstance(value, date):\n repr_dict[name] = value.isoformat()\n elif isinstance(value, ndb.Key):\n repr_dict[name] = value.urlsafe()\n else:\n repr_dict[name] = value\n\n if self._key:\n repr_dict['key'] = self.get_key_urlsafe()\n return repr_dict", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k == 'POSSIBLE_METHODS':\n continue\n if k == 'keysamplers':\n properties[k] = [i.to_dict() for i in self.__dict__[k] if hasattr(i,'to_dict')]\n elif k in {'pooler'}:\n properties[k] = self.__dict__[k].to_dict()\n 
else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def get_all_properties(cls):\n return ['key', 'id'] + _.keys(cls._properties)", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def properties(self):\n\n return self._properties", "def ToDict(self):\n atributes_dictionary = {}\n for key, value in self.__dict__.iteritems():\n atributes_dictionary[key] = value\n return atributes_dictionary", "def properties(self):", "def properties(self):", "def properties(self):", "def modelPropertiesDictionary(sql_row_list):\n \n properties_dictionary = \\\n {\n \"id\": sql_row_list[0],\n \"name\": sql_row_list[1],\n \"last_deploy_timestamp\": sql_row_list[2],\n \"active_version\": sql_row_list[3],\n \"build_id\": sql_row_list[4]\n };\n\n return properties_dictionary;", "def as_dict(self):\n data = dict()\n for name in self.fields:\n val = getattr(self, name)\n if isinstance(val, Model):\n val = val.as_dict()\n elif isinstance(val, list) and val and isinstance(val[0], Model):\n val = [sub.as_dict() for sub in val]\n data[name] = val\n return data", "def to_dict(self):\n if self._dict is not None:\n return self._dict\n\n result = {}\n for key in self.ATTRIBUTES:\n value = getattr(self, key)\n if value:\n result[key] = value\n self._dict = result\n return result", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def to_dict(self):\n _dict = {}\n for f in self._meta.fields:\n if f.name == 'created':\n _dict[f.name] = str(f.value_from_object(self))\n else:\n _dict[f.name] = f.value_from_object(self)\n\n return _dict", "def to_dict(self):\r\n return self.__dict__", "def properties(self):\n return None", "def properties(self):\n return None", "def to_dict(self):\n return attr.asdict(self)", "def as_dict(self):\n return self.__dict__", "def _get_model_state(self) -> dict:\n return dict(model=self.model, kwargs=self._model_kwargs)", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n # \"created_by\": self.created_by,\n # \"created_on\": self.created_on,\n # \"modified_by\": self.modified_by,\n # \"modified_on\": self.modified_on\n }", "def properties(self):\r\n return resources.Properties(self)", "def attributes(self):\n params = self.model.param_array\n return {'parameters': params}", "def properties(self, pk):\n return JsonResponse(self._get_properties(pk))", "def to_dict(self):\n return vars(self)", "def to_dict(self):\n\n # Check if is the right instance.\n if isinstance(self, db.Model):\n # construct a dictionary from column names and values.\n dict_representation = {c.name: getattr(self, c.name) for c in self.__table__.columns}\n return dict_representation\n else:\n raise AttributeError(type(self).__name__ + \" is not instance of \" + db.Model.__name__)", "def bson_properties(self):\n return []", "def to_dict(self):\n return {\n \"id\": self.id,\n \"name\": self.name\n }", "def get_dict(self):\n return", "def to_dict(self):\n return to_dict(self.__dict__)", "def to_json(self):\n properties = self.to_dict()\n if isinstance(self, db.Model):\n properties['id'] = unicode(self.key().id())\n return json.dumps(properties)", "def to_dict(self):", "def to_dict(self):\n return 
self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def to_dict(self):\n return self.__dict__", "def get_attributes(self) -> Dict[str, str]:\n pass", "def config(self) -> ModelConfigDict:\n return self.config_obj.to_dict()", "def properties(self):\n return self.properties_with_uid[1:]", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def to_dict(self):\n properties = {}\n for k in self.__dict__:\n if k in {'idsSoFar'}:\n continue\n else:\n properties[k] = deepcopy(self.__dict__[k])\n return properties", "def serialise(self):\n return {\n 'id': self.id,\n 'category_id': self.category_id,\n 'name': self.name,\n 'description': self.description,\n 'quantity': self.quantity,\n 'price': self.price,\n 'user_id': self.user_id\n }", "def getPropertiesAll():", "def get_all_properties(self) -> dict:\n return self._request(\n \"post\",\n URL,\n json=attr.asdict(\n Body(\"getAllProperties\", API_VERSION),\n filter=attr.filters.exclude(attr.fields(Body).params),\n ),\n )", "def model_info(self):\n if not self._model_info:\n self._load_model_info()\n try:\n data = json.loads(self._model_info)\n except (TypeError, ValueError):\n data = {}\n return data", "def to_dict(self):\n return {\n 'name': self.get_name(),\n 'description': self.get_description()\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n }", "def as_dict(self):\n return self.__dict__", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n }", "def asdict(self):\n return attr.asdict(self)", "def to_dict(self) -> dict:", "def getDict(self):\n res = {}\n for attr, value in self.__dict__.iteritems():\n if type(attr) is IntType or type(attr) is StringType or type(attr) is LongType or type(attr) is UnicodeType:\n res[attr] = value\n elif isinstance(attr, datetime.datetime):\n res[attr] = value.isoformat('-')\n \n return res", "def attributes(self):\n return self.__dict.keys()", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def dict(self):\n return self.__dict__", "def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}" ]
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.690053", "0.6881568", "0.6881568", "0.6857664", "0.68415916", "0.68122137", "0.680096", "0.67914945", "0.6757063", "0.6753585", "0.6741746", "0.6741746", "0.6741746", "0.6735291", "0.67126125", "0.6697801", "0.6695801", "0.6689893", "0.6680752", "0.66802895", "0.66802895", "0.66802895", "0.66547817", "0.66495687", "0.6633999", "0.6619567", "0.6619567", "0.66156983", "0.66049474", "0.6590706", "0.6590706", "0.6590206", "0.6587873", "0.65861845", "0.65822417", "0.65794736", "0.65792733", "0.657747", "0.6571183", "0.65662557", "0.65637356", "0.6539919", "0.65396816", "0.65283066", "0.65252614", "0.6513477", "0.65098846", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.65077883", "0.6507418", "0.6505772", "0.65015876", "0.64951885", "0.64951885", "0.64951885", "0.64857763", "0.6474329", "0.6469453", "0.64684683", "0.6453606", "0.6453024", "0.6453024", "0.6430734", "0.6429058", "0.6426903", "0.64215595", "0.64201874", "0.6417152", "0.6414739", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.6411571", "0.64035517" ]
0.0
-1
Returns the string representation of the model
def to_str(self):
    import simplejson as json
    if six.PY2:
        import sys
        reload(sys)
        sys.setdefaultencoding("utf-8")
    return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n model_fields = get_model_fields(\n opts.model,\n foreign=False,\n m2m=False,\n exclude=self.exclude_from_str\n )\n # TODO: replace the above with the below to remove the get_model_fields call:\n # model_fields = [\n # f for f in opts.get_fields()\n # if f.concrete\n # and not (f.primary_key or f.is_relation or f.name in self.exclude_from_str)\n # ]\n result = \" \".join(\n [\n str(fld.value_from_object(self))\n for fld in model_fields\n if fld.value_from_object(self)\n ]\n )\n return result.strip() or super().__str__()", "def __str__(self):\n return '%s%s' % (self.name, ' - %s' % self.model if self.model else '')", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __str__(self):\n model = self._meta.verbose_name.title()\n return f\"{model:s}: {self.name:s}\"", "def __repr__(self):\n\n mod = f\"{self.__class__.__name__} Model\"\n try:\n mod += f': {self.filename}'\n except AttributeError:\n pass\n s = [mod]\n for name, v in self.metadata.items():\n s += [f\"{name:16} : {v}\"]\n return '\\n'.join(s)", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def __str__(self):\n return str(self.serialize())", "def __str__ (self) :\n\n return self.as_string()", "def __str__(self):\n\n return self.toString()", "def __str__(self):\n msg = [\n f'{self.model=}',\n f'{self.field=}',\n f'{self.fxx=}',\n f'{self.date=}',\n f'{self.priority=}',\n ]\n return '\\n'.join(msg)", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.extended_object.get_title()\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return grid_search_to_str(self.model)", "def __str__(self):\n return self.toString()", "def __str__(self):\n return str(self.__dict__)", "def __str__(self):\n return str(self.__dict__)", "def to_representation(self) -> str:\n raise NotImplementedError()", "def __str__(self):\n return str(self.obj)", "def __str__(self):\n return self.make_flat()", "def dump_model(self):", "def __str__(self):\n return str(self.__dict__['_obj'])", "def __str__(self) -> str:\n model_str = [\"\\nModel info:\\n\", \" Unimodal encoder:\\n\"]\n\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_encoder[modality]}\")\n\n model_str.append(\"\\n\\n Unimodal decoder:\\n\")\n for modality in range(self.num_modalities):\n model_str.append(f\" ({modality + 1}) {self.unimodal_decoder[modality]}\")\n\n if self.multimodal_decoder is not None:\n model_str.append(\"\\n\\n Multimodal decoder:\\n\")\n model_str.append(f\" {self.multimodal_decoder}\")\n\n return \"\".join(model_str)", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence 
lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __repr__(self):\n return '<ModelSignature(model_name=%r)>' % self.model_name", "def __str__(self):\n return str(self.get_data())", "def __str__(self):\n return f\"model {self._name}\"", "def __str__(self):\n\n return self.raw_field", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def serialize(self):\n\n\t\treturn str(self)", "def __str__(self):\n return self.get_str()", "def serialize(self):\n\n return str(self)", "def __str__(self) -> str:\n if self.name_field:\n return str(getattr(self, self.name_field))\n # noinspection PyUnresolvedReferences\n data = [\n # Collect the string representations of related objects.\n # getattr(self, fk_field.attname) and\n # fk_field.value_from_object(self) would only return the primary\n # key of the related object.\n str(getattr(self, fk_field.name))\n for fk_field in get_model_fields(\n self._meta.model, base=False, foreign=True, m2m=False\n )\n if not fk_field.null\n ]\n if len(data) < 2:\n # Cannot build a more meaningful representation than the default.\n return super().__str__()\n else:\n template = \"{}\" + \" ({})\" * (len(data) - 1)\n return template.format(*data)", "def __str__(self):\n return self.s", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + str(len(self.punctuation))\n return s", "def dumps(self, indent=0):\n outstr = \" \"*indent + \"MewloDbModel object '{0}' attribute values:\\n\".format(self.__class__.__name__)\n public_props = (name for name in dir(object) if not name.startswith('_'))\n for name in public_props:\n outstr += \" \"*indent + \"{0}: {1}\\n\".format(name, str(getattr(self,name)))\n return outstr", "def __str__(self):\n model = self._meta.verbose_name.title()\n title = self.title or str(_(\"Empty 
title\"))\n\n return f\"{model:s}: {title:s}\"", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()", "def __repr__(self):\n return self.to_str()" ]
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442643", "0.74416703", "0.7433768", "0.7411771", "0.7405439", "0.7379557", "0.7361716", "0.7361716", "0.732774", "0.7325511", "0.732528", "0.73097324", "0.73078936", "0.73001266", "0.7296789", "0.7292791", "0.7289445", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7287187", "0.7279803", "0.7261615", "0.7250399", "0.7244789", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068", "0.7223068" ]
0.0
-1
Returns true if both objects are equal
def __eq__(self, other):
    if not isinstance(other, PcrTestRecordResult):
        return False

    return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if isinstance(self, other.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def 
__eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False", "def __eq__(self, other):\r\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n # Ensure same class and values match\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n else:\n return False", "def is_equal(self, a, b):\n return a is b", "def is_equal(self, a, b):\n return a == b", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\n return self is other", "def __eq__(self, other):\r\n if isinstance(other, self.__class__):\r\n return self.__dict__ == other.__dict__\r\n else:\r\n return False", "def is_equal(o1: object, o2: object) -> bool:\n if o1 is None and o2 is None:\n return True\n if o1 is None:\n return False\n return o1 == o2", "def __eq__(self,other):\n return self is other", "def is_equal(self, a, b):\n return a.X[0] == b.X[0]", "def __eq__(self, other):\n return type(self) == type(other) and self.id == other.id", "def __eq__(self, other) -> bool:\n if json.dumps(self.data,sort_keys=True) == json.dumps(other.data,sort_keys=True):\n return True\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Single2HaObject):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.__dict__ == other", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__hash__() == other.__hash__()\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n if self.primary != other.primary:\n return False\n return True", "def __eq__(self, other) -> bool:\n if other is None:\n return False\n return self.__hash__() == other.__hash__()", "def __eq__(self, other):\n if not isinstance(other, ObjectInfo):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: _TT, other: object) -> bool:\n return self.eq(other) # type: ignore", "def __eq__(self, other):\n return id(self) == id(other)", "def __eq__(self, other) -> bool:\n return type(self) == type(other) and \\\n self._id == other.id and \\\n self.code == other.code and \\\n self.name == other.name and \\\n self.gender == other.gender and \\\n self.date_of_birth == other.date_of_birth", "def equals(self, other): # -> bool:\n ...", "def equals(self, obj: object) -> bool:\n ...", "def __eq__(self, other):\n for attr in self._attrs_to_save:\n try:\n if getattr(self, attr) != getattr(other, attr):\n return False\n except AttributeError:\n return False\n return True", "def __eq__(self, other):\n if type(other) is type(self):\n return (self.x == other.x and self.y == other.y and self.z == other.z)\n return False", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.x == other.x and self.y == other.y\n return False", "def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return NotImplemented\n\n return (\n self.name,\n self.submit_at,\n self.subreddit,\n self.title,\n self.body_template,\n ) == (\n other.name,\n other.submit_at,\n other.subreddit,\n other.title,\n other.body_template,\n )", "def __eq__(self, other):\n # Check that we share 
the same class as this object\n if not isinstance(other, type(self)):\n return False\n\n return hash(self) == hash(other)", "def __eq__(self, other):\n if not isinstance(other, PreviewObjectAutofill):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return equal(self, other)", "def __eq__(self, other: Any) -> bool:\n return self.__class__ is other.__class__ and self.identifier == other.identifier", "def __eq__(self, other):\n return self.__id == other.get_id()", "def __eq__ (self, other):\n if type(self) == type(other):\n return self._m == other._m\n else:\n return False", "def __eq__(self, other):\n if not isinstance(other, Referent):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return self.properties == other.properties", "def __eq__(self, other):\n return self.items() == other.items()", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return hash(self) == hash(other)", "def __eq__(self, other):\n return self.x == other.x and self.y == other.y", "def __eq__(self, other):\n\n if self is other:\n return True\n return hash(self) == hash(other)", "def __eq__(self, other):\n if other._field1 == self._field1:\n return True\n return False", "def same_as(self, other):\n return super().__eq__(other)", "def __eq__(self, other):\n try:\n return other and \\\n self.id == other.id\n\n except AttributeError:\n return False", "def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()", "def __eq__(self, other):\n if not isinstance(other, Fiddle):\n return False\n\n return self.__dict__ == other.__dict__" ]
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.79670393", "0.7961088", "0.7961088", "0.79433626", "0.79303336", "0.7926563", "0.7897525", "0.78826123", "0.78826123", "0.78806067", "0.7872423", "0.7868354", "0.78668815", "0.7825702", "0.7819993", "0.78162885", "0.78078854", "0.78068274", "0.7796298", "0.7794721", "0.7784825", "0.77790844", "0.7769397", "0.77534705", "0.7746211", "0.7741107", "0.77282816", "0.7725766", "0.7719537", "0.770273", "0.7685999", "0.7677552", "0.76739407", "0.7664857", "0.76557016", "0.7655046", "0.76282835", "0.7625795", "0.76242626", "0.76237214", "0.76237214", "0.76237214", "0.7617347", "0.7600536", "0.7599156", "0.7595863", "0.75945824", "0.7594092", "0.75899327" ]
0.0
-1
Returns true if both objects are not equal
def __ne__(self, other):
    return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n return not self.__ne__(other)", "def __ne__(self, other):\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == other", "def __ne__ (self, other):\n return not self == other" ]
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
Set the angle of the servo motor to input angle in degrees
def set_angle(self, angle):
    new_angle = angle

    # Declaring conversion constants
    angle_min = 0
    angle_max = 180
    angle_range = angle_max - angle_min
    dc_range = self._dc_max - self._dc_min

    # Enforcing angle range
    if new_angle > angle_max:
        new_angle = angle_max
    elif new_angle < angle_min:
        new_angle = angle_min

    # Scaling input angle to an appropriate duty cycle
    duty_cycle = ((dc_range / angle_range) * (new_angle - angle_min)) + self._dc_min
    self._servo_pwm.changeDutyCycle(duty_cycle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_angle(self, value):\n if not -90 <= value <= 90:\n raise ValueError('Servo angle must be between -90 and 90 degrees')\n self.duty_cycle = ...", "def set_angle(self, ang):\n if ang < 0:\n ang = 0\n elif ang > 180:\n ang = 180\n dutyCycle = 5 + (ang*5/180)\n self.servoPort.ChangeDutyCycle(dutyCycle)", "def set_angle(self, req_angle):\n self._current_angle = req_angle\n req_angle_pulse = (self._pulse_max - self._pulse_min) / (self._angle_max - self._angle_min) * (\n req_angle - self._angle_max) + self._pulse_max\n self.pwm.set_pwm(SERVO_CHANEL, 0, int(round(req_angle_pulse)))", "def wheel_angle(self, angle):\n self.angle = angle", "def set_servo_angle(self, servo_number, angle):\n cmd = protocol.SET_ANGLE.format(str(servo_number), str(angle))\n response = self.__send_and_receive(cmd)\n if response.startswith(protocol.OK.lower()):\n return True\n else:\n return False", "def angle(self, to_angle):\n\n # Restrict to -90..+90 degrees\n to_angle = int(min(max(to_angle, -90), 90))\n\n ratio = (to_angle + 90) / 180.0\n pulse_range = self.pulse_left_ns - self.pulse_right_ns\n pulse = self.pulse_left_ns - round(ratio * pulse_range)\n\n self.pi.set_servo_pulsewidth(self.gpio, pulse)", "def setAngle(channel, angle, delta=170, min_delay=0.02):\n delay = max(delta * 0.003, min_delay)\n zero_pulse = (servoMin + servoMax) / 2 # half-way == 0 degrees\n pulse_width = zero_pulse - servoMin \n pulse = zero_pulse + (pulse_width * angle / 80)\n pwm.setPWM(channel, 0, int(pulse))\n time.sleep(delay)", "def setAngle(self,angle = 2.5):\n pass", "def setAngle(self, angle):\n self.vector.angle = angle", "def setAngle(self, angle):\n self._angle = (angle + math.pi / 2) % math.pi - math.pi / 2\n # self._angle = angle % (2*math.pi)", "def adjustAngle(self, angle):\n\t\tif self.timeout <= 0:\n\t\t\tself.angle = (self.angle + angle) % 360", "def set_angle(self, angle=0.0):\n self.angle = angle", "def angle(self, value):\n if value is None:\n value = 0.0\n\n self.__angle = value", "def _joint_angle_control(self):\n\n error = self.target_pos - self.robot_arm_pos\n return self._pd_control(error) + self.torque", "def set_angle(self, angle):\n return self.bot_client.send_command(_Command.SetAngle, angle)", "def set_param_motor():\n servo.setSpeed(0, 0) # max = 255\n servo.setAccel(0, 0)\n servo.setSpeed(1, 150) # max = 255\n servo.setAccel(1, 150)", "def angle(self) -> float:\n ...", "def set_servo_angle(self, channel: int, angle: float):\n if channel < 0 or channel > 15:\n raise ValueError('Channel must be between 0 and 15')\n\n if channel not in self._servos:\n raise KeyError('There is no servo registered on channel %d' % channel)\n \n servo = self._servos[channel]\n servo.set_angle(angle)", "def angle(self, angle: int, time: int = 0, /) -> None:", "def change_angle(self, new_angle):\r\n self.angle = new_angle", "def rotate(self, value):\n self.pi.set_servo_pulsewidth(self.steering_pin, self.convert_radians_to_PW(value))", "def set_angle(self, angle_key: Union[EKT, str], v: float): # -> None:\n ...", "def setAngle(self, value):\n n, a = Vector.polar(self.components)\n self.components = Vector.cartesian([n, value])", "def on_cam_base_set_angle_btn_clicked(self):\n pitch = self.cam_base_pitch_hSlider.value()\n yaw = self.cam_base_yaw_hSlider.value()\n pitch, yaw = self.control1.device.set_init_basecam_angle(pitch, yaw)\n status = \"set INIT angles as, pitch: \" + str(pitch) + \", yaw: \" + str(yaw)\n self.cam_set_status_txt(status)", "def setAngle(self,a):\n self.angle = a\n if self.drawn == True:\n self.draw()", 
"def angle(self, angle):\n\n self._angle = angle", "def convert_angle(self, event):\n try:\n #Compare other unit to one unit(degree)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"arcminute\": 0.016667, \"arcsecond\": 0.000278, \"circle\": 360, \"degree\": 1.0, \"gon\": 0.9, \"gradian\": 0.9, \"mil(Nato)\": 0.05625, \"mil(Soviet Union)\": 0.06, \"mil(Sweden)\": 0.057143, \"octant\": 45.0, \"quadrant\": 90.0, \"radian\": 57.29578, \"revolution\": 360.0, \"sextant\": 60.0, \"sign\": 30.0, \"turn\": 360.0}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def change_angle(self, up_or_down):\n self.angle += up_or_down * math.pi / 180", "def go(self, position):\n if self._is_on:\n val = min(180.0, position)\n val = max(0.0, position)\n val = (val / 180.0) * (self._max_duty - self._min_duty) + self._min_duty\n val = val * 100.0\n self._pwms.set_duty(self._pin_index, val)\n else:\n raise Exception(\"You must turn the servo on by calling the `on()` method before you can tell the servo to `go()`!\")", "def go_to_angle(user_theta):\n global rate\n theta_new = user_theta - theta\n if theta_new > 0:\n # Left\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = 0.4\n pub.publish(speed)\n rate.sleep()\n else:\n # Take a Right\n while abs(user_theta - theta) > 0.05:\n speed.linear.x = 0\n speed.angular.z = - 0.4\n pub.publish(speed)\n rate.sleep()\n speed.linear.x = 0\n speed.angular.z = 0\n pub.publish(speed)", "def setArticulateAngle(self, angle):\n self.articulatePID.setSetpoint(angle)", "def set_angle(self, node_uuid, index, data, pin=None):\n self._bus.i2c_acquire()\n try:\n if pin is None:\n pin = self.values['num'].data\n angle, angle_min, angle_max = data.split('|')\n value = self.translate(float(angle), float(angle_min), float(angle_max))\n logger.debug('[%s] - set_angle on pin %s to %s', self.__class__.__name__, pin, angle)\n self._bus.pca9685_manager.set_pwm(pin, 0, value)\n self.values['angle']._data = data\n except Exception:\n logger.exception('[%s] - Exception when set_angle', self.__class__.__name__)\n finally:\n self._bus.i2c_release()", "def set_wrist(self, angle):\n return self.set_servo_angle(protocol.SERVO_HAND, angle)", "def angle(self, angle):\n self._angle = angle\n self.x_rate = self._rate * cos(self._angle)\n self.y_rate = self._rate * sin(self._angle)", "def set_angle(self, value):\n scene = self.scenes[self.current_scene]\n scene.set_perspective(angle=value)\n angle, ratio, near, far = scene.perspective()\n self.redraw()", "def angle(self):\n return 0", "def angle(self):\n self.convert_window(\"Angle\", \"degree\", [\"arcminute\", \"arcsecond\", \"circle\", \"degree\", \"gon\", \"gradian\", \"mil(Nato)\", \"mil(Soviet Union)\", \"mil(Sweden)\", \"octant\", \"quadrant\", \"radian\", \"revolution\", \"sextant\", \"sign\", \"turn\"])", "def servo_rotate(self,position):\n \n if not(isinstance(position, int)):\n print(\"la position passée doit être un int, ca n'est pas le cas\")\n elif (position < 0) or (position > 180):\n print(\"la position passée doit être entre 0 et 180 
degrès, ca n'est pas le cas\", e)\n else:\n print(\"Le robot a maintenant une position de \" + str(position))", "def settle(self):\n if (self.angle >= self.max_angle) or (\n self.angle <= -self.max_angle\n ): # time to reverse\n print(\"reverse\", self.angle, self.max_angle)\n self.speed *= -0.9 # damped\n self.max_angle *= 0.9\n if self.speed > 0:\n self.angle = self.max_angle\n else:\n self.angle = -self.max_angle\n\n self.angle += radians(self.speed)\n print(self.angle, self.max_angle, self.speed)\n self.x = self.cx + self.length * sin(self.angle)\n self.y = self.cy + self.length * cos(self.angle)", "def do_azangle(self):\n angle_1, angle_2 = cbp.potentiometer.main()\n current_angle = angle_2\n #print(current_angle)\n self.azangle = current_angle\n return current_angle", "def calibrer(self):\n self._angle_courant = self._angle_initial\n self.angle(self._angle_initial)", "def my_turn_in_place(robot, angle, speed):\n\n t = (1/speed) * numpy.abs(angle)\n\n circum = 2 * math.pi * get_distance_between_wheels()\n arc_length = (numpy.abs(angle)/360) * circum\n mm_speed = arc_length / t\n mm_speed = mm_speed if angle>0 else -mm_speed\n\n robot.drive_wheels(-mm_speed, mm_speed, duration=t)", "def angle(self, angle_deg) -> None:\n ...", "def move(self, theta, phi):\n print('Writing the theta angle: {}'.format(str(theta)))\n self.servo.write(theta)\n #print(\"Serial Port: \" + str(self.servo.read()))\n print('Writing the theta angle: {}'.format(str(phi)))\n self.servo.write(phi)\n #print(\"Serial Port: \" + str(self.servo.read()))", "def angle(self) -> int:", "def move_to_angle(self, target_angle):\n\n self.motor.directly_goto_position(target_angle)\n self.current_angle = target_angle", "def turn_by(self, dangle, dt):\n # Don't turn too fast\n self.angle += np.clip(dangle, -dt * self.turning_rate, dt * self.turning_rate)\n\n # Keep angle in range [-pi, pi)\n self.angle = normalize_angle(self.angle)", "def detector_angle(self, angle):\n self.rotate_rad(-radians(angle))", "def turn_to(self, angle, dt):\n a = normalize_angle(angle - self.angle)\n self.turn_by(a, dt)", "def srotate(self, angle):\n\n self.angle = self.angle + angle", "def settiltangle(self, angle):\n tilt = -angle * self._degreesPerAU * self._angleOrient\n tilt = (tilt * math.pi / 180.0) % (2*math.pi)\n self.pen(resizemode=\"user\", tilt=tilt)", "def change_angle(self, new_angle):\n if type(new_angle) not in [int, float]:\n raise ValueError('angle must be int or float.')\n self.__angle = new_angle", "def update_angle(self, mouse):\n offset = (mouse[1]-self.player.rect.centery, mouse[0]-self.player.rect.centerx)\n self.angle = degrees(atan2(*offset))\n print(\"angle:\", self.angle)", "def move_to_angle(self, target_angle, divide_count, delay):\n\n self.motor.softly_goto_position(target_angle, divide_count, delay)\n self.current_angle = target_angle", "def get_servo_angle(self, servo_id=None, is_radian=None, is_real=False):\r\n return self._arm.get_servo_angle(servo_id=servo_id, is_radian=is_radian, is_real=is_real)", "def correctAngle(self):\n\n self.linearVector = Vector3(x=0.2, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=self.angleError)", "def angle(self, dangle_deg: float) -> None:\n ...", "def turn_angle(self, angle, speed=1.0):\n mt_buf = bytearray()\n error = random.normalvariate(0.5, self.standard_deviation)\n\n res, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(\n self.client_id,\n 'youBot_ref' + self.postfix,\n vrep.sim_scripttype_childscript,\n 'GetRobotAngle', [], [], [], 
mt_buf,\n BLOCKING_MODE)\n\n start_angle = ret_floats[0] + error\n delta = 0\n\n # вызов скрипта поворота\n vrep.simxCallScriptFunction(\n self.client_id,\n 'youBot_ref' + self.postfix,\n vrep.sim_scripttype_childscript,\n 'Turn', [],\n [speed], [], mt_buf,\n BLOCKING_MODE)\n\n while delta <= angle:\n res, ret_ints, ret_floats, ret_strings, ret_buffer = vrep.simxCallScriptFunction(\n self.client_id,\n 'youBot_ref' + self.postfix,\n vrep.sim_scripttype_childscript,\n 'GetRobotAngle', [], [], [], mt_buf,\n BLOCKING_MODE)\n\n current_angle = ret_floats[0] + error\n delta += math.fabs(current_angle - start_angle)\n start_angle = current_angle\n\n vrep.simxCallScriptFunction(\n self.client_id,\n 'youBot_ref' + self.postfix,\n vrep.sim_scripttype_childscript,\n 'Turn', [], [0.0], [], mt_buf,\n BLOCKING_MODE)", "def teleopPeriodic(self):\n\n turningValue = (self.angleSetpoint - self.gyro.getAngle()) * self.pGain\n if self.joystick.getY() <= 0:\n # forwards\n self.myRobot.arcadeDrive(self.joystick.getY(), turningValue)\n elif self.joystick.getY() > 0:\n # backwards\n self.myRobot.arcadeDrive(self.joystick.getY(), -turningValue)", "def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1", "def rotate_rad(self, angle):\n self.beam_angle += angle\n self.xy = rotate(self.xy, angle)\n self.angle += angle", "def normalize_angle(self, angle):\n while angle > np.pi:\n angle -= 2.0 * np.pi\n\n while angle < -np.pi:\n angle += 2.0 * np.pi\n\n return angle", "def adjAngle(self, amt): \r\n\r\n self.angle = self.angle + radians(amt)\r\n self.redraw()", "def degPerRotChanged(self, val):\n self.degreesPerRotation = val", "def transmission(self, angle):\n if isnan(angle):\n return nan\n\n if angle == self.motor_max:\n return pow(10, -self.OD_max)\n if angle == self.motor_min:\n return pow(10, -self.OD_min)\n\n p_min = self.motor_range[0]\n p_max = self.motor_range[1]\n # Assume the transmission is flat outside the angular range\n if angle < min(p_min, p_max):\n angle = min(p_min, p_max)\n if angle > max(p_min, p_max):\n angle = max(p_min, p_max)\n # Inside the angular range, assume that there is a linear gradient of OD \n OD = self.OD_range[0] + (angle - p_min) / (p_max - p_min) * self.OD_range[1]\n transmission = pow(10, -OD)\n return transmission", "def platform_auto_calibrate_azimuth_servo(self):\n self._platform_auto_calibrate_check()\n self.platform.auto_calibrate_azimuth_servo()", "def turn(self, direction, map_direction, gyro_angle):\n\n initial_angle = gyro_angle.value\n start_frequency = 150\n max_frequency = 300\n add = 0\n\n # Change the wheel spinning direction to spin in place\n direction_pin = \"DirectionMotor\" + str(direction)\n GPIO.output(pins[direction_pin], not GPIO.input(pins[direction_pin]))\n\n self.motor_right.ChangeFrequency(start_frequency)\n self.motor_left.ChangeFrequency(start_frequency)\n\n self.motor_left.start(50.0)\n self.motor_right.start(50.0)\n\n print(\"Initial angle: \" + str(initial_angle))\n\n while int(round(gyro_angle.value)) not in map_direction:\n # print(\"Angle: %.2f\" % gyro_angle.value)\n\n if start_frequency + add < max_frequency:\n add += 1\n self.motor_right.ChangeFrequency(start_frequency + add)\n self.motor_left.ChangeFrequency(start_frequency + add)\n sleep(0.005)\n\n self.motor_left.stop()\n self.motor_right.stop()\n\n print(\"End angle: \" + str(gyro_angle.value))\n\n # change the motor back to the original direction\n GPIO.output(pins[direction_pin], not 
GPIO.input(pins[direction_pin]))", "def adjust_susan(self, angle):\n ms = angle*78.8644\n ms = 5* round(ms/5, 0)\n self.susan_current_pos = int(self.susan_current_pos+ms)\n print self.susan_current_pos\n wiringpi.pwmWrite(18, self.susan_current_pos)\n time.sleep(10)", "def adjAngle(self, amt):\n \n self.angle = self.angle+radians(amt)\n self.redraw()", "def setAzimuthAngle(self, angle):\n angle = int(round(angle))\n if angle != self._azimuth:\n self._azimuth = angle\n self._updateLight()\n self.sigAzimuthAngleChanged.emit()", "def rotate(self, angle):\n n, a = Vector.polar([self.x, self.y])\n a += angle\n self.x = n * cos(a)\n self.y = n * sin(a)", "def normalize_angle(self, angle):\n angle = math.fmod(angle, 2 * math.pi)\n\n if (angle > math.pi):\n angle = angle - 2 * math.pi\n elif (angle < -math.pi):\n angle = angle + 2 * math.pi\n return angle", "def rotate_to(self, angle, degrees = False):\n\t\ttarget = angle * pi / 180 if degrees else angle\n\n\t\tcurr = self.angle\n\t\tdiff = (target - curr) % (2*pi)\n\t\tif abs(diff - (2*pi)) < diff:\n\t\t\tdiff = diff - (2*pi)\n\t\tself.rotate_by(diff)", "def input_angle(self, reqAngle):\n # clear flags when new data arrives\n if self.finalAngle != reqAngle:\n self.flag1 = False\n self.flag2 = False\n\n if reqAngle < self.currentAngle and not self.flag2:\n #print '[1] Requested angle:',reqAngle,' - Current angle:',self.currentAngle\n self.currentAngle -= self.speed\n self.flag1 = True\n\n elif reqAngle > self.currentAngle and not self.flag1:\n #print '[2] Requested angle:',reqAngle,' - Current angle:',self.currentAngle\n self.currentAngle += self.speed\n self.flag2 = True\n\n self.finalAngle = reqAngle\n self._rotate(self.currentAngle)", "def rotate_servo_rel(pi, pin, pct):\n try:\n pw_old = pi.get_servo_pulsewidth(pin)\n except:\n pw_old = 0 # no PWM has been set yet, so assume 0 \n pct_old = pulsewidth2pct(pw_old)\n if pct_old == -25: # no PWM output commanded, go to center first to get a reference point\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(50))\n pct_old = pulsewidth2pct(pi.get_servo_pulsewidth(pin))\n pct_cmd = pct_old + pct\n # saturate input to protect servo \n if pct_cmd < 10:\n pct_cmd = 10\n elif pct_cmd > 90:\n pct_cmd = 90\n pi.set_servo_pulsewidth(pin, pct2pulsewidth(pct_cmd))", "def get_angle(self, angle_):\n return self.two_pi * angle_", "def servo_force(self, *args, **kwargs) -> Any:\n pass", "def servo_config(self, pin, min_pulse=544, max_pulse=2400, angle=0):\n if pin > len(self.digital) or self.digital[pin].mode == UNAVAILABLE:\n raise IOError(\"Pin %s is not a valid servo pin\")\n data = itertools.chain([pin], to_two_bytes(min_pulse),\n to_two_bytes(max_pulse))\n self.send_sysex(SERVO_CONFIG, data)\n \n # set pin._mode to SERVO so that it sends analog messages\n # don't set pin.mode as that calls this method\n self.digital[pin]._mode = SERVO\n self.digital[pin].write(angle)", "def steps_to_angle():\n pass", "def change_angle_by(self, delta_angle, divide_count, delay, direction):\n\n target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)\n\n self.move_to_angle(target_angle, divide_count, delay)\n self.current_angle = target_angle", "def change_angle_by(self, delta_angle, direction):\n target_angle = round(self.__calc_target_angle(degree_to_radian(delta_angle), direction), 5)\n\n self.move_to_angle(target_angle)\n self.current_angle = target_angle", "def configure_servo(self, board):\n self.servo = board.get_pin(f\"d:{self.pin}:p\")\n board.servo_config(\n pin = self.pin,\n min_pulse = 
544,\n max_pulse = 2400,\n angle = 93\n )", "def angle(self) -> float:\n return self._angle", "def angle(self) -> float:\n return self._angle", "def angle(self):\n if self.__trigger == gyro_trigger_mode.GET_ANGLE_TRIGGER_READ:\n self.read_and_update_angle()\n return self.__angle", "def set_speed(self, ss, radians=False):\n if ss >= 1.0 and not radians:\n self._servo_speed = math.radians(ss)\n elif ss >= 0.016 and radians:\n self._servo_speed = ss\n return self._servo_speed", "def input_angle(self, reqAngle):\n # clear flags when new data arrives\n if self.finalAngle != reqAngle:\n self.flag1 = False\n self.flag2 = False\n\n if reqAngle < self.currentAngle and not self.flag2:\n #print '[1] Requested angle:',reqAngle,' - Current angle:',self.currentAngle\n self.currentAngle -= self.speed\n self.flag1 = True\n\n elif reqAngle > self.currentAngle and not self.flag1:\n #print '[2] Requested angle:',reqAngle,' - Current angle:',self.currentAngle\n self.currentAngle += self.speed\n self.flag2 = True\n\n self.finalAngle = reqAngle\n print_value3(self.screen, self.currentAngle)\n\n # blit dial\n self.screen.blit(self.dial, (self.dialPos))\n # then rotate\n self._rotate_1000ft_needle(self.currentAngle)\n self._rotate_100ft_needle(self.currentAngle)", "def set_speed(self, om_left, om_right):\n analog_om_left = self.LEFT_CONST + om_left*4\n analog_om_right = self.RIGHT_CONST - om_right*4\n self.servoWriteMicroseconds(self.PIN_LEFT, analog_om_left)\n self.servoWriteMicroseconds(self.PIN_RIGHT, analog_om_right)", "def anticlockwise_rotate(self, speed):\n\t\tif self._last_dir != 'a': # \"a\" indicates that the last rotation of this wheel was anticlockwise.\n\t\t\tGPIO.output(self._dir_pin_1, GPIO.LOW)\n\t\t\tGPIO.output(self._dir_pin_2, GPIO.HIGH)\n\t\t\tself._last_dir = 'a'\n\n\t\tself._current_dc_val = speed\n\t\tif self._current_dc_val != self._last_dc_val:\n\t\t\tself._motor_pwm.ChangeDutyCycle(speed) # 0.0 - 100.0\n\t\t\tself._last_dc_val = self._current_dc_val", "def _rotate(self, angle):\n angle *= self._degreesPerAU\n self._orient = self._orient.rotate(angle)", "def on_cam_base_yaw_hSlider_valueChanged(self, value):\n self.cam_base_yaw_ledit.setText(str(50 + value))", "def move(self, dt):\n lims = self.settings['agent']['jointLimits']\n # print '[move] curr joint Angle:'\n # print self.jointAngle\n # print '[move] curr speed:'\n # print self.speed\n\n J = self.jointAngle + dt * np.array(self.speed)\n self.jointAngle[0] = min(max(J[0], lims[0][0]), lims[0][1])\n self.jointAngle[1] = min(max(J[1], lims[1][0]), lims[1][1])\n self.forward_kinematics()", "def clockwise_rotate(self, speed):\n\t\tif self._last_dir != 'c': # \"c\" indicates that the last rotation of this wheel was clockwise.\n\t\t\tGPIO.output(self._dir_pin_1, GPIO.HIGH)\n\t\t\tGPIO.output(self._dir_pin_2, GPIO.LOW)\n\t\t\tself._last_dir = 'c'\n\n\t\tself._current_dc_val = speed\n\t\tif self._current_dc_val != self._last_dc_val:\n\t\t\tself._motor_pwm.ChangeDutyCycle(speed) # 0.0 - 100.0\n\t\t\tself._last_dc_val = self._current_dc_val", "def rel_angle(self, angle):\n steps = int(angle / 360 * self.steps_per_rev)\n self.steps(steps)", "def set_actuator(self, action):\n deltav = action[0]\n vt = np.clip(self.vt + deltav, -self.maxV, self.maxV)\n self.vt = vt\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=0,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=vt)\n p.setJointMotorControl2(bodyUniqueId=self.botId,\n jointIndex=1,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=-vt)", "def read_and_update_angle(self):\n if 
self.is_disposed:\n raise ObjectDisposedException(\"AxisGyroscope\")\n\n self.__multiAxisGyro.read_gyro()\n angular_velocity = (((self.__value - self.__offset) / 40) * 40)\n if self.__factorSet:\n angular_velocity /= self.__degPerSecondFactor\n\n delta = self.__multiAxisGyro.time_delta\n self.__angle = (self.__angle + angular_velocity * delta / 1000)\n return angular_velocity", "def right(self, angle):\r\n self.dir += math.radians(angle)", "def do_altangle(self):\n nave = 10000\n x, y, z, angle = cbp.phidget.main(nave)\n current_angle = angle\n #print(current_angle)\n self.altangle = current_angle\n return current_angle", "def set_angel(self):\n self.angle = math.degrees(math.atan2(self.next.y - self.y, self.next.x - self.x)\n - math.atan2(self.prev.y - self.y, self.prev.x - self.x))\n\n if self.angle < 0:\n self.angle += 360", "def motorDirection(self, motorPin, direction):\n # print \"motorPin: \", motorPin\n # print \"direction: \", direction\n GPIO.output(motorPin, direction)" ]
[ "0.8180504", "0.78847355", "0.7495005", "0.733505", "0.7213522", "0.71889293", "0.71747357", "0.7051908", "0.7026474", "0.70241266", "0.69745034", "0.6967439", "0.6912899", "0.6871412", "0.6827222", "0.681534", "0.67669934", "0.674949", "0.6662368", "0.66219145", "0.6615597", "0.65893835", "0.65709454", "0.6533985", "0.65146327", "0.6499005", "0.64938873", "0.6442631", "0.64220995", "0.6419179", "0.6389302", "0.63837326", "0.63797176", "0.63101596", "0.628135", "0.62716734", "0.6211227", "0.6197511", "0.6196123", "0.6193385", "0.6179731", "0.6177166", "0.61762047", "0.6175382", "0.61529464", "0.61484843", "0.614", "0.61378235", "0.61285174", "0.61019874", "0.6072164", "0.60631764", "0.6037589", "0.6032297", "0.6030346", "0.60257447", "0.5990104", "0.59565157", "0.594801", "0.5946541", "0.594067", "0.5939792", "0.5936184", "0.5935491", "0.5931359", "0.5922977", "0.59187824", "0.59177595", "0.5903461", "0.5902055", "0.5900299", "0.58982146", "0.58950496", "0.5893433", "0.5891158", "0.58910877", "0.588897", "0.58793515", "0.58739704", "0.587186", "0.5861766", "0.5845043", "0.5843205", "0.5843205", "0.5843115", "0.582312", "0.58181787", "0.58174217", "0.5812661", "0.581118", "0.5808146", "0.5806723", "0.5804016", "0.5780978", "0.57689065", "0.5766031", "0.5764848", "0.57628644", "0.57607746", "0.5759947" ]
0.7820531
2
Will be called before every test
def setUp(self): # Create table db.create_all() #Create test registree mcdonalds = Store(name='mcdonalds', shop_address='63 Northbrook st', shop_postcode='rg14 1ae', takeaway=True) tesco = Store(name='tesco', shop_address='London rd, Newbury', shop_postcode='rg14 2bp', takeaway=False) coop = Store(name='coop', shop_address='Andover rd', shop_postcode='rg19 3bp', takeaway=False) #adding test receipts to db receipt1 = Receipts(most_expensive=5.09, cost_of_alcohol=0, receipt_total=11.36, takeaway=True, delivery_fee=1.99, delivery_time_mins=28, store_id=1, shop=mcdonalds) receipt2 = Receipts(most_expensive=2.80, cost_of_alcohol=16, receipt_total=11.90, store_id=2, shop=tesco) receipt3 = Receipts(most_expensive=3.00, cost_of_alcohol=0, receipt_total=18.76, store_id=2, shop=tesco) receipt4 = Receipts(most_expensive=2.00, cost_of_alcohol=0, receipt_total=20.91, store_id=2, shop=tesco) #Add and save to database store_list = [mcdonalds, tesco, coop] receipt_list = [receipt1, receipt2, receipt3, receipt4] for i in store_list: db.session.add(i) for i in receipt_list: db.session.add(i) db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self) :\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def setUp(self):\r\n pass # nothing used by all\r", "def setUp(self):\r\n pass", "def setUp(self):\n\n return", "def setUp(self):\n MainTests.setUp(self)", "def setUp(self):\n \n pass", "def setUp(self):\n\n BaseTest.setUp(self)", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\r\n pass # nothing required by all\r", "def setUp(self) -> None:\n pass", "def setUp(self) -> None:\n pass", "def setUp(self):\n setUp()", "def setUp(self):\n super(BasicTestCase, self).setUp()", "def setUp(self):\n pass #because we dont have anything to setup.", "def setUp(self) -> None:\n return super().setUp()", "def setUp(self):\n print('Calling \\'setUp\\'')", "def before_test(self, func, *args, **kwargs):\n pass", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def do_before(self):\r\n pass", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def setUpTestCase(self):\n pass", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n print(\"\\nIn setUp()...\")", "def setUp(self):\n raise NotImplementedError", "def setUp(self):\n test_env_setup()", "def setUp(self):\n\n self._set_up()", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n super(TestCase, self).setUp()\n self._context = CallContext()", "def setUp(self):\n self.Reinitialize()" ]
[ "0.8457524", "0.83148706", "0.82529396", "0.82529396", "0.81978506", "0.81978506", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.81887835", "0.8182578", "0.8182239", "0.81814253", "0.81651247", "0.8163294", "0.81149805", "0.81015486", "0.80990344", "0.80671906", "0.80671906", "0.80671906", "0.80671906", "0.80671906", "0.80671906", "0.80671906", "0.80671906", "0.80671906", "0.80523765", "0.80339086", "0.80339086", "0.8031016", "0.8022039", "0.8021113", "0.7933479", "0.78991604", "0.789816", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.7895648", "0.78877145", "0.783448", "0.7788758", "0.7768576", "0.7768576", "0.7768576", "0.7768576", "0.7768576", "0.7768576", "0.775879", "0.7748891", "0.77073926", "0.7672057", "0.76657", "0.76657", "0.7662876", "0.7652488" ]
0.0
-1
Will be called after every test
def teardown(self): db.session.remove() db.drop_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def after_test(self, test_results):\n pass", "def after_all(self) -> None:", "def tearDown(self):\n pass", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\n\t\tpass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass" ]
[ "0.84368247", "0.8336089", "0.82743084", "0.8255529", "0.8255529", "0.8255529", "0.8220987", "0.8220987", "0.8220987", "0.8220987", "0.8220987", "0.8202792", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264", "0.81971264" ]
0.0
-1
Prompt user for input and continue to do so until input is valid. This function takes two required inputs, the message to display, and the limit of characters required. If the user enters something too long, they are prompted again until the input is correct. If the optional isNumber parameter is True, then it will also continue to prompt the user until a valid number is input.
def LimitedInput(message, limit, isNumber=False): keepAsking = True while keepAsking: answer = input(message) if len(answer) > limit: print("The input must be", limit, "characters or less.") else: keepAsking = False if isNumber is True and CheckNumber(answer) is False: print("The input must be a number.") keepAsking = True return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ask_number(message: str) -> int:\n global number\n assert isinstance(message, str), \"message should be a string\"\n stop_condition2 = False\n while not stop_condition2:\n try:\n number = int(input(message))\n if number < lower_range:\n print(\"Please pick a number within the range\", lower_range, \"and\", upper_range, \".\")\n elif number > upper_range:\n print(\"Please pick a number between\", lower_range, \"and\", upper_range, \".\")\n else:\n stop_condition2: bool = True\n except ValueError as ve:\n print(\"This is not a number.\")\n return number", "def ask_user():\r\n password_lenght = 0\r\n while password_lenght == 0:\r\n try:\r\n password_lenght = int(input(\"How long password you want? Enter the number... \"))\r\n if password_lenght <= 0:\r\n print(\"Try to enter any number greater than 0...\")\r\n continue\r\n return password_lenght\r\n except Exception:\r\n continue", "def get_number_input(msg=\"Provide a number: \", num_type=int):\n while True:\n try:\n num = num_type(input(msg))\n except ValueError:\n print(f\"Whoops!! Please enter a correct number of {num_type}!!\")\n continue\n else:\n print(\"Number accepted!!\")\n return num", "def getSecretMessage(limit):\n\n\tsecret = None\n\twhile secret == None or len(secret) not in range(1, limit+1):\n\t\tsecret = raw_input(\"Enter the secret message (Max length %d): \" % limit)\n\t\tif len(secret) > limit:\n\t\t\tprint \"Invalid message: too long!\"\n\t\telif len(secret) < 1:\n\t\t\tprint \"Invalid message: empty input!\"\n\n\treturn secret", "def pedir_entero(msg, min, max):\n while True:\n n = str(raw_input(msg))\n if not n.isdigit() :\n show_msg(\"Oops! Parece que eso no era un numero entero\")\n continue\n n = int(n)\n if n <= max and n >= min :\n return n\n else:\n show_msg(\"Numero fuera de rango\")\n continue", "def prompt_with_limits(prompt, default=None, low_limit=None, high_limit=None):\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n try:\n v = float(value)\n if (low_limit is not None and v < low_limit) or \\\n (high_limit is not None and v > high_limit):\n value = None\n except (ValueError, TypeError):\n value = None\n elif default is not None:\n value = default\n\n return value", "def PickNumber(lenList, message = ' To select the correct option pick a number in range ',min = 1, typeInput = int):\n while True:\n try:\n input1 = typeInput(input('\\n'+message+str(min)+'-'+str(lenList)+': \\t'))\n except ValueError:\n print( 'That\\'s not a number!')\n else:\n if min <= input1 <= lenList:\n return input1\n else:\n print( 'Number out of range. 
Try again!')", "def get_user_text() -> str:\n validinput = False\n while not validinput:\n intext = input(\"Which of your most favorite quotes can Polly cook up for you?\")\n if len(intext) > POLLY_CHAR_LIMIT:\n print(\"You have entered in more text that Polly can support in one call.\")\n validinput = False\n else:\n validinput = True\n return intext", "def enterInteger(CustomMessage=\"Please enter an integer: \",\r\n CustomErrorMessage=\"The input is not an integer, please try again...\",\r\n min=None, max=None):\r\n \r\n isInteger = False\r\n while not isInteger:\r\n try:\r\n number = int(input(CustomMessage))\r\n isInteger = True\r\n except ValueError:\r\n print(CustomErrorMessage)\r\n\r\n # range parameter\r\n if type(min) is int and type(max) is int:\r\n if min > max:\r\n raise ValueError(\"parameter 'min' is larger than 'max'\")\r\n else:\r\n while min > number or number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number within \"+str(min)+\" to \"+str(max)+\": \")\r\n elif type(min) is int:\r\n while min > number:\r\n number = enterInteger(CustomMessage=\"Please input a number larger than \" + str(min) + \": \")\r\n elif type(max) is int:\r\n while number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number smaller than \" + str(max) + \": \")\r\n\r\n return number", "def validate(prompt, char_type, case):\n if char_type == 'A' and case == \"U\":\n while True:\n user_input = input(prompt).upper()\n try:\n if len(user_input) > 245:\n print(f'\\n.............\\n'\n f'Invalid input you entered {len(user_input)} characters\\n'\n f'Character limit is 245.\\n')\n elif user_input.replace(\" \", \"\").isalpha():\n return user_input\n print(\"\\n.............\\n\"\n \"Invalid input, non letter character.\\n\")\n except (ValueError, TypeError):\n print(\"\\n.............\\n\"\n \"Invalid input, non letter character.\\n\")\n elif char_type == 'I':\n while True:\n user_input = input(prompt)\n try:\n if 26 > int(user_input) > 0:\n return int(user_input)\n print(\"\\n.............\\n\"\n \"Invalid input, outside range of 1-25.\\n\")\n except (ValueError, TypeError):\n print(\"\\n.............\\n\"\n \"Invalid input, not a number.\\n\")", "def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid", "def number_len(password_length):\r\n while True:\r\n numb_length = input('How much numbers you want in password? 
At least 1 : ')\r\n try:\r\n numb_length = int(numb_length)\r\n if 1 <= numb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(numb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(numb_length))\r\n return numb_length", "def prompt_number(prompt, low_limit = 1, high_limit = 65535):\n while True:\n try:\n response = int(prompt_base(prompt))\n if low_limit <= response <= high_limit:\n return response\n except:\n pass", "def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))", "def prompt_user():\n print()\n while True:\n print('Please choose one of the following options:')\n print(\"1: Send a Thank You\")\n print(\"2: Create a report\")\n print(\"3: Send letters to everyone\")\n print(\"4: Match donations\")\n print(\"5: Quit\")\n try:\n return int(input(\"Option: \"))\n except ValueError as e:\n print(\"***INVALID Option Selected***\")", "def get_employee_input_int(message):\n while True:\n user_input = input('{}: '.format(message))\n\n # Type validation\n try:\n number = int(user_input)\n break\n except ValueError:\n print('You must enter a whole number.')\n continue\n\n #Range Validation\n # if valid_range and number not in valid_range:\n # _min = min(valid_range)\n # _max = max(valid_range)\n # print('You must enter a number from {} to {}.'.format(_min, _max))\n # continue\n return number", "def confirm():\n end_loop = False\n while not end_loop:\n confirmation = input(\"\"\"Would you like to continue with your choice?\n[1] No [2] Yes\nEnter a number please: \"\"\")\n if not confirmation or confirmation.isspace():\n print(\"You have not entered anything!\")\n try_again()\n elif confirmation.isnumeric() == True:\n if 0 < int(confirmation) < 3:\n if int(confirmation) == 1:\n confirmation = False\n return confirmation\n else:\n confirmation = True\n return confirmation\n end_loop = True\n else:\n print(\"You have not entered a valid number. Please enter a number between 1 and 2.\")\n else:\n print(\"Please enter a number only.\")\n try_again()", "def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)", "def clean_input(prompt='Error'): # A special input function that will reject a\r\n # user's input of text when a number is requested -- if no prompt is\r\n # specified in the program, it will display \"Error\"\r\n text = True\r\n phrase = '0'\r\n while text:\r\n phrase = input(prompt + '\\n')\r\n try: # Adapted from an example in the ThinkPython textbook (15.7) -\r\n # Checks whether the input is a number, positive or negative. 
If\r\n # not, rejects the input and user gets to try again\r\n float(phrase)\r\n text = False\r\n except ValueError:\r\n print(\"Error: Non-Numeric Entry Detected\")\r\n # if phrase.isnumeric(): # Checks for a positive number (negative\r\n # rejected as well as text) - replaced with superior form from textbook\r\n # example\r\n # return float(phrase) # Return the number the user entered\r\n # else:\r\n # print(\"Error: Non-Numeric Entry Detected\")\r\n return float(phrase) # Return the number the user entered\r", "def not_number_rejector(message):\n actual_number = False\n\n while not actual_number:\n guess = str(input(message))\n if guess.isdigit():\n actual_number = True\n return int(guess)\n else:\n print(\"Not a number\")", "def check_user_input_if_integer(user_input):\n integer_input = ''\n while not integer_input:\n try:\n integer_input = int(user_input)\n except ValueError:\n logging.warn('only integer number accepted')\n user_input = input('enter a number: ')\n\n return integer_input", "def get_number():\n\n while True:\n user_number_str = input('Digite um número para saber o seu fatorial: ').strip()\n\n if user_number_str.isnumeric():\n return int(user_number_str)\n else:\n print('Valor inválido.')", "def get_input():\n\n end_loop = True # Used to stop the loop for user input\n while end_loop:\n try:\n user_input = str(float(input(\"Please enter a number: \")))\n end_loop = False # The loop breaks once the user has entered valid input\n except():\n print(\"Invalid input, please try again.\")\n\n return user_input", "def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length", "def get_input():\n numb = int(input(\"Enter a number 1-10 \"))\n while True:\n if numb > 0 and numb < 10:\n return(numb)\n else:\n return(\"Please enter a value 1-10\")", "def get_input(msg):#function which catches all user input which is invalid (not numbers) for all the shapes\n value = None\n while not value:\n value = input(msg)\n if not value.isnumeric():#if not a valid number print the following message \n print(\"Please enter a valid number\")\n value = None\n else:\n return int(value)#once a correct number is entered the number is returned and program contiues ", "def number_format(num):\n while True:\n try:\n user_input = float(input(num))\n return user_input\n except ValueError:\n print(\"Error. Please enter the desired number. 
You may use \"\n \"decimals.\")\n except:\n print(\"Error: unknown.\")", "def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')", "def validation_method(input_value):\r\n while True:\r\n try:\r\n valor = float(input(input_value))\r\n return valor\r\n except ValueError:\r\n print(\" ingresa un número\")", "def maximum():\n if len(a_variable.get()) > MAX_CHARACTERS:\n messagebox.showwarning(title=\"Max Characters Exceeded!\",\n message=\"Please enter no more than 25\\n\"\n \"characters, thanks.\")\n clear_box() # Clears the entry field", "def prompt(msg, default=NO_DEFAULT, validate=None):\n while True:\n response = input(msg + \" \").strip()\n if not response:\n if default is NO_DEFAULT:\n continue\n return default\n if validate is None or validate(response):\n return response", "def user_input(msg, valid, default=None, timeout=None):\n\n # Add trailing whitespace to `msg` if not already present and append\n # default reply (if provided)\n suffix = \"\" + \" \" * (not msg.endswith(\" \"))\n if default is not None:\n default = default.replace(\"[\", \"\").replace(\"]\",\"\")\n assert default in valid\n suffix = \"[Default: '{}'] \".format(default)\n query = msg + suffix\n\n if timeout is None:\n return _get_user_input(query, valid, default)\n else:\n procQueue = multiprocessing.Queue()\n proc = multiprocessing.Process(target=_queuing_input,\n args=(procQueue,\n sys.stdin.fileno(),\n query,\n valid,\n default)\n )\n proc.start()\n countdown = tqdm(desc=\"Time remaining\", leave=True, bar_format=\"{desc}: {n} \",\n initial=timeout, position=1)\n ticker = 0\n while procQueue.empty() and ticker < timeout:\n time.sleep(1)\n ticker += 1\n countdown.n = timeout - ticker\n countdown.refresh() # force refresh to display elapsed time every second\n countdown.close()\n proc.terminate()\n\n if not procQueue.empty():\n choice = procQueue.get()\n else:\n choice = default\n return choice", "def get_user_input(prompt):\n while True:\n user_input = input(prompt)\n try:\n tmp = int(user_input)\n return tmp\n except ValueError:\n print('Not a number')", "def prompt(prompt, validator=(lambda x: True), hint=None):\n user_input = input(prompt)\n while not validator(user_input):\n user_input = input(prompt)\n return user_input", "def _multiple_choice_validate(s: str, len_options: int):\n if not s:\n raise ValueError('Please enter a value between {} and {}'.format(\n 1, len_options + 1))\n\n if not str.isnumeric(s):\n raise ValueError('Please enter a numeric value')\n\n if 1 <= int(s) <= (len_options + 1):\n return\n else:\n raise ValueError('Please enter a value between {} and {}'.format(\n 1, len_options + 1))", "def answer_input_validation():\r\n user_input_is_valid = False\r\n while user_input_is_valid == False:\r\n try:\r\n users_answer_to_problem = int(input(\"\\nEnter the answer to the addition problem: \"))\r\n user_input_is_valid = True\r\n break\r\n # The ValueError is used because the user must enter an integer. If the \r\n # answer given is not an integer, they are scolded and reprompted.\r\n except ValueError:\r\n user_input_is_valid = False\r\n print(\"That is not an integer. 
Please enter an appropriate answer.\")\r\n return users_answer_to_problem", "def validated_input(input_msg: str, error_msg: str, validator, screenshot:str =None):\n while(True):\n reset_screen()\n\n if screenshot is not None:\n print(screenshot)\n\n data = input(input_msg)\n\n try:\n return validator(data)\n except:\n reset_screen()\n popup(error_msg.format(data), screenshot)\n input(\"\")", "def get_num_names_from_user():\n valid = False\n num_names = 1 #default value\n\n # user info\n print(\"\\nType number of names to generate.\")\n print(\"(You can directly Enter to generate 1 name)\")\n\n while not valid:\n num_names = input(\"Your input: \")\n\n if num_names.strip() == \"\":\n num_names = 1\n break\n\n try:\n num_names = max(int(num_names), 1)\n valid = True\n except:\n print(\"\\nPlease type an integer number.\\n\")\n\n return num_names", "def main():\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n while not is_valid_password(password):\n print(\"Invalid password!\")\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n print(\"*\" * len(password))", "def _validate_clear_args(limit):\n min_limit = 1\n max_limit = 20\n default_error = f\"[Limit] The `limit` argument must be a number between {min_limit} and {max_limit}\"\n try:\n limit = int(limit)\n except (ValueError, TypeError):\n return default_error\n if not (min_limit <= limit <= max_limit):\n return default_error\n return None", "def validatePhoneNumber(self):\n ## Declaring a Flag to control a while loop\n phone_number_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not phone_number_ok:\n ## Asking for a phone number and checkig to see if it is 10 digits\n if self.phone_number.isdigit():\n if len(self.phone_number) == 10:\n phone_number_ok = True\n return True\n else:\n print(\"Please Enter a 10 digit phone number.\")\n return False\n \n else:\n print(\"You have enetered an invalid phone number. Please try again.\")\n return False", "def take_easy():\r\n length = int(input())\r\n message = input()\r\n if length >= len(message):\r\n print(\"||||||\" + \"%.5s\" %message + \"||||||\")\r\n else:\r\n print(\" -----\")\r\n print(\"-|-|-|-|-|-----\" + \"%.4s\" %message + \")))\")\r\n print(\" -----\")", "def get_num(*, prompt='Number? '):\n num = 0\n while True:\n try:\n num = int(input(prompt))\n except ValueError:\n print('Was that a number? Try again!')\n continue\n else:\n break\n return num", "def valid(question, first, last):\n\n while 1:\n try:\n choice = input(question)\n if choice < first or choice > last or not isinstance(choice, int):\n print \"\\nInvalid input, please try again.\"\n else:\n return choice\n except Exception:\n print \"\\nInvalid input, please try again.\"", "def guest_num(max=20):\n rand_num = random.randint(1, 101)\n retries = 0\n while retries <= max:\n try:\n n = int(input('Input a number: '))\n if n == rand_num:\n print('YOU WIN!')\n break\n elif n > rand_num:\n print('Iputed number is great than result number. Just retry!')\n retries += 1\n else:\n print('Iputed number is less than result number. 
Just retry!')\n retries += 1\n except ValueError:\n print('Only can input a number!')\n except:\n print('Only can input a number!')\n else:\n print('YOU LOST!')", "def getInput(image):\r\n length=ImageUtilities.getImageLength(image);\r\n message=input(\"Please input a message of: \"+str(length)+\" or less characters.\");\r\n if(len(message)>length):\r\n print(\"Error, too many characters imput!\");\r\n print(\"Expected: \"+str(length)+\" characters or less but got: \"+str(len(message))+\" characters.\");\r\n return;\r\n return message;", "def passwd_prompt():\n\n print(\"Passwords MUST contain AT LEAST: one lower-case letter,\" \n \"one number, one symbol, and be a MINIMUM of 8 characters in length,\"\n \"e.g. r!ght2oE\")\n\n while True:\n\n passy = getpass.getpass(prompt=\"Enter password for user: \")\n confirm_passy = getpass.getpass(prompt=\"To confirm, \" \\\n \"re-enter password: \")\n\n # check for the following conditions: \n # user input matches\n # length of input is at least 8 characters\n # input contains at least 1 number \n # input contains at least 1 letter \n # input contains at least 1 symbol \n \n if passy != confirm_passy \\\n or len(passy) <8 \\\n or not re.search('\\d', passy) \\\n or not re.search(r\"[a-z]\",passy) \\\n or not re.search(r\"[ !#$%&'()*+,-./[\\\\\\]^_`{|}~\"+r'\"]', passy): \n \n print(TRY_AGAIN)\n continue \n \n else:\n print(\"Password meets complexity requirement. Continuing...\") \n return passy", "def voteInput(number):\n\n counter = False\n while counter == False:\n\n if isInteger(number) == True:\n number = int(number)\n if voteCheck(number) == True:\n counter = True\n else:\n print(\"\\n\\t\\tPlease enter an integer between {} and {}\"\n .format(MIN_VOTES, MAX_VOTES))\n number = input(\"\\n\\tEnter votes: \")\n\n else:\n print(\"\\n\\t\\tPlease enter an integer between {} and {}\"\n .format(MIN_VOTES, MAX_VOTES))\n number = input(\"\\n\\tEnter votes: \")\n\n return number", "def secure_input(self, minimum, maximum):\n wrong_input = True\n while wrong_input:\n while True:\n try:\n choice = int(input())\n break\n except ValueError:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n if choice < minimum or choice > maximum:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n else:\n wrong_input = False\n return choice", "def _is_user_wants_to_continue(self):\n\n # dummy value to get in while\n user_input = -1\n while user_input != 1 and user_input != 2:\n\n try:\n # convert the string into int\n user_input = int(input())\n except ValueError:\n print(\"Please enter a number\")\n continue\n except Exception as e:\n print(\"something went wrong please try again \" + str(e))\n continue\n\n # check if the user_input was one of the options\n # if not present a error massage and try again\n if user_input != 1 and user_input != 2:\n print(\"Please enter a valid number(1-2)\")\n continue\n\n return user_input == 1", "def input_validator(message, number=False, blank=False):\n val = input(message)\n if val.strip() == '' and not blank:\n print(\"You haven't type anything here\")\n return input_validator(message, True) if number else input_validator(message)\n elif val.strip() == '' and blank:\n return time.localtime(time.time())[0]\n if number:\n if not val.isdigit():\n print(\"Your input is incorrect, Please type in numbers only\")\n return input_validator(message, True)\n return int(val)\n else:\n return str(val)", "def limit_number_prompts(state: SessionState):\n if state.prompts is not None and 
len(state.prompts) > 1:\n state.prompts = [state.prompts[0]]", "def _main_():\n while True:\n num = input(\"Please enter a number or done: \")\n if num == \"done\":\n print(bold(lightgreen(\"Thank You!\")))\n break\n else:\n try:\n num = int(num)\n if num < 0:\n num = abs(num)\n if num < 100:\n print(f\"Your number is negative {tens_text(num)}\")\n elif num < 1000:\n print(f\"Your number is negative {hundreds_text(num)}\")\n elif num == 0:\n print(\"Your number is zero\")\n elif num < 100:\n print(f\"Your number is {tens_text(num)}\")\n elif num < 1000:\n print(f\"Your number is {hundreds_text(num)}\")\n except Exception:\n print(info(bold(\"Not a valid input, try again\")))", "def user_input():\n user_number = input(\"Guess a number: \")\n try:\n user_number = int(user_number)\n except:\n print(\"Please ender a valid digit!\")\n return user_input()\n else:\n if 1 <= user_number <= 25:\n return user_number\n else:\n print(\"You need to enter a digit between 0 and 50\")\n return user_input()", "def validate_num(number):\n\n if number <= 0:\n new_num = int(raw_input(\"Oops, your number has to be greater than 0. Please pick again: \"))\n return validate_num(new_num)\n\n else:\n return number", "def check_input(min_guess_range, max_guess_range):\n\twhile True:\n\t\ttry:\n\t\t\tplayerGuess = int(input('enter your guess: '))\n\t\t\tassert min_guess_range <= playerGuess <= max_guess_range\n\n\t\texcept AssertionError:\n\t\t\tprint('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))\n\t\texcept ValueError:\n\t\t\tprint('numbers only!')\n\t\telse:\n\t\t\treturn playerGuess", "def validate_inputs(name, country, catches):\n while not name:\n name = input('Player name cannot be empty: ')\n\n while not country:\n country = input('Enter a valid country name: ')\n\n while not catches:\n catches = input('Now enter number of catches record: ')\n try: # Once user has input data, try to cast it to integer to verify is not string\n int(catches)\n except ValueError: # if input data is not an integer, print message and clear catches value to keep asking user to enter data\n print('Data given is not a number')\n catches = ''\n\n return name, country, catches", "def _ask_prompt(question: str,\n console: io.IO,\n validate: Optional[Callable[[str], None]] = None,\n default: Optional[str] = None) -> str:\n validate = validate or (lambda x: None)\n while True:\n answer = console.ask(question)\n if default and not answer:\n answer = default\n try:\n validate(answer)\n break\n except ValueError as e:\n console.error(e)\n\n return answer", "def user_choice():\n number_choice=50 #for enter in a loop\n while number_choice < 0 or number_choice > 49:\n try:\n number_choice=int(input(\"enter number between 0 and 49 :\")) #ask user a number and convert it in integer\n except ValueError: # if number_choice not a number\n print(\"your enter is not a number\") #display error message\n number_choice = 50 #return in a loop\n if number_choice < 0 or number_choice >49:\n print(\"your enter is not included in range\") #display error message if number is out of range\n return number_choice", "def numeric_response(prompt, units=[], num_type=int, default=None):\n if default is None:\n response = input(prompt + ': ')\n else:\n response = input(prompt + f' [{default}]' + ': ')\n try:\n if response == '' and default is not None:\n return standardize_response(default, units, num_type)\n elif response == '':\n print('Please enter a response.')\n return numeric_response(prompt, units, num_type, default)\n return 
standardize_response(response, units, num_type)\n except ValueError:\n print('Number must be an integer or a unit was incorrectly entered.')\n return numeric_response(prompt, units, num_type, default)", "def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. Try again.\")\n continue\n else:\n break\n\n return value", "def get_number():\n valid_input = False\n while not valid_input:\n try:\n user_num = int(input(\"Enter a number between {} and {}: \".format(LOWER_BOUND, UPPER_BOUND)))\n if LOWER_BOUND <= user_num <= UPPER_BOUND:\n return user_num\n except ValueError:\n pass\n print(\"That is not a valid number !\")", "def guess_number():\n searched_number = random.randint(1, 10)\n while True:\n try:\n users_number = int(input(\"Guess the number: \"))\n except ValueError:\n print(\"It's not a number!\")\n continue\n if users_number > searched_number:\n print(\"Too big!\")\n elif users_number < searched_number:\n print(\"Too small!\")\n else:\n return \"You win!\"", "def get_integer_input(message):\n\n value_as_string = input(message)\n while not value_as_string.isnumeric():\n print('The input must be an integer')\n value_as_string = input(message)\n\n return int(value_as_string)", "def request_input(self, possibles=[]):\n answer = self.console.input('Type your request here:')\n if len(possibles) > 0 and self.numeric:\n invalid = True\n while invalid:\n try:\n answer = int(answer)\n invalid = False\n break\n except:\n answer = self.console.input('Type your request here (numbers only):')\n\n answer = possibles[answer - 1]\n else:\n if answer.find('quit') != -1:\n self.running = False\n else:\n if answer.find('quit') != -1:\n self.running = False\n return answer", "def user_selection(num, text):\n lst = list(range(1,num+1))\n answer= 0\n while answer not in lst:\n try:\n answer = int(input(text))\n \n if answer not in range(1,num+1):\n raise ValueError\n break\n except ValueError:\n print('Select a valid Number')\n\n return answer", "def user_input():\n #Error messages\n num_invalid = \"Invalid input, please insert a valid number\"\n str_invalid = \"Invalid input, please try again following the input conventions requested\"\n\n #Model Type\n model_type = input(\"What kind of models do you want to build? (intravenous bolous (ib) / subcutaneous (sc)): \")\n model_type = model_type.lower()\n while model_type not in {'ib', 'sc'}:\n print(str_invalid)\n model_type = input(\"What kind of models do you want to build? (intravenous bolous (ib) / subcutaneous (sc)): \")\n model_type = model_type.lower()\n\n #Compound\n compound = input(\"What compound or drug are you using?\")\n \n #Dose Type\n dose_type = input(\"How is the dose delivered? Constantly over time (c), Instantaneously (i) or Repeated instantaneous doses (r): \")\n dose_type = dose_type.lower()\n while dose_type not in {\"c\",\"i\",\"r\"}:\n print(str_invalid)\n dose_type = input(\"How is the dose delivered? Constantly over time (c), Instantaneously (i) or Repeated instantaneous doses (r): \")\n dose_type = dose_type.lower()\n\n if dose_type == 'c':\n while True:\n try:\n dose = float(input(\"What is the dose of \" + compound + \" that you want to test? 
(units in ng per hour): \"))\n break\n except:\n print(num_invalid)\n dose_mass = None\n time_dose = None\n num_dose = None\n \n elif dose_type == 'i':\n while True:\n try:\n dose_mass = float(input(\"What is the mass of the dose of \" + compound + \" that you want to test? (units in ng): \"))\n break\n except:\n print(num_invalid)\n dose = None\n time_dose = None\n num_dose = None\n\n elif dose_type == 'r':\n while True:\n try:\n dose_mass = float(input(\"What is the mass of the dose of \" + compound + \" that you want to test? (units in ng): \"))\n break\n except:\n print(num_invalid)\n while True:\n try:\n time_dose = float(input(\"What time period are the doses given over? (units in hours): \"))\n break\n except:\n print(num_invalid)\n while True:\n try:\n num_dose = float(input(\"How many doses are given? - this program assumes that doses are evenly spaced throughout the time period: \"))\n break\n except:\n print(num_invalid)\n dose = None\n \n #Length of simulation time\n while True:\n try:\n len_assay = float(input(\"What time period would you like to simluate the model? (units in hours): \"))\n break\n except:\n\t print(num_invalid)\n \n #Interval times\n while True:\n try:\n len_interval = float(input(\"What interval time would you like in the simulation? (units in hours): \"))\n break\n except:\n print(num_invalid)\n\n #clearance\n while True:\n try:\n clearance = float(input(\"What is the clearance rate? (units in ng/hour): \"))\n break\n except:\n print(num_invalid)\n\n \n #compartments\n compartments = []\n\n if model_type == \"ib\":\n while True:\n try:\n main_compart = input(\"Enter volume (L), transition rate (ng/hour) for the main compartment (all seperated by spaces - eg: 5 25 ): \")\n main_compart_split = main_compart.split()\n main_compart_split = [float(i) for i in main_compart_split]\n break\n except:\n print(str_invalid)\n\n main_compart_split.append(str(\"Main\"))\n compartments.append(main_compart_split)\n\n\n while True:\n try:\n num_peripherals = float(input(\"How many peripheral compartments do you want to test?: \"))\n break\n except:\n\t print(num_invalid)\n\n num_peripherals = int(num_peripherals)\n\n if num_peripherals > 0:\n \n for i in range(num_peripherals):\n while True:\n try:\n compart = input(\"Enter volume (L), transition rate (ng/hour) of the compartment (all seperated by spaces - eg: 5 25): \")\n compart_list = compart.split()\n compart_list = [float(i) for i in compart_list]\n break\n \n except:\n print(str_invalid)\n\n compart_list.append(str(\"Peripheral\"))\n compart_list.append(str(input(\"Please enter the name of the compartment (please ensure correct spelling): \")))\n compartments.append(compart_list)\n\n compart_list = None\n \n elif model_type == \"sc\":\n while True:\n try:\n sub_compart = input(\"Enter volume (L), transition rate (ng/hour) for the sub compartment (all seperated by spaces - eg: 5 25 ): \")\n sub_compart_split = sub_compart.split()\n sub_compart_split = [float(i) for i in sub_compart_split]\n break\n except:\n print(str_invalid)\n\n sub_compart_split.append(str(\"Sub\"))\n compartments.append(sub_compart_split)\n\n while True:\n try:\n main_compart = input(\"Enter volume (L), transition rate (ng/hour) for the main compartment (all seperated by spaces - eg: 5 25 ): \")\n main_compart_split = main_compart.split()\n main_compart_split = [float(i) for i in main_compart_split]\n break\n\n except:\n print(str_invalid)\n\n main_compart_split.append(str(\"Main\"))\n compartments.append(main_compart_split)\n\n while True:\n 
try:\n num_peripherals = float(input(\"How many peripheral compartments do you want to test?: \"))\n break\n except:\n\t print(num_invalid)\n \n num_peripherals = int(num_peripherals)\n\n if num_peripherals > 0:\n \n for i in range(num_peripherals):\n while True:\n try:\n compart = input(\"Enter volume (L), transition rate (ng/hour) of the compartment (all seperated by spaces - eg: 5 25): \")\n compart_list = compart.split()\n compart_list = [float(i) for i in compart_list]\n break\n \n except:\n print(str_invalid)\n \n compart_list.append(str(\"Peripheral\"))\n compart_list.append(str(input(\"Please enter the name of the compartment (please ensure correct spelling): \")))\n compartments.append(compart_list)\n compart_list = None\n\n #visualisation\n vis = input(\"Would you like to generate a graph? (Y/N): \")\n while vis not in {'Y','y','N','n'}:\n print(str_invalid)\n vis = input(\"Would you like to generate a graph? (Y/N): \") \n\n #unix timestamp\n curr_datetime = time.time()\n curr_datetime = str(curr_datetime)\n\n\n print(\"Thank you! Building model, please wait...\")\n\n\n return {\n 'model_type': model_type,\n 'compound': compound,\n 'dose_type': dose_type,\n 'dose':dose,\n 'dose_mass': dose_mass,\n 'time_dose': time_dose,\n 'num_dose': num_dose,\n 'len_assay':len_assay,\n 'len_interval':len_interval,\n 'clearance':clearance,\n 'compartments':compartments,\n 'vis':vis,\n 'curr_datetime':curr_datetime\n }", "def int_input():\n while True:\n try:\n n = int(input(\"Enter amount of cubes(n): \"))\n if n < 1 or n > 100:\n print(\"Input must be a positive integer [1, 100]!\")\n continue\n except ValueError:\n print(\"Not an integer!\")\n continue\n\n print(\"There are %d different stairs that can be build from %d cubes.\" % (amount_of_stairs(n), n))\n break", "def UserInput():\r\n Message =\"\"\r\n while Message.lstrip() ==\"\" :\r\n # prompts user for the plaintext message and repeats prompt if nothing is typed besides space and enter\r\n Message = getpass.getpass(\"Enter a message to encrypt: \") # accepts user input but in hidden text\r\n \r\n \r\n\r\n ChosenShift = 0\r\n\r\n while int(ChosenShift) < 1 or int(ChosenShift) > 25: # loop until the user chooses a shift between 1 and 25\r\n ChosenShift=input(\"Enter the amount to shift between 1 and 25: \")\r\n if ChosenShift.isdigit() == False:\r\n ChosenShift = 0 # if the user input is not a number then reassign the shift amount to zero \r\n # to restart the iteration\r\n\r\n return Message, int(ChosenShift)", "def ask_numbers(question, error):\n while True:\n value = 0\n try:\n value = int(input(question))\n except ValueError:\n print(error)\n except UnboundLocalError:\n print(error)\n except Exception:\n print(error)\n if value <= 0:\n print(\"Syötä positiivinen luku, joka on suurempi kuin 0\\n->\")\n else:\n break\n return value", "def getNumFromUser(valueType, prompt, enforcePositiveValue=False, specifyDefaultValue=False, defaultValue=0):\n while True:\n try:\n if valueType == \"integer\" or valueType == \"int\":\n\n userInput = input(prompt + \"\\n> \")\n\n if defaultValue and userInput == \"\":\n try:\n userInput = int(defaultValue)\n except ValueError:\n raise RuntimeError(\"The specified default value ({}) is invalid.\".format(defaultValue))\n else:\n try:\n userInput = int(userInput)\n except (ValueError, TypeError) as e:\n print(\"Unable to cast value ({}) as an integer. 
Please try again.\\n\".format(userInput))\n continue\n\n if valueType == \"float\":\n\n userInput = input(prompt + \"\\n> \")\n\n if defaultValue and userInput == \"\":\n try:\n userInput = float(defaultValue)\n except ValueError:\n raise RuntimeError(\"The specified default value ({}) is invalid.\".format(defaultValue))\n else:\n try:\n userInput = float(userInput)\n except (ValueError, TypeError) as e:\n print(\"Unable to cast value ({}) as a float. Please try again.\\n\".format(userInput))\n continue\n\n if enforcePositiveValue:\n if userInput > 0:\n break\n else:\n print(\"Number was specified as positive, but is not positive ({}). Please try again.\\n\".format(userInput))\n else:\n break\n except:\n raise RuntimeError(\"Invalid data type passed to getNumFromUser under the set conditions ({}).\".format(valueType))\n return userInput", "def ask_number(low, high, tries):\n the_number = None\n while the_number not in range(low, high):\n the_number = int(input(\"Enter a number between 1-100: \"))\n return the_number\n print(\"The computer has\", tries, \"tries to guess your number\\n\")", "def validate_duration_input(duration):\n if duration.isdigit():\n duration = int(duration)\n clear()\n return duration\n\n else:\n clear()\n print('** Please enter time spent on task '\n 'rounded to nearest whole minute **')\n return False", "def age_input(message):\n try:\n age = int(raw_input(message))\n return age\n except:\n return age_input(\"Enter a number: \")", "def get_num(prompt='Number? '):\n _num = 0\n while True:\n try:\n _num = int(input(prompt))\n except ValueError:\n print('Was that a number? Try again!')\n continue\n else:\n break\n return _num", "def prompt():\r\n inpt = -1\r\n valid_choices = ['1','2','3','4','5']\r\n while inpt not in valid_choices:\r\n inpt = input(\"\\nPlease select the number of the operation you wish \"\r\n \"to complete:\\n\" +\r\n \"1. Run file mover\\n2. Add directories\"\r\n \"\\n3. Remove directory\\n4. View saved directories\\n5. Quit\\n\").strip()\r\n if inpt not in valid_choices:\r\n print(\"\\n*** Invalid choice ***\")\r\n return inpt", "def pick_number(low, high, limit):\n print(\"Think of a number from \" + str(low) + \" to \" +\n str(high) +\" and I will try to guess it and I will get a total of \" + str(limit) + \" tries. Press Enter when you are ready.\")\n input()", "def get_time_period_option_from_user():\n while True:\n try:\n time_period = int(input(\"What time period would you like these songs to be from?\\n\"\n \"1. 24-hours\\n\"\n \"2. 7-days\\n\"\n \"3. 30-days\\n\"\n \"4. all\\n\\n\"\n \"Enter: \"))\n except ValueError:\n print(\"\\nSorry, that is not a number.\")\n continue\n\n valid_time_periods = ['24-hours', '7-days', '30-days', 'all']\n\n if time_period in range(1, 5):\n return valid_time_periods[time_period - 1]\n\n print(f\"\\nSorry, {time_period} is not a valid option.\")", "def validate_puzzle_string(self):\n is_puzzle_string_valid = False\n while is_puzzle_string_valid is False:\n question = \"Enter a valid puzzle. (81 inline digits where zeros \" +\\\n \"represent empty spots) E.g. 01040506.... 
and so on\\npuzzle\"\n puzzle_parameter = self.ask_user_input(question)\n if not puzzle_parameter.isdigit():\n print(\"The puzzle should contain only digits, please try again\")\n elif len(puzzle_parameter) == 81:\n is_puzzle_string_valid = True\n self.current_response = puzzle_parameter\n else:\n print(\"The puzzle should contain exactly 81 digits, please try again\")\n return is_puzzle_string_valid", "def continue_playing_validation():\r\n user_input_is_valid = False\r\n while user_input_is_valid == False:\r\n user_wants_another_problem = input(\"Would you like another problem, Y/N? \").lower()\r\n if user_wants_another_problem in [\"y\", \"n\"]:\r\n user_input_is_valid = True\r\n elif user_wants_another_problem not in [\"y\", \"n\"]:\r\n user_input_is_valid = False\r\n print(f\"The input you entered, '{user_wants_another_problem}', is not valid. Try again.\\n\")\r\n return user_wants_another_problem", "def valid_input():\n valid = False\n while not valid:\n principal = float(input(\"Please enter principal amount: $\"))\n if principal < 0 or principal > 1000000:\n print(\"Invalid amount. \", end=\"\")\n print(\"Principal must be between $0 and $1,000,000.00\")\n else:\n valid = True\n valid = False\n while not valid:\n interest = float(input(\"Please enter interest rate: %\"))\n if interest < 0 or interest > 100:\n print(\"Invalid rate. Interest rate must be between 0 and 100\")\n else:\n valid = True\n return principal, interest", "def prompt_loan_amount():\n loan_amount = normalize_loan_amount(input('\\nEnter loan amount: '))\n while loan_amount is None:\n loan_amount = normalize_loan_amount(input('Invalid input! Enter loan amount:'))\n return loan_amount", "def _get_input(prompt, options, allow_new=False, reprompt_options=None):\n\n _lwr_opts = [x.lower() for x in options]\n if reprompt_options is None:\n reprompt_options = options\n\n while True:\n _resp = input(prompt).strip()\n\n # Check that input is one of the options\n try:\n i = _lwr_opts.index(_resp.lower())\n return options[i]\n except ValueError:\n if not allow_new:\n print(f'Response must be one of the following: {\", \".join(reprompt_options)}')\n\n if allow_new and _resp: # If have a non-empty string\n return _resp", "def get_user_input():\n user_word = str(input(\"\\n[+] Enter your message> \")) # asking user for the word\n while True: # infinit loop checking for shifts\n try: # trying getting the following\n shift = int(input(\"[+] Enter number of shifts> \")) # asking user for an integar number for shifts\n except: # if user entered unexpected value like a string intead of integar\n print(\"Please enter an integar number for shifts!\\n\") # display this message\n continue # reload the loop\n break # if user entered an integar number, close the loop\n return user_word, shift # return the word and shifts", "def prompt_user_check_input(self):\r\n user_input = 0\r\n # grabs user input and changes it to an int\r\n while True:\r\n try:\r\n user_input = int(\r\n input(\"\\033[1;33mMake your move by entering the number of an open space on the board: \\033[0m\"))\r\n except ValueError:\r\n print(\"Why do you refuse to enter a number, Dave?\")\r\n continue\r\n else:\r\n break\r\n\r\n # makes sure the user enters a number 0-8 and verifies that the space the user selected is open\r\n if self.verify_valid_num(user_input) and self.write_user_choice(user_input):\r\n return True\r\n else:\r\n self.prompt_user_check_input()", "def amount_entered():\n while True: #Run until a suitable input is passed.\n try:\n amt = int(input(\"Enter 
value you wish to trade >>> \"))\n if amt <= 0:\n raise Exception\n return amt\n except ValueError: #if a string is entered\n print(\"Please enter an integer\")\n except Exception: #if a negative digit is entered\n print(\"Value cannot be less than or equal to 0\")", "def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)", "def checkUserInput(self):\n prm = []\n err = \"\"\n guess = self.text.text()\n items = str(guess).split(',')\n if len(items) != 2:\n err = \"Two parameters must be given\"\n else:\n for i in range(0, len(items)):\n val = items[i].strip()\n if not isNumber(val):\n err = \"Parameter {0} is not numeric\".format(i + 1)\n break\n if float(val) < 0.0:\n err = \"Parameter {0} is negative\".format(i + 1)\n break\n val = float(val)\n if i == 0 and val > self.yspan:\n err = \"minHeight is too large\"\n break\n if i == 1:\n if val < self.xspan/self.npt or val > self.xspan/2:\n err = \"minWidth is too large\"\n break\n prm.append(val)\n if err:\n errmsg = \"Incorrect input:\\n{0}\".format(err)\n QtWidgets.QMessageBox.warning(self, self.title, errmsg)\n return False\n\n # Store parameters values in global variables for the next call\n global lastfilename, lastmph, lastmpw\n lastfilename = self.pltw.filename\n self.mph = lastmph = prm[0]\n self.mpw = lastmpw = prm[1]\n return True", "def validate_yesno_input():\n while True:\n try:\n request = int(input())\n except ValueError:\n print(\"* Please only enter 1 for Yes or 0 for No.\")\n continue\n else:\n while request not in (0, 1):\n print(\"* Please only enter 1 for Yes or 0 for No.\")\n request = int(input())\n return request", "def score_input(test_name, test_score=0, invalid_message=\"Invalid test score, try again!\"):\n\n while True:\n\n test_score_str = str(test_score)\n\n try:\n if test_score_str.isnumeric() and 0 <= int(test_score) <= 100:\n return test_name + \": \" + str(test_score)\n\n raise ValueError\n\n except ValueError:\n return invalid_message", "def get_input(self):\n while True:\n try:\n self.rows = int(input(\"Number of rows: \"))\n while self.rows < 2 or self.rows > 30:\n self.rows = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except ValueError:\n print(\"Please enter a number!\")\n\n while True:\n try:\n self.cols = int(input(\"Number of columns: \"))\n while self.cols < 2 or self.cols > 30:\n self.cols = int(input(\"Please enter a number between 2 and 30: \"))\n break\n except ValueError:\n print(\"Please enter a number!\")\n\n while True:\n try:\n self.mines = int(input(\"Number of mines: \"))\n while self.mines < 1 or (self.mines >= self.rows * self.cols):\n tile_count = self.rows * self.cols\n self.mines = int(input(\"Please enter a number between 1 and \" + str(tile_count - 1) + \": \"))\n break\n except ValueError:\n print(\"Please enter a number!\")", "def alpha_len(password_length):\r\n while True:\r\n alphabet_length = input('How much alphabets you want in password? 
At least 1 : ')\r\n try:\r\n alphabet_length = int(alphabet_length)\r\n if 1 <= alphabet_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(alphabet_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(alphabet_length))\r\n return alphabet_length", "def AskHowManyPlayers():\n\n\t# Loop forever until the user enters an integer between 1 and 10, inclusive.\n\twhile True:\n\t\tprint \"How many players? Enter a number between 1 and 10, or press enter for default 2:\"\n\t\tnum_players = SolicitInteger( lobound=1, hibound=10, default_return=2 )\n\t\tif num_players != None:\n\t\t\tprint \"Ok, {} players.\".format( num_players )\n\t\t\treturn num_players", "def get_valid_input(question:str, validation_function) -> str:\r\n while True:\r\n inp = input(question).strip()\r\n if validation_function(inp): break\r\n else:\r\n print(\"Invalid input, please try again.\")\r\n\r\n return inp", "def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length", "def set_mood():\n while True:\n mood = input(Colors.RED + '#' + Colors.NORMAL + ': ')\n if len(mood) > 1 or not mood.isdigit():\n clear_screen()\n print(Colors.RED + 'Please enter a single digit number.' + Colors.NORMAL)\n else:\n print()\n break\n\n return mood", "def donation_prompt(name):\n input_msg = \"Please enter the donation amount for {}: \"\n donation_amount = input(input_msg.format(name))\n while True:\n try:\n return float(donation_amount)\n except ValueError:\n donation_amount=input(\"INVALID Amount: \"+input_msg.format(name))", "def setup_key_encrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(f\"Please enter a key value less than or equal to {self.max_key}. 
--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\t\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key\")\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def not_a_number(update: Update, context: CallbackContext):\n text = update.message.text\n update.message.reply_text(f'{text} is not an available number of questions, please use the keyboard provided!')\n\n return HOW_MANY_QUESTIONS", "def main_method():\r\n choice = 0\r\n precision = 0\r\n # loop to display menu and validate user's input\r\n while choice != 6:\r\n display_menu()\r\n choice = input(\"Enter choice(1-6):\")\r\n print(\"\\n\")\r\n\r\n # validate choice before casting to integer\r\n if choice.isdigit():\r\n choice = int(choice)\r\n\r\n if choice == 1:\r\n length, has_upper, has_lower, has_numbers, has_special_char, \\\r\n is_all_no = 0, \" \", \" \", \" \", \" \", False\r\n\r\n print(\"-- Generating Password --\")\r\n\r\n # Prompt user for password attribute's\r\n # And validate input\r\n while length < 10 or has_upper not in valid_statement or \\\r\n has_lower not in valid_statement or \\\r\n has_numbers not in valid_statement or \\\r\n has_special_char not in valid_statement or is_all_no:\r\n\r\n print(\r\n \"Length MUST be a number 10 or greater | ALL questions are \"\r\n \"'yes' or 'no' | At LEAST 1 yes required:\")\r\n length = input(\"Enter length of password (minimum 10):\")\r\n\r\n # Validate length is digit before casting to int\r\n if length.isdigit():\r\n length = int(length)\r\n else:\r\n length = 0\r\n\r\n # Prompt user for password complexity\r\n has_upper = input(\"Should password contain uppercase?\")\r\n has_lower = input(\"Should password contain lowercase?\")\r\n has_numbers = input(\"Should password contain numbers?\")\r\n has_special_char = input(\"Should password contain special characters?\")\r\n print(\"\\n\")\r\n\r\n # Boolean check if all answers are no\r\n # This would mean no characters to make password\r\n is_all_no = has_upper in no and has_lower in no and has_numbers in no \\\r\n and has_special_char in no\r\n\r\n # Data is valid so generate password\r\n choice_1(length, has_upper, has_lower, has_numbers, has_special_char)\r\n elif choice == 2:\r\n print(\"-- Calculate a Percentage --\")\r\n\r\n # Prompt user for numerator, denominator and decimal precision\r\n # NOTE: Validate numerator and denominator and precision are integers\r\n # NOTE: Validate denominator is NOT 0\r\n\r\n numerator, denominator, precision = 0, 0, 0\r\n while True:\r\n print(\"Only whole numbers accepted! 
| decimal precision must be positive!\")\r\n numerator = input(\"What is the numerator?\")\r\n denominator = input(\"What is the denominator?\")\r\n precision = input(\"How many decimal precision needed?\")\r\n print(\"\\n\")\r\n\r\n if numerator[0] == \"-\":\r\n numerator_sign = -1\r\n numerator = numerator[1:]\r\n else:\r\n numerator_sign = 1\r\n\r\n if denominator[0] == \"-\":\r\n denominator_sign = -1\r\n denominator = denominator[1:]\r\n else:\r\n denominator_sign = 1\r\n\r\n if numerator.isdigit() and denominator.isdigit() and \\\r\n precision.isdigit() and denominator != \"0\":\r\n numerator = int(numerator) * numerator_sign\r\n denominator = int(denominator) * denominator_sign\r\n precision = int(precision)\r\n break\r\n\r\n choice_2(numerator, denominator, precision)\r\n elif choice == 3:\r\n choice_3()\r\n elif choice == 4:\r\n print(\"-- Calculate Leg of a Triangle --\")\r\n\r\n side_ac, side_cb, angle_acb, precision = 0, 0, 0, 0\r\n # Prompt user for side AC\r\n # Prompt user for side CB\r\n # Prompt user for angle <ACB\r\n\r\n while True:\r\n print(\"All input must be a positive whole number!\")\r\n side_ac = input(\"Enter length for side AC:\")\r\n side_cb = input(\"Enter length for side CB:\")\r\n angle_acb = input(\"Enter angle for <ACB:\")\r\n precision = input(\"How many decimal precision needed?\")\r\n\r\n # Validate data entered are integers\r\n if side_ac.isdigit() and side_cb.isdigit() and angle_acb.isdigit() \\\r\n and precision.isdigit():\r\n side_ac = int(side_ac)\r\n side_cb = int(side_cb)\r\n angle_acb = int(angle_acb)\r\n precision = int(precision)\r\n break\r\n choice_4(side_ac, side_cb, angle_acb, precision)\r\n elif choice == 5:\r\n print(\"-- Volume of Right Circular Cylinder --\")\r\n\r\n radius, height, precision = 0, 0, 0\r\n\r\n while True:\r\n radius = input(\"Enter radius of cylinder:\")\r\n height = input(\"Enter height of cylinder:\")\r\n precision = input(\"Enter decimal precision for answer:\")\r\n\r\n if radius.isdigit() and height.isdigit() and precision.isdigit():\r\n radius = int(radius)\r\n height = int(height)\r\n precision = int(precision)\r\n break\r\n\r\n choice_5(radius, height, precision)\r\n elif choice == 6:\r\n print(\"Exiting program.\")\r\n else:\r\n print(\"Invalid choice. Must be a number (1 to 6)\")" ]
[ "0.6210186", "0.60945845", "0.6090262", "0.59687257", "0.5932961", "0.5927148", "0.59168947", "0.58883595", "0.5841568", "0.58019143", "0.5782949", "0.5775905", "0.5761319", "0.5716825", "0.57102144", "0.5705627", "0.5701563", "0.5696017", "0.56239253", "0.5619944", "0.5563028", "0.55464673", "0.55456233", "0.5532081", "0.55233353", "0.5516909", "0.5509924", "0.5505276", "0.5493249", "0.5482043", "0.5462516", "0.54532266", "0.5443118", "0.54396605", "0.54190826", "0.541733", "0.54047424", "0.53736293", "0.5367091", "0.5366752", "0.5359798", "0.53352946", "0.5328069", "0.53057104", "0.52984655", "0.5292444", "0.52810943", "0.52602893", "0.524765", "0.5244313", "0.52374923", "0.52366483", "0.52236795", "0.520844", "0.5183306", "0.516586", "0.5159443", "0.5138697", "0.51358783", "0.5133649", "0.5118514", "0.5114476", "0.5104281", "0.51032954", "0.51023006", "0.5094136", "0.5089609", "0.5084553", "0.507061", "0.5069499", "0.5066653", "0.50625926", "0.5051537", "0.50495195", "0.5047313", "0.5042547", "0.50310564", "0.5030916", "0.5024251", "0.50233275", "0.501033", "0.5007032", "0.5006947", "0.4999432", "0.49877185", "0.49818808", "0.4980307", "0.49743536", "0.49740502", "0.49734738", "0.49668807", "0.49479198", "0.49447465", "0.4940953", "0.49345878", "0.49312803", "0.49267173", "0.49253517", "0.49219665", "0.49191108" ]
0.83368486
0
This function returns True if userInput can be converted to a number and returns False if it cannot.
def CheckNumber(userInput):
    try:
        float(userInput)
        return True
    except(ValueError):
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_number(value_if_allowed):\n if value_if_allowed == '':\n return True\n try:\n float(value_if_allowed)\n return True\n except ValueError:\n return False", "def __checkInput(self, var):\n try:\n int(var)\n\n except:\n return False\n\n else:\n return True", "def check_if_input_is_int(self):\n try:\n int(self.input)\n except ValueError:\n return False\n else:\n return True", "def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False", "def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False", "def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def is_number(number):\n try:\n float(number)\n return True\n except ValueError:\n return False", "def _is_number(value):\n try:\n float(value)\n return True\n except (TypeError, ValueError):\n return False", "def isNumber(num):\n try:\n abs(num)\n return True\n except:\n return False", "def is_number(n):\n\ttry:\n\t\tfloat(n)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def is_digit(user_input):\n # If any characters is digit return boolean True else False\n if any(char.isdigit() for char in user_input):\n return True\n return False", "def check_user_input_if_integer(user_input):\n integer_input = ''\n while not integer_input:\n try:\n integer_input = int(user_input)\n except ValueError:\n logging.warn('only integer number accepted')\n user_input = input('enter a number: ')\n\n return integer_input", "def could_be_number(val):\n if val == None:\n return False\n\n if isinstance(val, (float, int, long)):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n n = float(val)\n if not isinstance(n, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False", "def is_number(self) -> bool:\n return False", "def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False", "def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def checkifnumber(self, test_string):\r\n try:\r\n float(test_string)\r\n return(True)\r\n except ValueError:\r\n return(False)", "def check_for_integer(number):\r\n \r\n try:\r\n int(number) \r\n return True\r\n except ValueError:\r\n return False", "def is_number(self,val):\n try:\n float(val)\n return True\n except ValueError:\n return False", "def verify_valid_num(self, user_num):\r\n if not self.range_between_0_and_9(user_num):\r\n print(\"\\033[1;31mJust what do you think you're doing, Dave? 
Choose a number between 0 and 8\\033[0m\")\r\n return False\r\n\r\n return True", "def validate_answer(answer):\r\n try:\r\n float(answer)\r\n return True\r\n except ValueError:\r\n return False", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def is_number(x):\n if isinstance(x, (int, float)):\n return True\n else:\n return False", "def isnum(value):\n\n try:\n return bool(isinstance(value, (float, int)))\n except RuntimeError:\n return False", "def validation_method(input_value):\r\n while True:\r\n try:\r\n valor = float(input(input_value))\r\n return valor\r\n except ValueError:\r\n print(\" ingresa un número\")", "def checkNumberInt(value):\n if value.isnumeric():\n return int(value)\n else:\n print(\"You did not enter the correct numbers!\")\n newNum = input(\"Please enter a number: \")\n return checkNumberInt(newNum)", "def _check_message_is_number(message):\n try:\n float(message)\n return True\n except ValueError:\n return False", "def is_number(n):\n return isinstance(n, (int, float))", "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False", "def is_number(number):\n if type(number) == type(1) or type(number) == type(0.1) or type(number) == type('') or type(u''):\n try:\n float(number)\n return True\n except ValueError:\n return False\n except TypeError:\n return False\n else:\n return False", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def is_number(value, allow_bool=False):\n if isinstance(value, bool):\n return allow_bool\n return isinstance(value, _Number)", "def is_numeric(number):\n\n if isinstance(number, bool):\n return False\n elif isinstance(number, int) or isinstance(number, float):\n return True\n else:\n return False", "def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def isgoodnum(n):\n return (not isinstance(n,bool)) and isinstance(n,(int,float))", "def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)", "def answer_input_validation():\r\n user_input_is_valid = False\r\n while user_input_is_valid == False:\r\n try:\r\n users_answer_to_problem = int(input(\"\\nEnter the answer to the addition problem: \"))\r\n user_input_is_valid = True\r\n break\r\n # The ValueError is used because the user must enter an integer. If the \r\n # answer given is not an integer, they are scolded and reprompted.\r\n except ValueError:\r\n user_input_is_valid = False\r\n print(\"That is not an integer. 
Please enter an appropriate answer.\")\r\n return users_answer_to_problem", "def is_number(c):\n return '0' <= c <= '9'", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(str):\n\n # Local constants\n\n # Local variabes\n\n #****** start is_number() ******#\n\n try:\n float(str)\n return True\n except ValueError:\n return False", "def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n return False", "def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def isInteger(number) :\n\n try:\n int(number)\n return True \n except ValueError:\n return False", "def isNumber(number):\n try:\n # Try to cast the string\n int(number)\n # The cast was successful\n return True\n # The cast was unsuccessful, the string is not a number\n except ValueError as err:\n # Write the exception in logging\n logging.exception(str(err))\n return False", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def check_number(self):\n digits = self.number\n _sum = 0\n alt = False\n ix = []\n for x in str(digits):\n ix.append(int(x))\n for d in reversed(ix):\n assert 0 <= d <= 9\n if alt:\n d *= 2\n if d > 9:\n d -= 9\n _sum += d\n alt = not alt\n return (_sum % 10) == 0", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isInteger(number) :\n\n try:\n int(number)\n return True\n except ValueError:\n return False", "def is_number(string):\r\n try:\r\n float(string)\r\n return True\r\n except ValueError: return False", "def is_number(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def is_number(value):\n\n return isinstance(value, (int, long, float))", "def checkNumberFloat(value):\n try:\n value = float(value)\n return value\n except ValueError:\n print(\"You did not enter a number\")\n newNum = input(\"Please enter a number: \")\n return checkNumberFloat(newNum)", "def ISNUMBER(value):\n return isinstance(value, numbers.Number)", "def not_number_rejector(message):\n actual_number = False\n\n while not actual_number:\n guess = str(input(message))\n if guess.isdigit():\n actual_number = True\n return int(guess)\n else:\n print(\"Not a number\")", "def is_num(n):\n return '{} is a number'.format(n)", "def isNumber(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def isNumber(word):\n try:\n int(word)\n return True\n except ValueError:\n return False", "def IsNumber(s):\n try:\n v = float(s)\n return True\n except ValueError:\n return False", "def isNumber(self, s):\n try:\n tmp = float(s)\n return True\n except:\n return False", "def get_number():\n valid_input = False\n while not valid_input:\n try:\n user_num = int(input(\"Enter a number between {} and {}: \".format(LOWER_BOUND, UPPER_BOUND)))\n if LOWER_BOUND <= user_num <= UPPER_BOUND:\n return user_num\n except ValueError:\n pass\n print(\"That is not a 
valid number !\")", "def is_number_correct(total):\n if int(total) < 0:\n return None\n return True", "def get_number():\n\n while True:\n user_number_str = input('Digite um número para saber o seu fatorial: ').strip()\n\n if user_number_str.isnumeric():\n return int(user_number_str)\n else:\n print('Valor inválido.')", "def validate_num(number):\n\n if number <= 0:\n new_num = int(raw_input(\"Oops, your number has to be greater than 0. Please pick again: \"))\n return validate_num(new_num)\n\n else:\n return number", "def user_input():\n user_number = input(\"Guess a number: \")\n try:\n user_number = int(user_number)\n except:\n print(\"Please ender a valid digit!\")\n return user_input()\n else:\n if 1 <= user_number <= 25:\n return user_number\n else:\n print(\"You need to enter a digit between 0 and 50\")\n return user_input()", "def is_number(G):\n return True", "def operand_present(input_str): # HELPER\n try:\n float(input_str)\n return True\n except ValueError:\n return False", "def value_error(number):\n try:\n nbr = int(number)\n except ValueError:\n print(\"You can't sum letters, please write a number\")\n verification = False\n else:\n verification = True\n return verification", "def __verify_numeric(self, action, value):\n if action != \"1\": # if the action is anything other than inserting:\n return True\n try:\n return value.isnumeric()\n except ValueError:\n return False", "def input_to_int(value):\n \n if value == \"1\" or value == \"2\" or value == \"3\" or value == \"4\" or value == \"5\" or value == \"6\":\n\n value = int(value)\n\n return value\n else:\n\n print(\"Your input was invalid. Please choose from one of the options next time.\")\n\n return False", "def is_valid_integer(input_string):\n\n assert input_string is not None\n try:\n input_string = int(input_string)\n return True\n except ValueError:\n return False", "def is_number(str):\n try:\n float(str)\n return True\n except ValueError as e:\n print(e)\n try:\n unicodedata.numeric(str)\n return True\n except (TypeError, ValueError) as e:\n print(e)\n return False", "def yohoho_validator(payload, chosen):\n\n if not chosen.isdecimal():\n print(f\"Choose a number!\")\n return False\n\n return True", "def test_is_number(self):\n \n self.assertEqual(self.var.is_number(None), False)\n self.assertEqual(self.var.is_number(\"5\"), True)\n self.assertEqual(self.var.is_number(\"a\"), False)", "def is_valid_output(output) -> bool:\n log.info(f\"Output validation: {output}\")\n\n try:\n float(output)\n except ValueError as value_error:\n log.error(value_error)\n return False\n\n log.info(\"Output successfully validated\")\n return True", "def is_float(possible_number):\r\n try:\r\n float(possible_number)\r\n return True\r\n except ValueError:\r\n return False", "def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True", "def is_float(self, input):\n try:\n float(input)\n return True\n except ValueError:\n return False", "def _is_user_wants_to_continue(self):\n\n # dummy value to get in while\n user_input = -1\n while user_input != 1 and user_input != 2:\n\n try:\n # convert the string into int\n user_input = int(input())\n except ValueError:\n print(\"Please enter a number\")\n continue\n except Exception as e:\n print(\"something went wrong please try again \" + str(e))\n continue\n\n # check if the user_input was one of the options\n # if not present a error massage and try again\n if user_input != 1 and user_input != 2:\n 
print(\"Please enter a valid number(1-2)\")\n continue\n\n return user_input == 1", "def isnum(self, x):\n\n return x in '1234567890.-'", "def check_number(number):\n digits = str(number)\n if len(digits) != 6:\n return False\n\n double = False\n last = '0'\n for digit in digits:\n if digit < last:\n return False\n\n if digit == last:\n double = True\n\n last = digit\n\n return double", "def isNumber(x):\n return isinstance(x, (int, float))", "def get_user_input(prompt):\n while True:\n user_input = input(prompt)\n try:\n tmp = int(user_input)\n return tmp\n except ValueError:\n print('Not a number')", "def isNumber(txt):\r\n if not isinstance(txt, str) or len(txt)==0:\r\n return \"error: isNumber\"\r\n # --- YOU CODE STARTS HERE\r\n else: \r\n try: \r\n m = float(txt)\r\n return True\r\n except ValueError: \r\n return False", "def getValidation(myInput):\r\n if myInput == \"\":\r\n print('You did not enter the number of bugs collected.')\r\n return -1\r\n elif myInput.isnumeric() == False:\r\n print('You entered a negative or a text value, please enter numerical digits only.')\r\n return -1\r\n elif myInput.isnumeric() == True:\r\n return int(myInput)\r\n else:\r\n print('There has been a read error, please reenter your number')\r\n return -1", "def isNumber(x):\n\treturn type(x) in [int, float]", "def validate(self):\n return (self.check_input_digits_count()\n and self.check_if_input_is_int()\n and self.check_if_input_digits_are_unique())", "def is_number(self, value):\n if isinstance(value, (int, float, long, complex)): # noqa\n return True\n return False", "def is_convertible_to_int(v: Any) -> bool:\n\n try:\n test = int(v)\n return True\n except:\n return False", "def _is_int(test_val):\n try:\n int(test_val)\n return True\n except ValueError:\n return False", "def _isnumber(string):\n if not _isconvertible(float, string):\n return False\n elif isinstance(string, (str, bytes)) and (\n math.isinf(float(string)) or math.isnan(float(string))\n ):\n return string.lower() in [\"inf\", \"-inf\", \"nan\"]\n return True", "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "def validateNumericInput(input_param):\n if input_param.isnumeric():\n return input_param\n else:\n raise ValueError(\"One or more input parameter(s) are non numeric\")" ]
[ "0.7273646", "0.710573", "0.70342", "0.700277", "0.6989638", "0.69800496", "0.6931918", "0.68965924", "0.68937594", "0.68852633", "0.6882689", "0.68496823", "0.6842476", "0.6823968", "0.68192196", "0.68040407", "0.679756", "0.6784865", "0.67602575", "0.6758838", "0.673784", "0.67236334", "0.67183536", "0.6688678", "0.6659615", "0.66254634", "0.6623485", "0.66204065", "0.6614409", "0.66125953", "0.6606436", "0.65990245", "0.65962446", "0.6570291", "0.65612996", "0.6559294", "0.65569466", "0.6554278", "0.6548549", "0.6548235", "0.6547403", "0.6532889", "0.65171456", "0.65164644", "0.65159124", "0.65005094", "0.65005094", "0.65005094", "0.64776576", "0.6475717", "0.6473233", "0.6461338", "0.6459546", "0.6445306", "0.64371103", "0.64190173", "0.6410604", "0.6407299", "0.63821745", "0.63565814", "0.6349923", "0.6349373", "0.6331297", "0.632901", "0.6327361", "0.63249457", "0.63235295", "0.6312845", "0.63058186", "0.6304402", "0.6289944", "0.6265684", "0.6265118", "0.62650234", "0.6263503", "0.62533003", "0.62528384", "0.6239068", "0.62250274", "0.6209517", "0.62027216", "0.62026626", "0.6202394", "0.61859095", "0.61788356", "0.61778045", "0.61744386", "0.6169438", "0.6169158", "0.61639434", "0.6155958", "0.61412096", "0.61312985", "0.6115334", "0.61145324", "0.61060625", "0.6102265", "0.6087372", "0.60847074", "0.60605824" ]
0.86746126
0
This function prompts the user for a date using the message variable. User will continue to be prompted until the format is correct. The date format is very specific, in the format DD/MM/YYYY. This function will confirm there are the right number of characters, the / are in the right place, the inputs are numbers, the days are between 1 and 31, the months are between 1 and 12, and the year is between 2000 and 3000 (roll on year 3k bug!)
def DateInput(message):
    askAgainMessage = "The date must be in the format DD/MM/YYYY"
    keepAsking = True
    while keepAsking:
        answer = input(message)
        # First we check if there are two / by splitting using / and looking
        # for 3 items in the returned list.
        dateCheck = answer.split(sep="/")
        if len(dateCheck) is not 3:
            print(askAgainMessage)
        else:
            # If all is order, we can assign the 3 items to day, month, year
            day = dateCheck[0]
            month = dateCheck[1]
            year = dateCheck[2]
            # Next we check each item has the right amount of characters
            # and they can all be converted into numbers.
            if (len(day) == 2 and len(month) == 2 and len(year) == 4 and
                    CheckNumber(day) and CheckNumber(month) and CheckNumber(year)):
                day = int(day)
                month = int(month)
                year = int(year)
                if (day > 0 and day < 32 and month > 0 and
                        month < 13 and year > 2000 and year < 3000):
                    keepAsking = False
                else:
                    print(askAgainMessage)
            else:
                print(askAgainMessage)
    return answer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_date(message, param):\n while True:\n try:\n day, month, year = input(message).split(param)\n return str(datetime.datetime(int(year), int(month), int(day)).strftime(\"%d/%m/%Y\"))\n except ValueError:\n continue", "def enter_date():\n valid_data = False\n # used to keep track of the values and change them in other scopes\n input_data = {'date': ''}\n\n while not valid_data:\n input_data['date'] = get_input(\"Date of the task\" + \"\\n\" + \"Please use DD/MM/YYYY format: \")\n if re.match('\\d{2}/\\d{2}/\\d{4}', input_data['date']):\n try:\n datetime.datetime.strptime(input_data['date'], '%d/%m/%Y')\n except ValueError:\n clean_scr()\n get_input(\"Enter a valid date. Press enter to try again.\")\n else:\n valid_data = True\n clean_scr()\n\n return input_data['date']", "def validate_input(date_string):\n #I decided to make sure the input was valid by checking each individual piece. I did this by splitting the input string by the dashes.\n #I checked first that the month value was between 1 and 12. I then checked depending on the month if the day value was valid.\n #I also made sure to check that the year was greater than 1000.\n #For February, I made a specific check for if it was a leap year or not. If the year inputted is not a leap year and the user entered\n #29 as the day value, it throws an error. Finally, once all values are checked and are valid, they are put into a tuple.\n splitdate = date_string.split(\"-\")\n if splitdate[0] != '' and splitdate[1] != '' and splitdate[2] != '':\n if int(splitdate[0]) >= 1 and int(splitdate[0]) <= 12:\n if int(splitdate[0]) == 1 or int(splitdate[0]) == 3 or int(splitdate[0]) == 5 or int(splitdate[0]) == 7 or int(splitdate[0]) == 8 or int(splitdate[0]) == 10 or int(splitdate[0]) == 12:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 31:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 4 or int(splitdate[0]) == 6 or int(splitdate[0]) == 9 or int(splitdate[0]) == 11:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 30:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[0]) == 2:\n if int(splitdate[2]) % 4 == 0 or int(splitdate[2]) % 1000 == 0:\n if int(splitdate[1]) >= 1 and int(splitdate[1]) <= 29:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n elif int(splitdate[1]) >= 1 and int(splitdate[1]) <= 28:\n if int(splitdate[2]) >= 1000:\n date = (int(splitdate[0]), int(splitdate[1]), int(splitdate[2]))\n return date\n return None", "def get_date(text=\"\"):\n clear()\n date = input(\"Enter {}date (Format:YYYY-MM-DD): \".format(text))\n try:\n datetime.datetime.strptime(date, \"%Y-%m-%d\")\n except ValueError:\n input(\"Please enter date in this format: YYYY-MM-DD.\"\n \" Press enter to continue.\")\n return get_date()\n else:\n return date", "def is_valid_date(date):\n\n try:\n parse(date)\n return date\n except:\n new_date = raw_input(\"Invalid date, try again: YYYY-MM-DD \")\n return is_valid_date(new_date)", "def date_format(date):\n\n formatted = True\n task_date = date\n while formatted:\n try:\n datetime.datetime.strptime(task_date, \"%m/%d/%Y\")\n formatted = False\n clear()\n except ValueError:\n clear()\n task_date = input(\n \"Sorry. That is not a valid date. 
Please enter a date \"\n \"in the MM/DD/YYYY format: \\n>\")\n\n return task_date", "def get_date(custom_text):\n fmt = '%m/%d/%Y'\n while True:\n clear()\n print(\"Date Format: month/day/year --/--/----\\n\")\n print(\"{}\\n\".format(custom_text))\n task_date = input(\"Please input a date: \")\n try:\n datetime.datetime.strptime(task_date, fmt)\n except ValueError:\n print(\"'{}' doesn't seem to be a valid date.\".format(task_date))\n input(\"Press Enter\")\n except AttributeError:\n print(\"'{}' doesn't seem to be a valid date.\".format(task_date))\n input(\"Press Enter\")\n else:\n return datetime.datetime.strptime(task_date, fmt).date()\n break", "def ex8() :\r\n print(\" - Date Calculator - \")\r\n import datetime\r\n today = datetime.date.today()\r\n print(today)\r\n try : #try catch method, in case user enters non-date, or 31st Feb etc.\r\n userDate = input(\"Please enter the date to check in a dd/mm/yy format: \") #userDate is string\r\n userDate = datetime.datetime.strptime(userDate, '%d/%m/%Y').date() #userDate is date_object\r\n if userDate < today : print(\"Invalid input, date is in the past\")\r\n elif userDate == today: print(\"That's today you dum-dum, answer is 0 days.\")\r\n else:\r\n delta = userDate - today #calculate difference\r\n delta = str(delta) #date_object don't work with split only str\r\n delta = delta.split(\",\") #unorthodox method to delete time (0:00:0) from the days\r\n print(\"The number of days between today (\",today,\") and entered date (\",userDate,\") are \",delta[0],\".\")\r\n except ValueError as e :\r\n print(\"Not a valid date.\")", "def date():\r\n while True:\r\n clear()\r\n task_date = input(\"When was this task performed? Date format: dd-mm-yyyy \\n > \").strip()\r\n try:\r\n task_date = datetime.datetime.strptime(task_date, \"%d-%m-%Y\")\r\n if task_date.date() > datetime.datetime.today().date():\r\n\r\n input(\" Sorry, date can't be later than today's date. Press enter and provide a correct date \")\r\n continue\r\n\r\n except ValueError:\r\n input(\" Sorry, not a valid date. Press enter and provide a correct date... 
\")\r\n continue\r\n\r\n except Exception: \r\n raise(\"Something went wrong.\")\r\n input(\"Press enter to continue...\")\r\n continue \r\n\r\n else:\r\n return task_date.strftime(\"%d-%m-%Y\")", "def getdatefromuser():\n date_str = raw_input(\"Enter the date cutoff in mm/dd/yyyy format: \")\n date_parts = re.split('[-/]', date_str)\n return date(*[int(elt) for elt in [date_parts[2], date_parts[0], date_parts[1]]])", "def check_date(date, logger):\n logger.info('Checking the entered date...')\n try:\n (datetime.datetime.strptime(date, '%Y%m%d')).date()\n return True\n except Exception:\n raise SystemExit('Please, enter the date in \"YYYYMMDD\" format')", "def exact_date(self):\n print(\"Exact Date Search\")\n date_string = input(\"Enter a date in the format DD/MM/YYYY> \")\n return date_string", "def validate(self, string, pos):\n res, string, pos = super().validate(string, pos)\n\n if res == 0:\n if string[pos-1].isdigit() and string.count('/') < 2:\n string = string[0:pos-1] + \"/\" + string[pos-1]\n res = 1\n pos = pos+1\n\n\n # get the mo/da/yr as array\n date = string.split(\"/\")\n\n # init to something that cannot be entered\n mo = da = yr = \"-1\"\n\n # update vars based on array\n try:\n mo = date[0]\n da = date[1]\n yr = date[2]\n except:\n pass\n\n # change arr len of date based on overwritten values\n if mo == \"-1\":\n date = []\n elif da == \"-1\":\n date = date[:1]\n elif yr == \"-1\":\n date = date[:2]\n\n # if entering date that doesnt exist, stop it\n try:\n if int(mo) > 12:\n date[0] = \"1\"\n pos -= 1\n if int(da) > 31:\n date[1] = \"3\"\n pos -= 1\n\n except Exception as e:\n pass\n\n # reform the date\n string = '/'.join(date)\n\n return res, string, pos", "def search_date(self, text='date'):\n\n date = input(f\"\\nEnter a {text} (MM-DD-YYYY): \")\n date_obj = datetime.strptime(date, \"%m-%d-%Y\")\n\n try:\n date = datetime.strftime(date_obj, \"%m-%d-%Y\")\n return date\n except ValueError:\n input(\"\\nFormat of date must be MM-DD-YYYY\\n\")\n return self.search_date()", "def validate_date_format_yyy_mm_dd(date_text):\n try:\n datetime.datetime.strptime(date_text, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")", "def get_user_input():\n # Gets user input in M\\nD\\nYYYY format for the start date\n start_instrings = [\"Enter start month: \",\n \"Enter start day: \", \"Enter start year: \"]\n raw_start_date = tuple(input(s) for s in start_instrings)\n # Gets user input in M\\nD\\nYYYY format for the end date\n end_instrings = [\"Enter end month: \",\n \"Enter end day: \", \"Enter end year: \"]\n raw_end_date = tuple(input(s) for s in end_instrings)\n\n # Uses map to convert string input to integers and stores the values in a tuple\n start_date = tuple(map(int, raw_start_date))\n end_date = tuple(map(int, raw_end_date))\n\n # Checks if each year is within the date limit\n if not(1971 <= start_date[2] <= 2020 and 1971 <= end_date[2] <= 2020):\n raise Exception(\"Input date/s outside date limit.\")\n\n # Cyclic rotation of elements (because I really really **really** want to unpack)\n # Source: https://www.geeksforgeeks.org/python-shift-last-element-to-first-position-in-list/\n start_date, end_date = start_date[-1:] + \\\n start_date[:-1], end_date[-1:] + end_date[:-1]\n\n # As you can see unpacking makes the line smaller and more readable\n # return DateRange(datetime.date(start_date[2], start_date[0], start_date[1]), datetime.date(end_date[2], end_date[0], end_date[1]))\n return DateRange(datetime.date(*start_date), 
datetime.date(*end_date))", "def input_date(self, date_attr):\r\n try:\r\n date = input(\"Entrez la \" + date_attr + \"(JJ/MM/AAAA): \")\r\n datetime.datetime.strptime(date, '%d/%m/%Y')\r\n return date\r\n except ValueError:\r\n print(\"Erreur de saisie de la date (format JJ/MM/AAAA)\")\r\n return self.input_date(date_attr)", "def chkDate(stdin):\n # return \"Y\" if dateCheck(stdin) else \"N\"\n return run(\"./chkdate\", [], stdin)[1].strip()", "def main():\n ## The standard way to get arguments from the command line, \n ## make sure they are the right type, and print help messages\n parser = argparse.ArgumentParser(description=\"Compute days from yyyy-mm-dd to next mm-dd.\")\n parser.add_argument('year', type=int, help=\"Start year, between 1800 and 2500\")\n parser.add_argument('start_month', type=int, help=\"Starting month, integer 1..12\")\n parser.add_argument('start_day', type=int, help=\"Starting day, integer 1..31\")\n parser.add_argument('end_month', type=int, help=\"Ending month, integer 1..12\")\n parser.add_argument('end_day', type=int, help=\"Ending day, integer 1..12\")\n args = parser.parse_args() # will get arguments from command line and validate them\n year = args.year\n start_month = args.start_month\n start_day = args.start_day\n end_month = args.end_month\n end_day = args.end_day\n \n print(\"Checking date \", str(year) + \"/\" + str(start_month) + \"/\" + str(start_day))\n \n\n if not is_valid(year, start_month, start_day) : \n sys.exit(\"Must start on a valid date between 1800 and 2500\")\n if not is_valid(2000, end_month, end_day):\n sys.exit(\"Ending month and day must be part of a valid date\")\n count_days(year,start_month,start_day,end_month,end_day)", "def valid_date(input_date):\n try:\n input_dt = dt.datetime.strptime(input_date, \"%Y-%m-%d\")\n return input_date\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(input_date)\n raise argparse.ArgumentTypeError(msg)", "def check_dateformat(date_field, date_format='YYYY-MM-DD'):\r\n if not date_format or not date_field:\r\n return None\r\n # format = \"%Y-%m-d\"\r\n date_field = date_field.strip()\r\n\r\n try:\r\n dd = None\r\n mm = None\r\n yyyy = None\r\n seperator = '-'\r\n date_part = date_field\r\n time_part = None\r\n if '/' in date_field:\r\n seperator = '/'\r\n if ' ' in date_field:\r\n (date_part, time_part) = date_field.split(' ')\r\n\r\n if not time_part:\r\n if date_format == 'DD-MM-YYYY' or date_format == 'DD/MM/YYYY':\r\n (dd, mm, yyyy) = date_part.split(seperator)\r\n elif date_format == 'YYYY-MM-DD' or date_format == 'YYYY/MM/DD':\r\n (yyyy, mm, dd) = date_part.split(seperator)\r\n elif date_format == 'YYYY-DD-MM' or date_format == 'YYYY/DD/MM':\r\n (yyyy, dd, mm) = date_part.split(seperator)\r\n yyyy = int(yyyy)\r\n dd = int(dd)\r\n mm = int(mm)\r\n date_part = date(yyyy, mm, dd)\r\n return date_part\r\n else:\r\n raise SIDException(\r\n 'Invalid Date: datetime not supported', 'datetime')\r\n # to support further \"%d/%m/%Y %H:%M:%S\"\r\n\r\n # date_string = str(yyyy) + '-' + str(mm) + '-' + str(dd)\r\n # return datetime.strptime(date_string, format)\r\n\r\n except Exception:\r\n raise SIDException('Invalid Date', 'check_dateformat')", "def read_day():\n\twhile True:\n\t\t_day = input(\"Introduceti ziua: \")\n\t\ttry:\n\t\t\t_day = int(_day)\n\t\t\tif (not is_in_range(_day, 0, VALID_DAY)):\n\t\t\t\tprint(\"Ziua invalida.\")\n\t\t\telse:\n\t\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Ziua invalida, introduceti un intreg.\")\n\treturn (_day)", "def get_date(prompt, title, 
min_date, max_date):\r\n question = prompt + ' Please select the year:'\r\n choices = [i for i in range(min_date.year, max_date.year + 1)]\r\n year = e.choicebox(question, title, choices)\r\n if year == None:\r\n raise QuitError\r\n else:\r\n year = int(year)\r\n question = 'Please select the month:'\r\n choices = [('0' + str(i))[-2:] for i in range(1, 13)]\r\n if min_date.year == max_date.year:\r\n choices = choices[min_date.month - 1: max_date.month]\r\n elif year == min_date.year:\r\n choices = choices[min_date.month - 1:]\r\n elif year == max_date.year:\r\n choices = choices[:max_date.month]\r\n month = e.choicebox(question, title, choices)\r\n if month == None:\r\n raise QuitError\r\n else:\r\n month = int(month)\r\n question = 'Please select the day:'\r\n month_length = c.monthrange(year, month)[1]\r\n choices = [('0' + str(i))[-2:] for i in range(1, month_length + 1)]\r\n if (min_date.year, min_date.month) == (max_date.year, max_date.month):\r\n choices = choices[min_date.day - 1: max_date.day]\r\n elif (year, month) == (min_date.year, min_date.month):\r\n choices = choices[min_date.day - 1:]\r\n elif (year, month) == (max_date.year, max_date.month):\r\n choices = choices[:max_date.day]\r\n day = e.choicebox(question, title, choices)\r\n if day == None:\r\n raise QuitError\r\n else:\r\n day = int(day)\r\n return d.date(year, month, day)", "def valid_date(date_string):\n date_string_number = re.sub('\\D', '', date_string)\n try:\n date_res = datetime.strptime(date_string_number, '%Y%m%d').date()\n except ValueError:\n print(\"Not a valid date: '{}'.\".format(date_string))\n else:\n return date_res", "def parse_date(input):\n input = input.strip()\n if input == '':\n return None, None\n\n # Parse the start\n mo = yyyymmdd_re.match(input)\n if not mo:\n mo = yyyymmdd_hyphen_re.match(input)\n if not mo:\n mo = ddmmyyyy_re.match(input)\n if not mo:\n mo = ddmmyyyy_hyphen_re.match(input)\n if mo:\n start = Date(*map(lambda x: x and int(x), (mo.group('year'), mo.group('month'), mo.group('day'))))\n else:\n return None, 'N'\n\n\n # Check if we're at the end of the input\n pos = mo.end()\n if pos == len(input):\n return DateRange(start, start), None\n\n # Check for a range specifier\n mo = range_re.match(input, pos)\n if mo:\n pos = mo.end()\n else:\n return DateRange(start, start), 'T'\n\n # Parse the end date\n mo = yyyymmdd_re.match(input, pos)\n if not mo:\n mo = yyyymmdd_hyphen_re.match(input, pos)\n if not mo:\n mo = ddmmyyyy_re.match(input, pos)\n if not mo:\n mo = ddmmyyyy_hyphen_re.match(input, pos)\n if mo:\n end = Date(*map(lambda x: x and int(x), (mo.group('year'), mo.group('month'), mo.group('day'))))\n else:\n return DateRange(start, start), 'T'\n\n pos = mo.end()\n if pos == len(input):\n return DateRange(start, end), None\n return DateRange(start, end), 'T'", "def check_date(date):\n import datetime\n correctDate = None\n date = str(date)\n \n if (len(date)!=8):\n return False\n year = int(date[0:4])\n month = int(date[4:6])\n day = int(date[6:8])\n try:\n datetime.datetime(year,month,day)\n correctDate = True\n except ValueError:\n correctDate = False\n return correctDate", "def date_datetime():\n date = input(\"give date in mon/day/year format(month like jan feb): \")\n return datetime.datetime.strptime(date, \"%b/%d/%Y\")", "def valid_date(date):\n import datetime\n try:\n datetime.datetime.strptime(date, '%Y-%m-%d')\n except ValueError:\n raise ValueError(\"Incorrect data format, should be YYYY-MM-DD\")", "def checkdate_re(name, val):\n mnames = calendar.month_name + 
calendar.month_abbr\n mat = _slash.match(val)\n if mat is not None:\n\tif string.capitalize(mat.group(1)) in mnames:\n\t return\n\ttry:\n\t x = string.atoi(mat.group(1))\n\texcept ValueError:\n\t raise ValidationError, \\\n\t\t 'parameter \"%s\", value \"%s\" does not look like a date' % \\\n\t\t (name, val)\n mat = _amer.match(val)\n if (mat is not None and\n\tstring.capitalize(mat.group(1)) in mnames):\n\treturn\n mat = _euro.match(val)\n if (mat is not None and\n\tstring.capitalize(mat.group(2)) in mnames):\n\treturn\n raise ValidationError, \\\n\t 'parameter \"%s\", value \"%s\" does not look like a date' % \\\n\t (name, val)", "def checkdate_regex(name, val):\n mnames = calendar.month_name + calendar.month_abbr\n if _slash.match(val) != -1:\n\tif string.capitalize(_slash.group(1)) in mnames:\n\t return\n\ttry:\n\t x = string.atoi(_slash.group(1))\n\texcept ValueError:\n\t raise ValidationError, 'parameter \"%s\", value \"%s\" does not look like a date' % \\\n\t\t (name, val)\n if _amer.match(val) != -1 and string.capitalize(_amer.group(1)) in mnames:\n\treturn\n if _euro.match(val) != -1 and string.capitalize(_euro.group(2)) in mnames:\n\treturn\n raise ValidationError, 'parameter \"%s\", value \"%s\" does not look like a date' % \\\n\t (name, val)", "def test_valid_date_format():\n assert valid_date_format(\"2010-10-15\") is True\n assert valid_date_format(\"2005-06-29\") is True\n assert valid_date_format(\"2003-02-29\") is True\n\n assert valid_date_format(\"18501312\") is False\n assert valid_date_format(\"1800-12-5\") is False\n assert valid_date_format(\"1800-2-05\") is False", "def check_date(y, m, d=None):\r\n months = [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\", \"july\",\r\n \"august\", \"september\", \"october\", \"november\", \"december\"]\r\n days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n if y.isdigit() and int(y) > 1899 and int(y) < 3000:\r\n if m in months:\r\n if d is None or d.isdigit() and int(d) > 0 \\\r\n and int(d) <= days[months.index(m)]:\r\n return True\r\n return False", "def prompt_for_date(fdin, fdout, prompt, initial, optional=False):\n cols = get_terminal_width(fdin)\n if optional:\n opt = \"[+/- changes, n skips, ENTER/tab accepts]\"\n else:\n opt = \"[+/- changes, ENTER/tab accepts]\"\n line = prompt + (\"\" if not initial else \" %s\" % initial)\n print_line_ellipsized(fdout, cols, line + \" \" + opt)\n while True:\n char = read_one_character(fdin)\n if char in \"\\n\\r\\t\":\n break\n elif char == \"+\":\n initial = initial + datetime.timedelta(1)\n elif char == \"-\":\n initial = initial + datetime.timedelta(-1)\n elif char in \"nN\" and optional:\n return None\n cols = get_terminal_width(fdin)\n go_cursor_up(fdout)\n blank_line(fdout, cols)\n go_cursor_up(fdout)\n line = prompt + \" \" + \"%s\" % initial\n print_line_ellipsized(fdout, cols, line + \" \" + opt)\n return initial", "def enforce_valid_dates(arg):\n year_formats = (\n '%Y-%m-%d',\n '%Y%m%d',\n '%d',\n '%j',\n )\n\n for yf in year_formats:\n try:\n return datetime.strptime(str(arg), yf)\n except ValueError:\n pass\n\n\n raise ValueError(\n 'Unable to coerce {} to a date. 
Try %Y-%m-%d'.format(arg)\n )", "def error_imaginary_date(user: discord.User, date_arg: str) -> str:\n return (\n f\"{user.mention}, you might need to check you're calendar!\"\n f\" '{date_arg}' doesn't exist!\"\n )", "def checkDate(datestr):\r\n try:\r\n year = int(datestr[0:4])\r\n except:\r\n return False\r\n \r\n try:\r\n month = int(datestr[4:6])\r\n except:\r\n return False\r\n \r\n try:\r\n day = int(datestr[6:8])\r\n except:\r\n return False\r\n \r\n try:\r\n datetime.date(year, month, day)\r\n except ValueError:\r\n return False\r\n \r\n return True", "def find_by_date(self):\n clear_screen()\n while True:\n self.date = input(\"Which date would you like to look at, ex: MM/DD/\"\n \"YYYY? Or you can find all dates including and between two \"\n \"dates, ex: MM/DD/YYYY - MM/DD/YYYY. Or Q to quit to the main \"\n \"screen.: \")\n if self.date.strip().upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n break\n #if the user put a range of dates it will go into this option.\n elif re.search(r'[0-1][0-9]/[0-3][0-9]/[1-2][0-9]{3}\\s?[-]\\s?[0-1]'\n '[0-9]/[0-3][0-9]/[1-2][0-9]{3}',self.date):\n self.date_one = re.search(r'([0-1][0-9]/[0-3][0-9]/[1-2]'\n '[0-9]{3})\\s?[-]\\s?',self.date)\n self.date_two = re.search(r'\\s?[-]\\s?([0-1][0-9]/[0-3][0-9]/'\n '[1-2][0-9]{3})', self.date)\n clear_screen() \n self.dates_to_print = \"Results for dates including and between \"\n \"{} - {}.\".format(self.date_one.group(1), self.date_two.group(1))\n self.date_one = datetime.datetime.strptime(self.date_one.group(1),\n '%m/%d/%Y')\n self.date_two = datetime.datetime.strptime(self.date_two.group(1),\n '%m/%d/%Y')\n self.find_by_date_list = []\n a = 0\n #finds the dates that are in between the two entered dates.\n for i in self.dict_list:\n self.this_date = datetime.datetime.strptime(i[\"date\"], \n '%m/%d/%Y %H:%M')\n if self.date_one <= self.this_date <= self.date_two:\n self.find_by_date_list.append(i) \n a += 1\n if a == 0:\n print(\"{} was not listed.\".format(self.date))\n continue \n else:\n self.display_style(self.find_by_date_list, \n dates=self.dates_to_print)\n self.del_or_edit()\n break\n #if user entered a single date, this option will be triggered\n elif re.search(r'[0-1][0-9]/[0-3][0-9]/[1-2][0-9]{3}',self.date):\n print(\"Results for the date {}.\".format(self.date))\n self.find_by_date_list = []\n a = 0\n for i in self.dict_list:\n if re.search(self.date, i[\"date\"]):\n self.find_by_date_list.append(i)\n a += 1\n if a == 0:\n print(\"{} was not listed.\".format(self.date))\n continue \n else:\n self.display_style(self.find_by_date_list)\n self.del_or_edit()\n break\n else:\n print(\"{} is not an acceptable date.\".format(self.date))\n print(\"\")", "def clean_date(self):\n input_day = self.cleaned_data.get('day')\n input_date = self.cleaned_data.get('date')\n if input_date < datetime.date.today():\n raise forms.ValidationError(\"Can not create a lesson in the past.\")\n elif input_date.strftime(\"%A\").lower() != input_day:\n raise forms.ValidationError(input_date.strftime(\"%d-%m-%Y\")+\" does not fall on a \"+input_day.title()+\".\")\n return input_date", "def get_date_from_string(datestring):\n try:\n month,day,year = [int(date_num) for date_num in datestring.split('/')]\n except:\n Exception(f\"Date string ({datestring}) must be in MM/DD/YY format.\")\n return None\n\n # If a 2-digit year is provided, assume it's from 1900s if it's greater\n # than current year - 2000. Otherwise, assume it's from the 2000s. \n if year < 100: \n # got a 2-digit year in the datestring. Make it 4digits. 
\n if year > date.today().year - 2000 + 5:\n year += 1900\n else:\n year += 2000\n\n #sanity check for years\n if year < 1900 or year > 2023: \n raise Exception(f\"Year must be > 1900 and < 2023: {datestring}\")\n\n if month < 1 or month > 12:\n raise Exception(f\"Month in date is out of range: {datestring}\")\n\n if day < 1 or day > 31:\n raise Exception(f\"Month in date is out of range: {datestring}\")\n\n #sanity check for day values given a particular month\n # To do handle year 2000 (no leap year)\n if ((month in [4, 6, 9, 11] and day > 30) or\n (month == 2 and \n ((day > 28 and (year%4 != 0 or year == 2000)) or\n (day > 29 and (year%4 == 0 and year!= 2000))))):\n raise Exception(f\"Day in date is out of range: {datestring}\")\n\n return date(year, month, day)", "def _validate(year, month, day):\n if day is not None and month is None:\n raise ValueError(\"Day without month\")\n if day is None:\n day = 1\n if month is None:\n month = 1\n if year is None:\n year = 2000\n # actual validation happens here\n datetime.date(year, month, day)", "def valid_date(M, D, Y, HH, MM, SS):\n check1 = False\n check2 = False\n check3 = False\n if isinstance(M, int) and isinstance(Y, int) and isinstance(D, int):\n check1 = True\n if M <= 12 and D <= 31 and HH <= 24 and MM <= 60 and SS <= 60:\n check2 = True\n if M >= 0 and D >= 0:\n check3 = True\n if check1 and check2 and check3:\n check = True\n else:\n check = False\n\n return check", "def test_hotshot_check_date_error(self):\n try:\n check_date('N/A', 'N/A', '20.11.2015')\n except ValueError as error:\n self.assertEqual(type(error), ValueError)", "def datetime_checkinput(year, month, day):\n try:\n datetime.datetime(year, month, day)\n except:\n raise Invaliddatetimeinput\n return 0", "def properDayInput(day):\r\n possibleStrings = [\"m\",\"mon\",\"monday\",\"tu\",\"tue\",\"tues\",\"tuesday\",\"w\",\r\n \"we\",\"wed\",\"wednesday\",\"th\",\"tr\",\"r\", \"thu\",\"thur\",\"thurs\",\"thursday\",\"f\",\"fr\",\r\n \"fri\",\"friday\",\"sa\",\"sat\",\"saturday\",\"su\",\"sun\",\"sunday\"]\r\n \r\n validString = False\r\n for i in range(0, len(possibleStrings)):\r\n if possibleStrings[i] == day.lower():\r\n validString = True\r\n return validString", "def validate_lookup_date_format(search_query):\n try:\n datetime.datetime.strptime(search_query, '%d/%m/%Y')\n clear()\n return search_query\n\n except ValueError:\n clear()\n return False", "def check_imported_date(self, date, date_type, sourcepath):\n try:\n [year, month, day] = date.split(\"/\")\n datetime.datetime(int(year), int(month), int(day))\n if self.summary[date_type] < int(year):\n self.summary[date_type] = int(year)\n except ValueError as e:\n raise DateFormatError(\"ERROR: %s: invalid YYYY/MM/DD date: %s/%s/%s\"\n % (sourcepath, year, month, day))", "def is_date_string_valid(date: str) -> bool:\n valid = False\n if type(date) is str:\n # check if formated as dd/mm/yy\n if len(date) == 8 and (date[2] == \"/\" and date[5] == \"/\"):\n valid = True\n if not valid:\n print(\"ERROR: '\" + date + \"' is not a valid date string\")\n exit(1)\n return valid", "def validate(self):\n\n if not (self.from_currency and self.to_currency):\n return \"Please you should provide two currencies\"\n if not self.date:\n self.date = \"latest\"\n else:\n try:\n datetime.strptime(self.date, \"%Y-%m-%d\")\n except ValueError as err:\n return str(err)", "def validate_date(date_str):\n\ttry:\n\t\treturn (datetime.strptime(date_str, \"%Y-%m-%d\"), \"Date format matched\")\n\texcept Exception as 
e:\n\t\tlog.error(sys.exc_info()[0], e)\n\t\treturn (None, \"Date should be of YYYY-MM-DD format\")", "def breakdate(date):\n match = re.search(r'(\\d+)\\s(\\w+)\\s(\\d+)',date)\n day = 0\n month = ''\n year = 0\n if not match:\n sys.stderr.write('\\nError in reading date!!!\\n')\n sys.exit(1)\n day = int(match.group(1))\n month = match.group(2)\n year = int(match.group(3))\n month = month.lower()\n \n if month[:3] == 'jan':\n month = 1\n elif month[:3] == 'feb':\n month = 2\n elif month[:3] == 'mar':\n month = 3\n elif month[:3] == 'apr':\n month = 4\n elif month[:3] == 'may':\n month = 5\n elif month[:3] == 'jun':\n month = 6\n elif month[:3] == 'jul':\n month = 7\n elif month[:3] == 'aug':\n month = 8\n elif month[:3] == 'sep':\n month = 9\n elif month[:3] == 'oct':\n month = 10\n elif month[:3] == 'nov':\n month = 11\n elif month[:3] == 'dec':\n month = 12\n return (day,month,year)", "def _validate(self, date, format):\n try:\n datetime.datetime.strptime(date, format) # format = \"%m/%d/%Y\"\n return True\n except ValueError:\n return False", "def _validate(self, date, format):\n try:\n datetime.datetime.strptime(date, format) # format = \"%m/%d/%Y\"\n return True\n except ValueError:\n return False", "def test_date_entry_returns_correct_value_for_date(self):\n date_string = \"2018-01-21\"\n date_format = settings.DATE_FORMATS['iso 8601']\n self.menu.OPTIONS['date format'] = date_format\n\n user_input = [date_string]\n\n with patch('builtins.input', side_effect=user_input):\n result = self.menu.date_entry()\n\n expected_result = (\n None,\n datetime.datetime.strptime(date_string,\n date_format['datetime format'])\n )\n\n self.assertEqual(result, expected_result)", "def test_validate_date_entry_returns_correct_ValueError(self):\n date_string = \"2018-21-01\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_text = \"{} is not valid in format {}\".format(\n date_string,\n date_format['UI format']\n )\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def check_date(date):\r\n try:\r\n d_max = str(datetime.today() + timedelta(days=5))\r\n d_max = d_max[:10]\r\n date_list = date.strip().split(\"-\")[-1]\r\n d_max = d_max.strip().split(\"-\")\r\n for i in range(3):\r\n if int(date_list[i]) > int(d_max[i]):\r\n return False\r\n return True\r\n except Exception as r:\r\n # print(Exception)\r\n return None", "def _prompt_user(self):\n print '\\nPlease enter the ff. Just leave blank to accept default.\\n'\n self._handle_param(param='start',\n msg='Starting date (Ex. Feb 1, 2016): ')\n self._handle_param(param='end',\n msg='Ending date (Ex. Aug 1, 2016): ')", "def get_month_from_user():\n while True:\n month = input('Select month to explore. Enter from january, february, march, april, may, june or all: ').lower()\n\n if month in VALID_MONTHS:\n confirm = input(\"You have selected {}. Press 'y' to confirm: \".format(month.title()))\n\n if confirm == 'y':\n break\n else:\n print(\"Try again.\\n\")\n else:\n print(\"Invalid input: {}. 
Try again.\\n\".format(month))\n return month", "def condition(self, year, month, day, lastday, leapday):\n try:\n if len(day) == 0 or int(day) > int(lastday):\n if int(month) == 2 and day == leapday:\n Input.change_display(self, self.entries[4],\n 'Not a leap year')\n else:\n Input.change_display(self, self.entries[4],\n 'Enter day between 1-' + lastday)\n elif int(day) <= int(lastday):\n Input.change_display(self, self.entries[3], #Weekday message\n Output.message(self, year, month, day))\n except:\n Input.change_display(self, self.entries[4],\n 'Enter day between 1-' + lastday)", "def read_day_range(where):\n\twhile True:\n\t\tif (where == 'start'):\n\t\t\t_day = input(\"Introduceti ziua de inceput: \")\n\t\telif (where == 'end'):\n\t\t\t_day = input(\"Introduceti ziua de sfarsit: \")\n\t\telse:\n\t\t\traise NameError\n\t\ttry:\n\t\t\t_day = int(_day)\n\t\t\tif (not is_in_range(_day, 0, VALID_DAY)):\n\t\t\t\tprint(\"Ziua invalida.\")\t\n\t\t\telse:\n\t\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"Ziua invalida, introduceti un intreg.\")\n\treturn (_day)", "def print_choice_menu():\n month1 = input(\"Give the month as 4, 5, 6, 7, 8, or 9: \")\n print()\n day1 = input(\"Give the day as 1, 2, ..., 29, 30, or 31: \")\n print()\n\n try:\n month1 = int(month1)\n day1 = int(day1)\n\n if month1 in [4, 5, 6, 7, 8, 9]:\n if day1 in list(range(1, 32)):\n if day1 == 31 and month1 in [4, 6, 9]:\n print(\"Invalid Date!\")\n sleep(2)\n return 0, 0\n elif month1 > datetime.now().month or (month1 == datetime.now().month and day1 > datetime.now().day):\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n else:\n sleep(2)\n return month1, day1\n else:\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n else:\n print(\"Date out of bounds\")\n sleep(2)\n return 0, 0\n\n except ValueError:\n print(\"User Error\")\n sleep(2)\n return 0, 0", "def week_init():\n week = input('Week to check: MM/DD/YYYY\\n')\n week = dtt.datetime.strptime(week,'%m/%d/%Y') #turns input to a datetime\n beforeday = input('Check days before date (Press enter to use today): MM/DD/YYYY\\n') or dtt.date.today()\n if (beforeday != dtt.date.today()):\n beforeday = dtt.datetime.strptime(beforeday,'%m/%d/%Y')\n return week, beforeday", "def getBugsToday(myDay):\r\n #set bugs_today as neg one to accept zero as an input\r\n bugs_today = -1\r\n while bugs_today < 0 :\r\n myBugs_Validation = (input(u'Enter the number of bugs collected on day ' + str(myDay) + ' : '))\r\n #call my getValidation to check values entered\r\n bugs_today = getValidation(myBugs_Validation)\r\n #check if user entered a valid number\r\n if bugs_today == -1:\r\n print('\\nPlease enter the number of bugs collected. 
\\nEnter a whole integer number >= 0')\r\n \r\n return bugs_today", "def valid_date(s):\n try:\n date = datetime.strptime(s, \"%Y%m%d\")\n return date\n except ValueError:\n msg = \"Not a valid date: '{0}'\".format(s)\n raise argparse.ArgumentTypeError(msg)", "def validate_date_format(date):\n return re.match(DATE_FORMAT, date)", "def parseDate(date):\n formats = [\n \"D MMM YY, hh:mm a\", \n \"YYYY-MM-DDTHH:mm:ss+00:00\", \n \"ddd, D MMM YYYY HH:mm:ss +0530\", # NDTV\n \"ddd, D MMM YYYY HH:mm:ss +0100\", # skynews\n \"ddd, D MMM YYYY HH:mm:ss -0400\", # reuters\n \"D MMM, YYYY\", # espn cricket\n \"ddd, D MMM YYYY HH:mm:ss GMT\", # times of india\n \"ddd, D MMM YYYY HH:mm:ss +0200\", # lifrea\n \"ddd, D MMM YYYY HH:mm:ss +0000\", # linux, ubuntu\n \"ddd, D MMM YYYY HH:mm:ss -0700\", # iTunes\n ]\n\n for f in formats:\n try:\n parsed_date = tryDateFormat(date, f)\n return parsed_date.format(\"D MMM YY, hh:mm a\")\n except Exception as e:\n pass\n else:\n return \"Invalid date\"", "def validate_date(value):\n if date_regex.fullmatch(value):\n return True\n else:\n return False", "def format_date(txt):\n pattern1 = r\"(?P<day>0?[1-9]|[12][0-9]|3[01])([/.-]|\\s|\\s/\\s|\\.\\s)\" \\\n r\"(?P<month>0[1-9]|1[012])([/.-]|\\s?|\\s/\\s|\\.\\s)\" \\\n r\"(?P<y1>19|20)(?P<y2>[0-9][0-9])\"\n txt = re.sub(pattern1, r\"\\g<y1>\\g<y2>\\g<month>\\g<day>\", txt)\n\n pattern_month = \"(?P<month>janvier|janv.|févr.|février|mars|avr.|avril|mai|juin|juill.|juillet|août|sept.|septembre|oct.|octobre|nov.|novembre|déc.|décembre)\"\n pattern2 = r\"(?P<day>0?[1-9]|[12][0-9]|3[01]|(1er)|(1°)) %s (?P<y1>19|20)(?P<y2>[0-9][0-9])\" % pattern_month\n txt = re.sub(pattern2, convert_month, txt)\n\n pattern3 = r\"(?P<day>0?[1-9]|[12][0-9]|3[01])([/.-]|\\s|(\\s/\\s)|(.\\s))\" \\\n r\"(?P<month>0[1-9]|1[012])([/.-]|\\s|(\\s/\\s)|(.\\s))\" \\\n r\"(?P<y2>[0-9][0-9])\"\n txt = re.sub(pattern3, convert_date_year_2_digits, txt)\n return txt", "def get_day_of_week_from_user():\n while True:\n day = input('Select the month to explore. Enter from monday, tuesday, wednesday, thursday, friday, '\n 'saturday, sunday or all: ').lower()\n\n if day in VALID_DAYS:\n confirm = input(\"You have selected {}. Press 'y' to confirm: \".format(day.title()))\n\n if confirm == 'y':\n break\n else:\n print(\"Try again.\\n\")\n else:\n print(\"Invalid input: {}. Try again.\\n\".format(day))\n return day", "def check_date(self):\n parse_date = datetime.datetime.strptime(self.json_parsed_file['date'], \"%d %b %Y\")\n current_day = datetime.datetime.now()\n\n # Check that the parsed date is older then the current date.\n if parse_date > current_day:\n self.output_message += \"Issue detected on date of the progress report. 
Parsed date: {}\\n\".format(parse_date)\n self.is_parsed_pdf_valid = False", "def get_year():\n try:\n year = input(\"Enter Year: \")\n year = int(year)\n if year > 2021 or year < 2000:\n os.system('cls')\n print(\"Accepted Values: 2000-2021\")\n return get_year()\n else:\n os.system('cls')\n return year\n except ValueError:\n os.system('cls')\n print(\"Accepted Values: 2000-2021\")\n return get_year()", "def check_for_date(date_str):\r\n try:\r\n if rex.match(\"\\d\\d\\d\\d-\\d\\d-\\d\\d\", str(date_str)) is None:\r\n raise sqlErr(\"Not a Date!\")\r\n except Exception as e:\r\n raise e", "def validate_date_args(self):\n\n date_args = self.args[1:3]\n\n if not all([self.validate_date_format(x) for x in date_args]):\n raise InvalidDateFormatError", "def valid_date(s):\n try:\n return datetime.datetime.strptime(s, \"%Y-%m-%d\")\n except ValueError:\n print(\"Error: Not a valid date: '{0}'.\".format(s))\n\n return", "def check_leap_year():\n while True:\n # Taking user input for year and converting to int\n year_input = int(input(\"Please input a year: \"))\n\n # Checking the condition for Leap year\n leap_check = ((year_input % 4 == 0) and (year_input % 100 != 0)) or (year_input % 400 == 0)\n\n # Manipulating a string based on computation\n leap = \" NOT\"\n if leap_check:\n leap = \"\"\n leap_result = ' is' + str(leap) + ' a leap'\n\n # Printing output of the check\n print(\"The given year \" + str(year_input) + str(leap_result) + \" year\")\n print('--------------------')", "def is_ddmmyy (val):\n if len(val) == 6 and count_digits(val) == 6:\n return is_valid_day(val[0:2]) and is_valid_month(val[2:4])\n return False", "def is_date(string, fuzzy=False):\n # try: \n # parse(string, fuzzy=fuzzy)\n # return True\n\n # except ValueError:\n # return False\n \n datePattern = r'[0-9]+\\/[0-9]+\\/[0-9]{2,4}'\n if re.search(datePattern, string):\n #print(string,'is Date')\n return True\n else:\n return False", "def user_input():\n #Error messages\n num_invalid = \"Invalid input, please insert a valid number\"\n str_invalid = \"Invalid input, please try again following the input conventions requested\"\n\n #Model Type\n model_type = input(\"What kind of models do you want to build? (intravenous bolous (ib) / subcutaneous (sc)): \")\n model_type = model_type.lower()\n while model_type not in {'ib', 'sc'}:\n print(str_invalid)\n model_type = input(\"What kind of models do you want to build? (intravenous bolous (ib) / subcutaneous (sc)): \")\n model_type = model_type.lower()\n\n #Compound\n compound = input(\"What compound or drug are you using?\")\n \n #Dose Type\n dose_type = input(\"How is the dose delivered? Constantly over time (c), Instantaneously (i) or Repeated instantaneous doses (r): \")\n dose_type = dose_type.lower()\n while dose_type not in {\"c\",\"i\",\"r\"}:\n print(str_invalid)\n dose_type = input(\"How is the dose delivered? Constantly over time (c), Instantaneously (i) or Repeated instantaneous doses (r): \")\n dose_type = dose_type.lower()\n\n if dose_type == 'c':\n while True:\n try:\n dose = float(input(\"What is the dose of \" + compound + \" that you want to test? (units in ng per hour): \"))\n break\n except:\n print(num_invalid)\n dose_mass = None\n time_dose = None\n num_dose = None\n \n elif dose_type == 'i':\n while True:\n try:\n dose_mass = float(input(\"What is the mass of the dose of \" + compound + \" that you want to test? 
(units in ng): \"))\n break\n except:\n print(num_invalid)\n dose = None\n time_dose = None\n num_dose = None\n\n elif dose_type == 'r':\n while True:\n try:\n dose_mass = float(input(\"What is the mass of the dose of \" + compound + \" that you want to test? (units in ng): \"))\n break\n except:\n print(num_invalid)\n while True:\n try:\n time_dose = float(input(\"What time period are the doses given over? (units in hours): \"))\n break\n except:\n print(num_invalid)\n while True:\n try:\n num_dose = float(input(\"How many doses are given? - this program assumes that doses are evenly spaced throughout the time period: \"))\n break\n except:\n print(num_invalid)\n dose = None\n \n #Length of simulation time\n while True:\n try:\n len_assay = float(input(\"What time period would you like to simluate the model? (units in hours): \"))\n break\n except:\n\t print(num_invalid)\n \n #Interval times\n while True:\n try:\n len_interval = float(input(\"What interval time would you like in the simulation? (units in hours): \"))\n break\n except:\n print(num_invalid)\n\n #clearance\n while True:\n try:\n clearance = float(input(\"What is the clearance rate? (units in ng/hour): \"))\n break\n except:\n print(num_invalid)\n\n \n #compartments\n compartments = []\n\n if model_type == \"ib\":\n while True:\n try:\n main_compart = input(\"Enter volume (L), transition rate (ng/hour) for the main compartment (all seperated by spaces - eg: 5 25 ): \")\n main_compart_split = main_compart.split()\n main_compart_split = [float(i) for i in main_compart_split]\n break\n except:\n print(str_invalid)\n\n main_compart_split.append(str(\"Main\"))\n compartments.append(main_compart_split)\n\n\n while True:\n try:\n num_peripherals = float(input(\"How many peripheral compartments do you want to test?: \"))\n break\n except:\n\t print(num_invalid)\n\n num_peripherals = int(num_peripherals)\n\n if num_peripherals > 0:\n \n for i in range(num_peripherals):\n while True:\n try:\n compart = input(\"Enter volume (L), transition rate (ng/hour) of the compartment (all seperated by spaces - eg: 5 25): \")\n compart_list = compart.split()\n compart_list = [float(i) for i in compart_list]\n break\n \n except:\n print(str_invalid)\n\n compart_list.append(str(\"Peripheral\"))\n compart_list.append(str(input(\"Please enter the name of the compartment (please ensure correct spelling): \")))\n compartments.append(compart_list)\n\n compart_list = None\n \n elif model_type == \"sc\":\n while True:\n try:\n sub_compart = input(\"Enter volume (L), transition rate (ng/hour) for the sub compartment (all seperated by spaces - eg: 5 25 ): \")\n sub_compart_split = sub_compart.split()\n sub_compart_split = [float(i) for i in sub_compart_split]\n break\n except:\n print(str_invalid)\n\n sub_compart_split.append(str(\"Sub\"))\n compartments.append(sub_compart_split)\n\n while True:\n try:\n main_compart = input(\"Enter volume (L), transition rate (ng/hour) for the main compartment (all seperated by spaces - eg: 5 25 ): \")\n main_compart_split = main_compart.split()\n main_compart_split = [float(i) for i in main_compart_split]\n break\n\n except:\n print(str_invalid)\n\n main_compart_split.append(str(\"Main\"))\n compartments.append(main_compart_split)\n\n while True:\n try:\n num_peripherals = float(input(\"How many peripheral compartments do you want to test?: \"))\n break\n except:\n\t print(num_invalid)\n \n num_peripherals = int(num_peripherals)\n\n if num_peripherals > 0:\n \n for i in range(num_peripherals):\n while True:\n try:\n compart 
= input(\"Enter volume (L), transition rate (ng/hour) of the compartment (all seperated by spaces - eg: 5 25): \")\n compart_list = compart.split()\n compart_list = [float(i) for i in compart_list]\n break\n \n except:\n print(str_invalid)\n \n compart_list.append(str(\"Peripheral\"))\n compart_list.append(str(input(\"Please enter the name of the compartment (please ensure correct spelling): \")))\n compartments.append(compart_list)\n compart_list = None\n\n #visualisation\n vis = input(\"Would you like to generate a graph? (Y/N): \")\n while vis not in {'Y','y','N','n'}:\n print(str_invalid)\n vis = input(\"Would you like to generate a graph? (Y/N): \") \n\n #unix timestamp\n curr_datetime = time.time()\n curr_datetime = str(curr_datetime)\n\n\n print(\"Thank you! Building model, please wait...\")\n\n\n return {\n 'model_type': model_type,\n 'compound': compound,\n 'dose_type': dose_type,\n 'dose':dose,\n 'dose_mass': dose_mass,\n 'time_dose': time_dose,\n 'num_dose': num_dose,\n 'len_assay':len_assay,\n 'len_interval':len_interval,\n 'clearance':clearance,\n 'compartments':compartments,\n 'vis':vis,\n 'curr_datetime':curr_datetime\n }", "def test():\n assert which_date('2016/02/10','35 days') == '2016/03/16'\n assert which_date('2016/12/21','3 weeks') == '2017/01/11'\n assert which_date('2015/01/17','1 week') == '2015/01/24'\n print(\"All tests completed.\")", "def get_day(month_name, num_days):\n display_month(month_name, num_days)\n day = input(\"Enter Day: \")\n try:\n day = int(day)\n if day > num_days or day < 1:\n os.system('cls')\n print(\"Accepted Values: 1-\" + str(num_days))\n return get_day(month_name, num_days)\n else:\n return day\n except ValueError:\n os.system('cls')\n print(\"Accepted Values: 1-\" + str(num_days))\n return get_day(month_name, num_days)", "def _read_standard_date(match):\n day_string = match.group(1)\n\n if not day_string.isnumeric():\n return\n\n d = int(day_string)\n\n month_string = match.group(2)\n m = None\n\n for n, candidate in enumerate(names.months):\n if candidate.sanitized == month_string:\n m = n\n break\n else:\n return\n\n y = roman_to_decimal(match.group(3))\n\n return (y, m, d)", "def valid_args(args):\n is_valid = True\n\n # valid date format?\n try:\n datetime.datetime(year=args.year, month=args.month, day=args.day)\n except Exception:\n traceback.print_exc()\n is_valid = False\n\n print(f\"Arguments: {args}\")\n return is_valid", "def normalize_dates(end_date, start_date, today_date):\n if start_date < today_date or end_date < today_date:\n return {'status': False, 'message': 'Sorry, you cannot enter a past date'}\n elif end_date < start_date:\n return {'status': False, 'message': 'Sorry, end date must be after start date'}\n else:\n return {'status': True, 'message': 'Validation successful'}", "def test_date_format():\n assert gather_stock_returns(api_key, 'AAPL', '2018', sell_date) == msg2", "def create(service, user, email):\n day,year,month,hour,minutes = 0,0,0,0,0\n\n while day < 1 or day > 31 or month < 1 or month > 12 or year < 1:\n dateinput = input(\"Enter date (day/month/year): \") .strip()\n if \"/\" in dateinput:\n date = dateinput.split('/') \n if len(date) != 3 or not date[0].isdigit()\\\n or not date[1].isdigit() or not date[2].isdigit():\n print(\"date should be in this format day/month/year\")\n\n continue\n \n else:\n print(\"date should be in this format day/month/year\")\n\n continue\n day = int(date[0])\n month = int(date[1])\n year = int(date[2])\n if day < 1 or day > 31:\n print(\"date is invalid.\")\n if month < 
1 or month > 12:\n print(\"month is invalid.\")\n if year < 1:\n print(\"year is invalid.\")\n\n \"\"\"\n Check the date if is passed and ask\n \"\"\"\n my_date = datetime.datetime(year, month, day,23,30)\n \n if my_date < datetime.datetime.today():\n message2 = \"event cannot be created , day has passed.\"\n print(\"{} {}\".format(user, message2))\n \n print(f'Bye {user}')\n\n return message2\n \n\n while hour<7 or hour>17 or minutes<0 or minutes>59\\\n or (hour==17 and min>30):\n timeinput = input(\"Enter time (HH:MM): \").strip()\n if \":\" in timeinput:\n time = timeinput.split(\":\")\n if len(time) != 2 or not time[0].isdigit()\\\n or not time[1].isdigit():\n print(\"time should be in this format HH:MM\")\n\n continue\n else:\n print(\"time should be in this format HH:MM\")\n\n continue\n hour = int(time[0])\n minutes = int(time[1]) \n\n if hour < 7 or hour > 17:\n print(\"Hour should be between 7 and 17\")\n if minutes < 0 or minutes > 59:\n print(\"minutes should be between 0 and 59\")\n if hour == 17 and minutes > 30:\n print(\"minutes should be between 00-30 since we close at 18:00\") \n\n hour2 = hour \n minutes2 = minutes+30\n\n if minutes >= 30:\n minutes2 = 0\n hour2 += 1\n add = minutes - 30\n minutes2 += add \n\n my_date = datetime.datetime(year, month, day, hour, minutes)\n\n \n if my_date < datetime.datetime.now():\n message2 = \"event cannot be created , time has passed.\"\n print(\"{} {}\".format(user,message2))\n \n print(f'Bye {user}')\n\n return message2\n \n else:\n startday = str(year)+\"-\"+str(month)+\"-\"+str(day)\n starttime = str(hour)+\":\"+str(minutes)\n endtime = str(hour2)+\":\"+str(minutes2)\n\n \"\"\"\n Checking if you have created Event Before\n - 30 minutes before start time\n - During Available Event\n - Before the End time\n \"\"\"\n \n if createdevent(service,email,my_date):\n message = \"You will be busy during that time\"\n \n return message\n\n \n \"\"\"\n Creating the event\n \"\"\"\n\n summary,description = \"\",\"\"\n while summary ==\"\":\n summary = input(\"Name of your topic: \").strip()\n \n while description == \"\":\n description = input(\"Describe your topic: \").strip() \n\n confirm = \"\"\n while confirm.lower() != 'y' or confirm.lower() != 'n':\n confirm = input(\"Confirm event?(y/n): \").strip()\n if confirm.lower() == 'y' or confirm.lower() == 'n':\n\n break\n \n if confirm.lower() == 'y':\n event=do_create(service,summary,description,startday,starttime,\\\n endtime,user,email) \n message = \"Event created successfully\"\n print('{}\\n - Calender Link: {}'.format(message,\\\n event.get('htmlLink'))) \n \n else:\n message = \"Event not created\"\n print(message)\n\n return message", "def validate_date(date):\n\n if isinstance(date, datetime):\n try:\n date = dto_to_str(date)\n except ValueError:\n pass # What to do in this case?\n else:\n return date\n\n if isinstance(date, str) or isinstance(date, unicode):\n try: # Convert to dto then back to string to ensure format is as expected\n date = str_to_dto(date)\n date = dto_to_str(date)\n except ValueError:\n pass\n else:\n return date\n\n raise DataValidationError(\"Date, {}, is not of an expected type (datetime object or string in format YYYYMMDD or MM/DD/YYYY\".format(date))", "def test_invalid_date(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1233, 'date_of_expense': 
'fgjfj'})\n self.assertEqual(res.status_code, 400)\n results = json.loads(res.data)\n self.assertEqual(results['message'], 'The date fgjfj does not match the format DD-MM-YYYY')", "def guess_date_format(dates_list):\n\n # Check if the date to be guessed is a list of dates or single date string\n if isinstance(dates_list, str):\n dates_list = [dates_list]\n\n date_separator_res = []\n default_date_format = \"%d-%m-%Y\"\n\n # Loop through the dates and find the common separator in all dates\n for index, date_string in enumerate(dates_list):\n try:\n # Strip spaces in beginning and end of string\n dates_list[index] = date_string.strip()\n date_separator_temp_res = set('[_ -+/\\']').intersection(\n date_string)\n if not date_separator_temp_res:\n continue\n else:\n date_separator_res.append(list(date_separator_temp_res)[0])\n except:\n continue\n\n # Find the top most common separator based on occurences\n try:\n date_separator = Counter(date_separator_res).most_common(1)\n if date_separator:\n date_separator = date_separator[0][0]\n else:\n date_separator = \"no_space\"\n except Exception as error:\n error_msg = \"Error : {} occurred when trying to guess the date \" \\\n \"format. returning `{}` as default date format\".format(\n error, default_date_format)\n logging.error(error_msg)\n return default_date_format\n\n if '/' == date_separator:\n date_formats = [\"%d/%m/%Y\", \"%m/%d/%Y\", \"%Y/%m/%d\",\n \"%Y/%d/%m\", \"%m/%Y/%d\",\n \"%d/%m/%y\", \"%m/%d/%y\", \"%y/%m/%d\",\n \"%y/%d/%m\", \"%m/%y/%d\",\n \"%d/%b/%Y\", \"%b/%d/%Y\", \"%Y/%b/%d\",\n \"%Y/%d/%b\", \"%b/%Y/%d\",\n \"%d/%b/%y\", \"%b/%d/%y\", \"%y/%b/%d\",\n \"%y/%d/%b\", \"%b/%y/%d\",\n \"%d/%B/%Y\", \"%B/%d/%Y\", \"%Y/%B/%d\",\n \"%Y/%d/%B\", \"%B/%Y/%d\",\n \"%d/%B/%y\", \"%B/%d/%y\", \"%y/%B/%d\",\n \"%y/%d/%B\", \"%B/%y/%d\",\n \"%m/%Y\", \"%m/%Y\", \"%Y/%m\",\n \"%Y/%m\", \"%m/%Y\",\n \"%m/%y\", \"%m/%y\", \"%y/%m\",\n \"%y/%m\", \"%m/%y\",\n \"%b/%Y\", \"%b/%Y\", \"%Y/%b\",\n \"%Y/%b\", \"%b/%Y\",\n \"%b/%y\", \"%b/%y\", \"%y/%b\",\n \"%y/%b\", \"%b/%y\",\n \"%B/%Y\", \"%B/%Y\", \"%Y/%B\",\n \"%Y/%B\", \"%B/%Y\",\n \"%B/%y\", \"%B/%y\", \"%y/%B\",\n \"%y/%B\", \"%B/%y\"]\n\n elif '-' == date_separator:\n date_formats = [\"%d-%m-%Y\", \"%m-%d-%Y\", \"%Y-%m-%d\",\n \"%Y-%d-%m\", \"%m-%Y-%d\",\n \"%d-%m-%y\", \"%m-%d-%y\", \"%y-%m-%d\",\n \"%y-%d-%m\", \"%m-%y-%d\",\n \"%d-%b-%Y\", \"%b-%d-%Y\", \"%Y-%b-%d\",\n \"%Y-%d-%b\", \"%b-%Y-%d\",\n \"%d-%b-%y\", \"%b-%d-%y\", \"%y-%b-%d\",\n \"%y-%d-%b\", \"%b-%y-%d\",\n \"%d-%B-%Y\", \"%B-%d-%Y\", \"%Y-%B-%d\",\n \"%Y-%d-%B\", \"%B/%Y/%d\",\n \"%d-%B-%y\", \"%B-%d-%y\", \"%y-%B-%d\",\n \"%y-%d-%B\", \"%B-%y-%d\",\n \"%m-%Y\", \"%m-%Y\", \"%Y-%m\",\n \"%Y-%m\", \"%m-%Y\",\n \"%m-%y\", \"%m-%y\", \"%y-%m\",\n \"%y-%m\", \"%m-%y\",\n \"%b-%Y\", \"%b-%Y\", \"%Y-%b\",\n \"%Y-%b\", \"%b-%Y\",\n \"%b-%y\", \"%b-%y\", \"%y-%b\",\n \"%y-%b\", \"%b-%y\",\n \"%B-%Y\", \"%B-%Y\", \"%Y-%B\",\n \"%Y-%B\", \"%B/%Y\",\n \"%B-%y\", \"%B-%y\", \"%y-%B\",\n \"%y-%B\", \"%B-%y\"]\n\n elif ' ' == date_separator:\n date_formats = [\"%d %m %Y\", \"%m %d %Y\", \"%Y %m %d\",\n \"%Y %d %m\", \"%m %Y %d\",\n \"%d %m %y\", \"%m %d %y\", \"%y %m %d\",\n \"%y %d %m\", \"%m %y %d\",\n \"%d %b %Y\", \"%b %d %Y\", \"%Y %b %d\",\n \"%Y %d %b\", \"%b %Y %d\",\n \"%d %b %y\", \"%b %d %y\", \"%y %b %d\",\n \"%y %d %b\", \"%b %y %d\",\n \"%d %B %Y\", \"%B %d %Y\", \"%Y %B %d\",\n \"%Y %d %B\", \"%B %Y %d\",\n \"%d %B %y\", \"%B %d %y\", \"%y %B %d\",\n \"%y %d %B\", \"%d %y %B\",\n \"%m %Y\", \"%m %Y\", \"%Y %m\",\n \"%Y 
%m\", \"%m %Y\",\n \"%m %y\", \"%m %y\", \"%y %m\",\n \"%y %m\", \"%m %y\",\n \"%b %Y\", \"%b %Y\", \"%Y %b\",\n \"%Y %b\", \"%b %Y\",\n \"%b %y\", \"%b %y\", \"%y %b\",\n \"%y %b\", \"%b %y\",\n \"%B %Y\", \"%B %Y\", \"%Y %B\",\n \"%Y %B\", \"%B %Y\",\n \"%B %y\", \"%B %y\", \"%y %B\",\n \"%y %B\", \"%y %B\"]\n\n elif 'no_space' == date_separator:\n date_formats = [\"%d%m%Y\", \"%m%d%Y\", \"%Y%m%d\",\n \"%Y%d%m\", \"%m%Y%d\",\n \"%d%m%y\", \"%m%d%y\", \"%y%m%d\",\n \"%y%d%m\", \"%m%y%d\",\n \"%d%b%Y\", \"%b%d%Y\", \"%Y%b%d\",\n \"%Y%d%b\", \"%b%Y%d\",\n \"%d%b%y\", \"%b%d%y\", \"%y%b%d\",\n \"%y%d%b\", \"%b%y%d\",\n \"%d%B%Y\", \"%B%d%Y\", \"%Y%B%d\",\n \"%Y%d%B\", \"%b%Y%d\",\n \"%d%B%y\", \"%B%d%y\", \"%y%B%d\",\n \"%y%d%B\", \"%B%y%d\",\n \"%m/%Y\", \"%m/%Y\", \"%Y/%m\",\n \"%Y/%m\", \"%m/%Y\",\n \"%m/%y\", \"%m/%y\", \"%y/%m\",\n \"%y/%m\", \"%m/%y\",\n \"%b/%Y\", \"%b/%Y\", \"%Y/%b\",\n \"%Y/%b\", \"%b/%Y\",\n \"%b/%y\", \"%b/%y\", \"%y/%b\",\n \"%y/%b\", \"%b/%y\",\n \"%B/%Y\", \"%B/%Y\", \"%Y/%B\",\n \"%Y/%B\", \"%B/%Y\",\n \"%B/%y\", \"%B/%y\", \"%y/%B\",\n \"%y/%B\", \"%B/%y\"]\n\n date_format_res = []\n now = datetime.now()\n # Loop through different formats and find which format matches the\n # maximum for all dates\n for date_string in dates_list:\n for date_format in date_formats:\n try:\n date_obj = datetime.strptime(date_string.strip(), date_format)\n if date_obj and (date_obj <= now):\n date_format_res.append(date_format)\n except ValueError:\n continue\n\n # Take the top most matched format and return it\n try:\n if date_format_res:\n date_format_common = Counter(date_format_res).most_common(1)\n if date_format_common:\n date_format = date_format_common[0][0]\n logging.info(\"Date format detected: `{}`\".format(date_format))\n return date_format\n else:\n logging.warning(\"Unable to determine the date format, default date format returned: `{}`\".format(default_date_format))\n return default_date_format\n else:\n logging.warning(\"Unable to determine the date format, default date format returned: `{}`\".format(default_date_format))\n return default_date_format\n except Exception as error:\n error_msg = \"Error : {} occurred when trying to guess the date \" \\\n \"format. returning `{}` as default date format\".format(error, default_date_format)\n logging.error(error_msg)\n return default_date_format", "def validate_date(column_name, value, date_format, column_data_type=\"date\"):\n value = value.replace(\"T\", \" \")\n dtpart = value.split(\" \")\n value = dtpart[0]\n try:\n datetime.strptime(value, date_format)\n return None\n except ValueError:\n return \"{0} : '{1}' is not a valid {2}\".format(column_name, value, column_data_type)", "def is_valid_day_number(gender_number: int, year_number: int, month_number: int, day_number: int) -> bool:\n monthlist1 = [1, 3, 5, 7, 8, 10, 12]\n monthlist2 = [4, 6, 9, 11]\n monthlist3 = [2]\n if month_number in monthlist1:\n if day_number in range(1, 32):\n return True\n else:\n return False\n elif month_number in monthlist2:\n if day_number in range(1, 31):\n return True\n else:\n return False\n elif month_number in monthlist3:\n if is_leap_year(get_full_year(gender_number, year_number)):\n if day_number in range(1, 30):\n return True\n else:\n return False\n else:\n if day_number in range(1, 28):\n return True\n else:\n return False", "def valid_date_type(arg_date_str):\n try:\n return dt.datetime.strptime(arg_date_str, \"%Y-%m-%d\")\n except ValueError:\n msg = \"Given Date ({0}) not valid! 
Expected format, YYYY-MM-DD!\".format(arg_date_str)\n raise argparse.ArgumentTypeError(msg)", "def test_convert_date_error(self):\n try:\n convert_to_date('N/A', FORMAT_CALENDAR)\n except ValueError as error:\n self.assertEqual(type(error), ValueError)", "def get_day():\n return handle_invalid_inputs(question_4, days)", "def _handle_bad_input_date(f):\n def date_handler_wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n import re\n e_str = \"{}\".format(e)\n for r in [\".*date/time field value out of range: \\\"(.*)\\\".*LINE\",\n \".*invalid input syntax for type timestamp: \\\"(.*)\\\".*\",\n \".*timestamp out of range: \\\"(.*)\\\".*\"]:\n p = re.compile(r, re.DOTALL)\n m = p.match(e_str)\n if m and len(m.groups()) > 0:\n bad_date = m.group(1)\n raise wsme.exc.ClientSideError(_(\n \"Invalid date '{}' specified\".format(bad_date)))\n raise\n return date_handler_wrapper", "def parse_date(text):\n\n for fmt in ('%Y-%m-%d', '%d-%m-%Y'):\n try:\n return datetime.datetime.strptime(text, fmt)\n except ValueError:\n pass\n\n raise ValueError('Dates should be in YYYY-MM-DD or DD-MM-YYYY format')", "def test_validate_date_entry_returns_correct_outOfBounds_if_past(self):\n date_string = \"1899-12-12\"\n date_format = settings.DATE_FORMATS['iso 8601']\n\n error_string = \"dates before {} are not permitted\"\n date_fmt = \"%Y-%m-%d\"\n earliest_date = self.menu.OPTIONS['earliest allowed date']\n earliest_date_string = earliest_date.strftime(date_fmt)\n\n error_text = error_string.format(earliest_date_string)\n\n result = self.menu.validate_date_entry(date_string, date_format)\n\n expected_result = (error_text, None)\n\n self.assertEqual(result, expected_result)", "def test_date_accept_date_minus_many_days(self):\n spi_search = \"find date 2011-02-24 - 946\"\n inv_search = \"year:2008-07-23\"\n self._compare_searches(inv_search, spi_search)", "def clean_date(date):\n months = [\n \"JAN\",\n \"FEB\",\n \"MAR\",\n \"APR\",\n \"MAY\",\n \"JUN\",\n \"JUL\",\n \"AUG\",\n \"SEP\",\n \"OCT\",\n \"NOV\",\n \"DEC\",\n ]\n index = 0\n prefix = date[:7]\n result = \"\"\n data = date[7:].upper().strip()\n letter = word = False\n while index < len(data):\n if data[index] == \"0\" and not word:\n index = index + 1\n continue\n if data[index] == \" \":\n word = False\n else:\n word = True\n if data[index].isalpha:\n letter = True\n elif data[index].isdigit and letter:\n result = result + \" \"\n word = False\n result = result + data[index]\n index = index + 1\n\n for month in months:\n if month in result:\n match = re.search(r\"\" + month + \"\\w+\", result)\n if not match:\n logging.info(\n \"MONTH: {} RESULT: {} BUT MATCH NONE?\".format(month, result)\n )\n else:\n result = result.replace(str(match.group()), month)\n\n result = result.replace(\"ABOUT\", \"ABT\")\n result = result.replace(\"BEFORE\", \"BEF\")\n result = result.replace(\"AFTER\", \"AFT\")\n result = result.replace(\"BETWEEN\", \"BET\")\n result = result.replace(\"FROM\", \"\")\n result = result.replace(\"TO\", \"AND\")\n\n if \"AND\" in result and \"BET\" not in result:\n result = \"BET {0}\".format(result)\n\n if \"-\" in result:\n split = result.split(\"-\")\n if result[:1] == \"-\":\n result = \"BEF {0}\".format(split[1])\n elif result[-1:] == \"-\":\n result = \"AFT {0}\".format(split[0])\n elif len(split) == 2:\n result = \"BET {0} AND {1}\".format(split[0], split[1])\n\n while \" \" in result:\n result = result.replace(\" \", \" \")\n\n return \"{0}{1}\\n\".format(prefix, result)", "def search_date():\n 
while True:\n clear()\n print(dedent(\"\"\"\n What do you want to do? Enter a or b.\n a) Choose from a list of dates\n b) Search by a date range\n c) Return to search menu\n \"\"\"))\n choice = input(\"> \")\n if choice == \"a\":\n work_log.multiple_matches(type='date')\n elif choice == \"b\":\n work_log.search_date_range()\n elif choice == \"c\":\n break\n else:\n print(\"Please enter a valid choice\")\n time.sleep(3)", "def search_by_date(self, tl):\n print(\"Search by exact date\")\n date_str = input(\"Please use YYYYMMDD: \")\n try:\n date = datetime.datetime.strptime(date_str, utils.fmt)\n except ValueError as err:\n utils.print_error(err)\n return self.search_by_date(tl)\n else:\n return tl.findall_date(date)", "def test_date_accept_date_minus_days_with_year_wrap(self):\n spi_search = \"find date 2011-01-01 - 1\"\n inv_search = \"year:2010-12-31\"\n self._compare_searches(inv_search, spi_search)" ]
[ "0.7718624", "0.74047714", "0.7254131", "0.6943424", "0.681888", "0.6775938", "0.6571638", "0.65667284", "0.6505889", "0.6504818", "0.6378474", "0.6291643", "0.6249938", "0.61674076", "0.61129284", "0.61053425", "0.6080972", "0.6056492", "0.60186255", "0.5945199", "0.5924336", "0.58938646", "0.58560985", "0.58498544", "0.58328795", "0.58292073", "0.5808484", "0.5794331", "0.579351", "0.578675", "0.5781919", "0.5768636", "0.57528365", "0.5745066", "0.5737899", "0.5733818", "0.5723156", "0.5720656", "0.5684104", "0.56761795", "0.5675037", "0.5668196", "0.5645532", "0.56009734", "0.55762607", "0.5564588", "0.55465746", "0.55437136", "0.5536483", "0.5523686", "0.5512472", "0.5512472", "0.550821", "0.55075973", "0.5494201", "0.5482928", "0.5475394", "0.5467901", "0.5463037", "0.5455376", "0.54400563", "0.5420931", "0.5418981", "0.541791", "0.5396528", "0.53947055", "0.5384562", "0.5384549", "0.53809565", "0.5377506", "0.53753763", "0.53752506", "0.5362042", "0.5342967", "0.53210425", "0.5320499", "0.5319573", "0.5308538", "0.530538", "0.52821076", "0.52801645", "0.52673894", "0.52609277", "0.52591556", "0.52545106", "0.5247208", "0.5246091", "0.5236898", "0.52350354", "0.52341956", "0.52297664", "0.52269804", "0.5220929", "0.52185804", "0.5218488", "0.5215827", "0.5213405", "0.52023476", "0.5201267", "0.5200271" ]
0.8515413
0
Unicode representation of Match History
def __str__(self): return f"{str(self.team1)} vs {str(self.team2)} on {str(self.date)}"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toString(self) -> unicode:\n ...", "def toString(self) -> unicode:\n ...", "def __str__(self):\n return \"{}\".format(self._matches.keys())", "def unicode(self, irc, msg, args, query):\n url = \"http://unicodelookup.com/lookup?\"\n url = url + urlencode({\"q\": query, \"o\": 0})\n data = web.getUrl(url)\n try:\n data = json.loads(data)\n responses = []\n for result in data[\"results\"]:\n ucode = result[2].replace(\"0x\", \"U+\")\n name = unicodedata.name(\"{0}\".format(query))\n responses.append(\n \"%s (%s): %s [HTML: %s / Decimal: %s / Hex: %s]\"\n % (ucode, name, result[4], result[3], result[1], result[2])\n )\n response = \"; \".join(responses)\n irc.reply(response)\n except ValueError:\n irc.reply(\"No unicode characters matching /\" + query + \"/ found.\")", "def __str__(self):\n return self.get_ascii_trunk() + self.get_ascii_leaves()", "def test_repr_format(self):\n t = OneHotEncode(3)\n assert t.repr_format(\"asfa\") == \"OneHotEncode(asfa)\"", "def __unicode__(self):\n return str(self).decode('ascii')", "def __repr__(self):\t\n\t\treturn arabicRepr.repr(self.__dict__);", "def history_board(self):\n return np.array([fen.split(' ')[0] for fen in self.history])", "def __str__(self):\n return self.title.encode(\"utf-8\", \"replace\").lower() + \\\n \"\\t\" + self.artist.encode(\"utf-8\", \"replace\").lower()", "def to_unicode(data):\n return to_string(data)", "def testParseUnicode(self):\n test_file = self._GetTestFilePath([u'skydriveerr-unicode.log'])\n event_queue_consumer = self._ParseFile(self._parser, test_file)\n event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)\n\n self.assertEqual(len(event_objects), 19)\n\n # TODO: check if this test passes because the encoding on my system\n # is UTF-8.\n expected_text = (\n u'No node found named Passport-Jméno-člena')\n self.assertEqual(event_objects[3].text, expected_text)", "def find_unicode(self, modifier=4):\n wide = []\n matches = re.finditer(b'([\\x20-\\x7e]\\x00){' +\n str(modifier).encode('ascii') + b',}', self.buff)\n\n if matches:\n for m in matches:\n wide.append(m.group(0).decode('utf-16'))\n return wide", "def __str__(self):\n\n return '\\n'.join(map(History.node_repr, self.__history))", "def convert(self, match: Match) -> V: # pylint: disable=no-self-use\n return match.string", "def _chinese(source):\n return json.dumps(source, ensure_ascii=False)", "def _hidden_in_unicode(self, txt):", "def show_history_log(self):\n self.visual.print_enum(self.visual.history_log)", "def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)", "def __str__(self):\n return self.__unicode__().encode('utf-8').decode()", "def format_result(self,obj):\n return unicode(obj)", "def test_unicode_io(self):\r\n tm1 = TestModel.create(count=9, text=u'4567ë9989')\r\n tm2 = TestModel.get(tm1.vid)", "def convert_to_unicode( tscii_input ):\n output = list()\n prev = None\n prev2x = None\n # need a look ahead of 2 tokens atleast\n for char in tscii_input:\n ## print \"%2x\"%ord(char) # debugging\n if ord(char) < 128 :\n # base-ASCII copy to output\n output.append( char )\n prev = None\n prev2x = None\n \n elif ord(char) in TSCII_DIRECT_LOOKUP:\n if ( prev in TSCII_PRE_MODIFIER ):\n curr_char = [TSCII[ord(char)],TSCII[prev]] \n else:\n # we are direct lookup char\n curr_char = [TSCII[ord(char)]]\n char = None\n\n output.extend( curr_char )\n\n elif ( (ord(char) in TSCII_POST_MODIFIER) ): \n \n if ( (prev in TSCII_DIRECT_LOOKUP) and \n (prev2x in TSCII_PRE_MODIFIER) ):\n if len(output) >= 2:\n 
del output[-1] #we are reducing this token to something new\n del output[-2]\n elif len(output)==1:\n del output[-1] \n else:\n # nothing to delete here.. \n pass\n output.extend( [TSCII[prev], TSCII[prev2x]] )\n else:\n print(\"Warning: malformed TSCII encoded file; skipping characters\")\n \n prev = None\n char = None\n else:\n # pass - must be one of the pre/post modifiers\n pass\n \n prev2x = prev\n if char:\n prev = ord(char)\n return u\"\".join(output)", "def __unicode__(self):\n return unicode(self.GetString())", "def __init__(self, encoding):\n self.trans = {}\n for char in u\"ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ\":\n self.trans[char] = u\"A\"\n for char in u\"ȀǞ\":\n self.trans[char] = u\"Ä\"\n self.trans[u\"Ǻ\"] = u\"Å\"\n self.trans[u\"Ä\"] = u\"Ae\"\n self.trans[u\"Å\"] = u\"Aa\"\n for char in u\"àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ\":\n self.trans[char] = u\"a\"\n for char in u\"ȁǟ\":\n self.trans[char] = u\"ä\"\n self.trans[u\"ǻ\"] = u\"å\"\n self.trans[u\"ä\"] = u\"ae\"\n self.trans[u\"å\"] = u\"aa\"\n for char in u\"ḂḄḆƁƂ\":\n self.trans[char] = u\"B\"\n for char in u\"ḃḅḇƀɓƃ\":\n self.trans[char] = u\"b\"\n for char in u\"ĆĈĊÇČƇ\":\n self.trans[char] = u\"C\"\n for char in u\"ćĉċçčƈȼ\":\n self.trans[char] = u\"c\"\n self.trans[u\"Ḉ\"] = u\"Ç\"\n self.trans[u\"ḉ\"] = u\"ç\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ĎḊḌḎḐḒĐƉƊƋ\":\n self.trans[char] = u\"D\"\n for char in u\"ďḋḍḏḑḓđɖɗƌ\":\n self.trans[char] = u\"d\"\n for char in u\"ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ\":\n self.trans[char] = u\"E\"\n for char in u\"ỀẾỄỆỂ\":\n self.trans[char] = u\"Ê\"\n for char in u\"èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ\":\n self.trans[char] = u\"e\"\n for char in u\"ềếễệể\":\n self.trans[char] = u\"ê\"\n for char in u\"ḞƑ\":\n self.trans[char] = u\"F\"\n for char in u\"ḟƒ\":\n self.trans[char] = u\"f\"\n for char in u\"ǴḠĞĠĢǦǤƓ\":\n self.trans[char] = u\"G\"\n for char in u\"ǵḡğġģǧǥɠ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ĝ\"] = u\"Gx\"\n self.trans[u\"ĝ\"] = u\"gx\"\n for char in u\"ḢḤḦȞḨḪH̱ĦǶ\":\n self.trans[char] = u\"H\"\n for char in u\"ḣḥḧȟḩḫ̱ẖħƕ\":\n self.trans[char] = u\"h\"\n for char in u\"IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ\":\n self.trans[char] = u\"I\"\n for char in u\"ıìȉíîĩḭïḯīĭȋįǐiịỉɨ\":\n self.trans[char] = u\"i\"\n for char in u\"ĴJ\":\n self.trans[char] = u\"J\"\n for char in u\"ɟĵ̌ǰ\":\n self.trans[char] = u\"j\"\n for char in u\"ḰǨĶḲḴƘ\":\n self.trans[char] = u\"K\"\n for char in u\"ḱǩķḳḵƙ\":\n self.trans[char] = u\"k\"\n for char in u\"ĹĻĽḶḸḺḼȽŁ\":\n self.trans[char] = u\"L\"\n for char in u\"ĺļľḷḹḻḽƚłɫ\":\n self.trans[char] = u\"l\"\n for char in u\"ḾṀṂ\":\n self.trans[char] = u\"M\"\n for char in u\"ḿṁṃɱ\":\n self.trans[char] = u\"m\"\n for char in u\"ǸŃÑŅŇṄṆṈṊŊƝɲȠ\":\n self.trans[char] = u\"N\"\n for char in u\"ǹńñņňṅṇṉṋŋɲƞ\":\n self.trans[char] = u\"n\"\n for char in u\"ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ\":\n self.trans[char] = u\"O\"\n for char in u\"òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ\":\n self.trans[char] = u\"o\"\n for char in u\"ȌŐȪ\":\n self.trans[char] = u\"Ö\"\n for char in u\"ȍőȫ\":\n self.trans[char] = u\"ö\"\n for char in u\"ỒỐỖỘỔȎ\":\n self.trans[char] = u\"Ô\"\n for char in u\"ồốỗộổȏ\":\n self.trans[char] = u\"ô\"\n for char in u\"ṔṖƤ\":\n self.trans[char] = u\"P\"\n for char in u\"ṕṗƥ\":\n self.trans[char] = u\"p\"\n self.trans[u\"ᵽ\"] = u\"q\"\n for char in u\"ȐŔŖŘȒṘṚṜṞ\":\n self.trans[char] = u\"R\"\n for char in u\"ȑŕŗřȓṙṛṝṟɽ\":\n self.trans[char] = u\"r\"\n for char in u\"ŚṤŞȘŠṦṠṢṨ\":\n self.trans[char] = u\"S\"\n for char in u\"śṥşșšṧṡṣṩȿ\":\n self.trans[char] = u\"s\"\n 
self.trans[u\"Ŝ\"] = u\"Sx\"\n self.trans[u\"ŝ\"] = u\"sx\"\n for char in u\"ŢȚŤṪṬṮṰŦƬƮ\":\n self.trans[char] = u\"T\"\n for char in u\"ţțťṫṭṯṱŧȾƭʈ\":\n self.trans[char] = u\"t\"\n for char in u\"ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ\":\n self.trans[char] = u\"U\"\n for char in u\"ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ\":\n self.trans[char] = u\"u\"\n for char in u\"ȔŰǛǗǕǙ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ȕűǜǘǖǚ\":\n self.trans[char] = u\"ü\"\n self.trans[u\"Û\"] = u\"Ux\"\n self.trans[u\"û\"] = u\"ux\"\n self.trans[u\"Ȗ\"] = u\"Û\"\n self.trans[u\"ȗ\"] = u\"û\"\n self.trans[u\"Ừ\"] = u\"Ù\"\n self.trans[u\"ừ\"] = u\"ù\"\n self.trans[u\"Ứ\"] = u\"Ú\"\n self.trans[u\"ứ\"] = u\"ú\"\n for char in u\"ṼṾ\":\n self.trans[char] = u\"V\"\n for char in u\"ṽṿ\":\n self.trans[char] = u\"v\"\n for char in u\"ẀẂŴẄẆẈ\":\n self.trans[char] = u\"W\"\n for char in u\"ẁẃŵẅẇẉ\":\n self.trans[char] = u\"w\"\n for char in u\"ẊẌ\":\n self.trans[char] = u\"X\"\n for char in u\"ẋẍ\":\n self.trans[char] = u\"x\"\n for char in u\"ỲÝŶŸỸȲẎỴỶƳ\":\n self.trans[char] = u\"Y\"\n for char in u\"ỳýŷÿỹȳẏỵỷƴ\":\n self.trans[char] = u\"y\"\n for char in u\"ŹẐŻẒŽẔƵȤ\":\n self.trans[char] = u\"Z\"\n for char in u\"źẑżẓžẕƶȥ\":\n self.trans[char] = u\"z\"\n self.trans[u\"ɀ\"] = u\"zv\"\n\n # Latin: extended Latin alphabet\n self.trans[u\"ɑ\"] = u\"a\"\n for char in u\"ÆǼǢ\":\n self.trans[char] = u\"AE\"\n for char in u\"æǽǣ\":\n self.trans[char] = u\"ae\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ƎƏƐ\":\n self.trans[char] = u\"E\"\n for char in u\"ǝəɛ\":\n self.trans[char] = u\"e\"\n for char in u\"ƔƢ\":\n self.trans[char] = u\"G\"\n for char in u\"ᵷɣƣᵹ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ƅ\"] = u\"H\"\n self.trans[u\"ƅ\"] = u\"h\"\n self.trans[u\"Ƕ\"] = u\"Wh\"\n self.trans[u\"ƕ\"] = u\"wh\"\n self.trans[u\"Ɩ\"] = u\"I\"\n self.trans[u\"ɩ\"] = u\"i\"\n self.trans[u\"Ŋ\"] = u\"Ng\"\n self.trans[u\"ŋ\"] = u\"ng\"\n self.trans[u\"Œ\"] = u\"OE\"\n self.trans[u\"œ\"] = u\"oe\"\n self.trans[u\"Ɔ\"] = u\"O\"\n self.trans[u\"ɔ\"] = u\"o\"\n self.trans[u\"Ȣ\"] = u\"Ou\"\n self.trans[u\"ȣ\"] = u\"ou\"\n self.trans[u\"Ƽ\"] = u\"Q\"\n for char in u\"ĸƽ\":\n self.trans[char] = u\"q\"\n self.trans[u\"ȹ\"] = u\"qp\"\n self.trans[u\"\"] = u\"r\"\n self.trans[u\"ſ\"] = u\"s\"\n self.trans[u\"ß\"] = u\"ss\"\n self.trans[u\"Ʃ\"] = u\"Sh\"\n for char in u\"ʃᶋ\":\n self.trans[char] = u\"sh\"\n self.trans[u\"Ʉ\"] = u\"U\"\n self.trans[u\"ʉ\"] = u\"u\"\n self.trans[u\"Ʌ\"] = u\"V\"\n self.trans[u\"ʌ\"] = u\"v\"\n for char in u\"ƜǷ\":\n self.trans[char] = u\"W\"\n for char in u\"ɯƿ\":\n self.trans[char] = u\"w\"\n self.trans[u\"Ȝ\"] = u\"Y\"\n self.trans[u\"ȝ\"] = u\"y\"\n self.trans[u\"IJ\"] = u\"IJ\"\n self.trans[u\"ij\"] = u\"ij\"\n self.trans[u\"Ƨ\"] = u\"Z\"\n for char in u\"ʮƨ\":\n self.trans[char] = u\"z\"\n self.trans[u\"Ʒ\"] = u\"Zh\"\n self.trans[u\"ʒ\"] = u\"zh\"\n self.trans[u\"Ǯ\"] = u\"Dzh\"\n self.trans[u\"ǯ\"] = u\"dzh\"\n for char in u\"ƸƹʔˀɁɂ\":\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in u\"Cʗǃ\":\n self.trans[char] = u\"!\"\n\n # Punctuation and typography\n for char in u\"«»“”„¨\":\n self.trans[char] = u'\"'\n for char in u\"‘’′\":\n self.trans[char] = u\"'\"\n self.trans[u\"•\"] = u\"*\"\n self.trans[u\"@\"] = u\"(at)\"\n self.trans[u\"¤\"] = u\"$\"\n self.trans[u\"¢\"] = u\"c\"\n self.trans[u\"€\"] = u\"E\"\n self.trans[u\"£\"] = u\"L\"\n self.trans[u\"¥\"] = u\"yen\"\n self.trans[u\"†\"] = u\"+\"\n self.trans[u\"‡\"] = u\"++\"\n self.trans[u\"°\"] = 
u\":\"\n self.trans[u\"¡\"] = u\"!\"\n self.trans[u\"¿\"] = u\"?\"\n self.trans[u\"‰\"] = u\"o/oo\"\n self.trans[u\"‱\"] = u\"o/ooo\"\n for char in u\"¶§\":\n self.trans[char] = u\">\"\n self.trans['…'] = '...'\n for char in u\"‒–—―\":\n self.trans[char] = u\"-\"\n self.trans['·'] = ' '\n self.trans[u\"¦\"] = u\"|\"\n self.trans[u\"⁂\"] = u\"***\"\n self.trans[u\"◊\"] = u\"<>\"\n self.trans[u\"‽\"] = u\"?!\"\n self.trans[u\"؟\"] = u\";-)\"\n self.trans[u\"¹\"] = u\"1\"\n self.trans[u\"²\"] = u\"2\"\n self.trans[u\"³\"] = u\"3\"\n\n # Cyrillic\n self.trans.update({u\"А\": u\"A\", u\"а\": u\"a\", u\"Б\": u\"B\", u\"б\": u\"b\",\n u\"В\": u\"V\", u\"в\": u\"v\", u\"Г\": u\"G\", u\"г\": u\"g\",\n u\"Д\": u\"D\", u\"д\": u\"d\", u\"Е\": u\"E\", u\"е\": u\"e\",\n u\"Ж\": u\"Zh\", u\"ж\": u\"zh\", u\"З\": u\"Z\", u\"з\": u\"z\",\n u\"И\": u\"I\", u\"и\": u\"i\", u\"Й\": u\"J\", u\"й\": u\"j\",\n u\"К\": u\"K\", u\"к\": u\"k\", u\"Л\": u\"L\", u\"л\": u\"l\",\n u\"М\": u\"M\", u\"м\": u\"m\", u\"Н\": u\"N\", u\"н\": u\"n\",\n u\"О\": u\"O\", u\"о\": u\"o\", u\"П\": u\"P\", u\"п\": u\"p\",\n u\"Р\": u\"R\", u\"р\": u\"r\", u\"С\": u\"S\", u\"с\": u\"s\",\n u\"Т\": u\"T\", u\"т\": u\"t\", u\"У\": u\"U\", u\"у\": u\"u\",\n u\"Ф\": u\"F\", u\"ф\": u\"f\", u\"х\": u\"kh\", u\"Ц\": u\"C\",\n u\"ц\": u\"c\", u\"Ч\": u\"Ch\", u\"ч\": u\"ch\", u\"Ш\": u\"Sh\",\n u\"ш\": u\"sh\", u\"Щ\": u\"Shch\", u\"щ\": u\"shch\", u\"Ь\": u\"'\",\n u\"ь\": \"'\", u\"Ъ\": u'\"', u\"ъ\": '\"', u\"Ю\": u\"Yu\",\n u\"ю\": u\"yu\", u\"Я\": u\"Ya\", u\"я\": u\"ya\", u\"Х\": u\"Kh\",\n u\"Χ\": u\"Kh\"})\n\n # Additional Cyrillic letters, most occuring in only one or a few languages\n self.trans.update({u\"Ы\": u\"Y\", u\"ы\": u\"y\", u\"Ё\": u\"Ë\", u\"ё\": u\"ë\",\n u\"Э\": u\"È\", u\"Ѐ\": u\"È\", u\"э\": u\"è\", u\"ѐ\": u\"è\",\n u\"І\": u\"I\", u\"і\": u\"i\", u\"Ї\": u\"Ji\", u\"ї\": u\"ji\",\n u\"Є\": u\"Je\", u\"є\": u\"je\", u\"Ґ\": u\"G\", u\"Ҝ\": u\"G\",\n u\"ґ\": u\"g\", u\"ҝ\": u\"g\", u\"Ђ\": u\"Dj\", u\"ђ\": u\"dj\",\n \"Љ\": \"Lj\", \"љ\": \"lj\",\n u\"Њ\": u\"Nj\", u\"њ\": u\"nj\", u\"Ћ\": u\"Cj\", u\"ћ\": u\"cj\",\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n u\"Ќ\": u\"Kj\", u\"ќ\": u\"kj\", u\"Ӣ\": u\"Ii\", u\"ӣ\": u\"ii\",\n \"Ҳ\": \"H\", \"ҳ\": \"h\",\n u\"Ҷ\": u\"Dz\", u\"ҷ\": u\"dz\", u\"Ө\": u\"Ô\", u\"Ӫ\": u\"Ô\",\n u\"ө\": u\"ô\", u\"ӫ\": u\"ô\", u\"Ү\": u\"Y\", u\"ү\": u\"y\", u\"Һ\": u\"H\",\n u\"һ\": u\"h\", u\"Ә\": u\"AE\", u\"Ӕ\": u\"AE\", u\"ә\": u\"ae\",\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n u\"ѝ\": u\"ì\", u\"Ѝ\": u\"Ì\", u\"Ӑ\": u\"A\", u\"ă\": u\"a\", u\"Ӓ\": u\"Ä\",\n \"Ҽ\": \"Ts\", \"Ҿ\": \"Ts\", \"ҽ\": \"ts\", \"ҿ\": \"ts\",\n u\"Ҙ\": u\"Dh\", u\"ҙ\": u\"dh\", u\"Ӏ\": u\"\", u\"ӏ\": u\"\", u\"Ӆ\": u\"L\",\n u\"ӆ\": u\"l\", u\"Ӎ\": u\"M\", u\"ӎ\": u\"m\", u\"Ӧ\": u\"Ö\", u\"ӧ\": u\"ö\",\n u\"Ҩ\": u\"u\", u\"ҩ\": u\"u\", u\"Ҧ\": u\"Ph\", u\"ҧ\": u\"ph\", u\"Ҏ\": u\"R\",\n u\"ҏ\": u\"r\", u\"Ҫ\": u\"Th\", u\"ҫ\": u\"th\", u\"Ҭ\": u\"T\", u\"ҭ\": u\"t\",\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n u\"ӹ\": u\"u\", u\"Ҵ\": u\"Tts\", u\"ҵ\": u\"tts\", u\"Ӵ\": u\"Ch\", u\"ӵ\": u\"ch\"})\n\n for char in u\"ЈӤҊ\":\n self.trans[char] = u\"J\"\n for char in u\"јӥҋ\":\n self.trans[char] = u\"j\"\n for char in u\"ЏӁӜҶ\":\n self.trans[char] = u\"Dzh\"\n for char in u\"џӂӝҷ\":\n self.trans[char] = u\"dzh\"\n for char in u\"ЅӞӠӋҸ\":\n self.trans[char] = u\"Dz\"\n for char in u\"ѕӟӡӌҹ\":\n self.trans[char] = u\"dz\"\n for char in u\"ҒӶҔ\":\n self.trans[char] = 
u\"G\"\n for char in u\"ғӷҕ\":\n self.trans[char] = u\"g\"\n for char in u\"ҚҞҠӃ\":\n self.trans[char] = u\"Q\"\n for char in u\"қҟҡӄ\":\n self.trans[char] = u\"q\"\n for char in u\"ҢҤӉӇ\":\n self.trans[char] = u\"Ng\"\n for char in u\"ңҥӊӈ\":\n self.trans[char] = u\"ng\"\n for char in u\"ӖѢҌ\":\n self.trans[char] = u\"E\"\n for char in u\"ӗѣҍ\":\n self.trans[char] = u\"e\"\n for char in u\"ӲӰҮ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ӳӱү\":\n self.trans[char] = u\"ü\"\n\n # Archaic Cyrillic letters\n self.trans.update({u\"Ѹ\": u\"Ou\", u\"ѹ\": u\"ou\", u\"Ѡ\": u\"O\", u\"Ѻ\": u\"O\", u\"ѡ\": u\"o\",\n u\"ѻ\": u\"o\", u\"Ѿ\": u\"Ot\", u\"ѿ\": u\"ot\", u\"Ѣ\": u\"E\", u\"ѣ\": u\"e\",\n u\"Ѥ\": u\"Ei\", u\"Ѧ\": u\"Ei\", u\"ѥ\": u\"ei\", u\"ѧ\": u\"ei\", u\"Ѫ\": u\"Ai\",\n u\"ѫ\": u\"ai\", u\"Ѯ\": u\"X\", u\"ѯ\": u\"x\", u\"Ѱ\": u\"Ps\", u\"ѱ\": u\"ps\",\n u\"Ѳ\": u\"Th\", u\"ѳ\": u\"th\", u\"Ѵ\": u\"Ü\", u\"Ѷ\": u\"Ü\", u\"ѵ\": u\"ü\"})\n\n # Hebrew alphabet\n for char in u\"אע\":\n self.trans[char] = u\"'\"\n self.trans[u\"ב\"] = u\"b\"\n self.trans[u\"ג\"] = u\"g\"\n self.trans[u\"ד\"] = u\"d\"\n self.trans[u\"ה\"] = u\"h\"\n self.trans[u\"ו\"] = u\"v\"\n self.trans[u\"ז\"] = u\"z\"\n self.trans[u\"ח\"] = u\"kh\"\n self.trans[u\"ט\"] = u\"t\"\n self.trans[u\"י\"] = u\"y\"\n for char in u\"ךכ\":\n self.trans[char] = u\"k\"\n self.trans[u\"ל\"] = u\"l\"\n for char in u\"םמ\":\n self.trans[char] = u\"m\"\n for char in u\"ןנ\":\n self.trans[char] = u\"n\"\n self.trans[u\"ס\"] = u\"s\"\n for char in u\"ףפ\":\n self.trans[char] = u\"ph\"\n for char in u\"ץצ\":\n self.trans[char] = u\"ts\"\n self.trans[u\"ק\"] = u\"q\"\n self.trans[u\"ר\"] = u\"r\"\n self.trans[u\"ש\"] = u\"sh\"\n self.trans[u\"ת\"] = u\"th\"\n\n # Arab alphabet\n for char in u\"اﺍﺎ\":\n self.trans[char] = u\"a\"\n for char in u\"بﺏﺐﺒﺑ\":\n self.trans[char] = u\"b\"\n for char in u\"تﺕﺖﺘﺗ\":\n self.trans[char] = u\"t\"\n for char in u\"ثﺙﺚﺜﺛ\":\n self.trans[char] = u\"th\"\n for char in u\"جﺝﺞﺠﺟ\":\n self.trans[char] = u\"g\"\n for char in u\"حﺡﺢﺤﺣ\":\n self.trans[char] = u\"h\"\n for char in u\"خﺥﺦﺨﺧ\":\n self.trans[char] = u\"kh\"\n for char in u\"دﺩﺪ\":\n self.trans[char] = u\"d\"\n for char in u\"ذﺫﺬ\":\n self.trans[char] = u\"dh\"\n for char in u\"رﺭﺮ\":\n self.trans[char] = u\"r\"\n for char in u\"زﺯﺰ\":\n self.trans[char] = u\"z\"\n for char in u\"سﺱﺲﺴﺳ\":\n self.trans[char] = u\"s\"\n for char in u\"شﺵﺶﺸﺷ\":\n self.trans[char] = u\"sh\"\n for char in u\"صﺹﺺﺼﺻ\":\n self.trans[char] = u\"s\"\n for char in u\"ضﺽﺾﻀﺿ\":\n self.trans[char] = u\"d\"\n for char in u\"طﻁﻂﻄﻃ\":\n self.trans[char] = u\"t\"\n for char in u\"ظﻅﻆﻈﻇ\":\n self.trans[char] = u\"z\"\n for char in u\"عﻉﻊﻌﻋ\":\n self.trans[char] = u\"'\"\n for char in u\"غﻍﻎﻐﻏ\":\n self.trans[char] = u\"gh\"\n for char in u\"فﻑﻒﻔﻓ\":\n self.trans[char] = u\"f\"\n for char in u\"قﻕﻖﻘﻗ\":\n self.trans[char] = u\"q\"\n for char in u\"كﻙﻚﻜﻛک\":\n self.trans[char] = u\"k\"\n for char in u\"لﻝﻞﻠﻟ\":\n self.trans[char] = u\"l\"\n for char in u\"مﻡﻢﻤﻣ\":\n self.trans[char] = u\"m\"\n for char in u\"نﻥﻦﻨﻧ\":\n self.trans[char] = u\"n\"\n for char in u\"هﻩﻪﻬﻫ\":\n self.trans[char] = u\"h\"\n for char in u\"وﻭﻮ\":\n self.trans[char] = u\"w\"\n for char in u\"یيﻱﻲﻴﻳ\":\n self.trans[char] = u\"y\"\n # Arabic - additional letters, modified letters and ligatures\n self.trans[u\"ﺀ\"] = u\"'\"\n for char in u\"آﺁﺂ\":\n self.trans[char] = u\"'a\"\n for char in u\"ةﺓﺔ\":\n self.trans[char] = u\"th\"\n for char in u\"ىﻯﻰ\":\n self.trans[char] = u\"á\"\n for char in u\"یﯼﯽﯿﯾ\":\n 
self.trans[char] = u\"y\"\n self.trans[u\"؟\"] = u\"?\"\n # Arabic - ligatures\n for char in u\"ﻻﻼ\":\n self.trans[char] = u\"la\"\n self.trans[u\"ﷲ\"] = u\"llah\"\n for char in u\"إأ\":\n self.trans[char] = u\"a'\"\n self.trans[u\"ؤ\"] = u\"w'\"\n self.trans[u\"ئ\"] = u\"y'\"\n for char in u\"◌◌\":\n self.trans[char] = u\"\" # indicates absence of vowels\n # Arabic vowels\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"i\"\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"iy\"\n # Arab numerals\n for char in u\"٠۰\":\n self.trans[char] = u\"0\"\n for char in u\"١۱\":\n self.trans[char] = u\"1\"\n for char in u\"٢۲\":\n self.trans[char] = u\"2\"\n for char in u\"٣۳\":\n self.trans[char] = u\"3\"\n for char in u\"٤۴\":\n self.trans[char] = u\"4\"\n for char in u\"٥۵\":\n self.trans[char] = u\"5\"\n for char in u\"٦۶\":\n self.trans[char] = u\"6\"\n for char in u\"٧۷\":\n self.trans[char] = u\"7\"\n for char in u\"٨۸\":\n self.trans[char] = u\"8\"\n for char in u\"٩۹\":\n self.trans[char] = u\"9\"\n # Perso-Arabic\n for char in u\"پﭙﭙپ\":\n self.trans[char] = u\"p\"\n for char in u\"چچچچ\":\n self.trans[char] = u\"ch\"\n for char in u\"ژژ\":\n self.trans[char] = u\"zh\"\n for char in u\"گﮔﮕﮓ\":\n self.trans[char] = u\"g\"\n\n # Greek\n self.trans.update({u\"Α\": u\"A\", u\"α\": u\"a\", u\"Β\": u\"B\", u\"β\": u\"b\", u\"Γ\": u\"G\",\n u\"γ\": u\"g\", u\"Δ\": u\"D\", u\"δ\": u\"d\", u\"Ε\": u\"E\", u\"ε\": u\"e\",\n u\"Ζ\": u\"Z\", u\"ζ\": u\"z\", u\"Η\": u\"I\", u\"η\": u\"i\", u\"θ\": u\"th\",\n u\"Θ\": u\"Th\", u\"Ι\": u\"I\", u\"ι\": u\"i\", u\"Κ\": u\"K\", u\"κ\": u\"k\",\n u\"Λ\": u\"L\", u\"λ\": u\"l\", u\"Μ\": u\"M\", u\"μ\": u\"m\", u\"Ν\": u\"N\",\n u\"ν\": u\"n\", u\"Ξ\": u\"X\", u\"ξ\": u\"x\", u\"Ο\": u\"O\", u\"ο\": u\"o\",\n u\"Π\": u\"P\", u\"π\": u\"p\", u\"Ρ\": u\"R\", u\"ρ\": u\"r\", u\"Σ\": u\"S\",\n u\"σ\": u\"s\", u\"ς\": u\"s\", u\"Τ\": u\"T\", u\"τ\": u\"t\", u\"Υ\": u\"Y\",\n u\"υ\": u\"y\", u\"Φ\": u\"F\", u\"φ\": u\"f\", u\"Ψ\": u\"Ps\", u\"ψ\": u\"ps\",\n u\"Ω\": u\"O\", u\"ω\": u\"o\", u\"ϗ\": u\"&\", u\"Ϛ\": u\"St\", u\"ϛ\": u\"st\",\n u\"Ϙ\": u\"Q\", u\"Ϟ\": u\"Q\", u\"ϙ\": u\"q\", u\"ϟ\": u\"q\", u\"Ϻ\": u\"S\",\n u\"ϻ\": u\"s\", u\"Ϡ\": u\"Ss\", u\"ϡ\": u\"ss\", u\"Ϸ\": u\"Sh\", u\"ϸ\": u\"sh\",\n u\"·\": u\":\", u\"Ά\": u\"Á\", u\"ά\": u\"á\", u\"Έ\": u\"É\", u\"Ή\": u\"É\",\n u\"έ\": u\"é\", u\"ή\": u\"é\", u\"Ί\": u\"Í\", u\"ί\": u\"í\", u\"Ϊ\": u\"Ï\",\n u\"ϊ\": u\"ï\", u\"ΐ\": u\"ï\", u\"Ό\": u\"Ó\", u\"ό\": u\"ó\", u\"Ύ\": u\"Ý\",\n u\"ύ\": u\"ý\", u\"Ϋ\": u\"Y\", u\"ϋ\": u\"ÿ\", u\"ΰ\": u\"ÿ\", u\"Ώ\": u\"Ó\",\n u\"ώ\": u\"ó\"})\n\n # Japanese (katakana and hiragana)\n for char in u\"アァあ\":\n self.trans[char] = u\"a\"\n for char in u\"イィい\":\n self.trans[char] = u\"i\"\n for char in u\"ウう\":\n self.trans[char] = u\"u\"\n for char in u\"エェえ\":\n self.trans[char] = u\"e\"\n for char in u\"オォお\":\n self.trans[char] = u\"o\"\n for char in u\"ャや\":\n self.trans[char] = u\"ya\"\n for char in u\"ュゆ\":\n self.trans[char] = u\"yu\"\n for char in u\"ョよ\":\n self.trans[char] = u\"yo\"\n for char in u\"カか\":\n self.trans[char] = u\"ka\"\n for char in u\"キき\":\n self.trans[char] = u\"ki\"\n for char in u\"クく\":\n self.trans[char] = u\"ku\"\n for char in u\"ケけ\":\n self.trans[char] = u\"ke\"\n for char in u\"コこ\":\n self.trans[char] = u\"ko\"\n for char in u\"サさ\":\n self.trans[char] = u\"sa\"\n for char in u\"シし\":\n 
self.trans[char] = u\"shi\"\n for char in u\"スす\":\n self.trans[char] = u\"su\"\n for char in u\"セせ\":\n self.trans[char] = u\"se\"\n for char in u\"ソそ\":\n self.trans[char] = u\"so\"\n for char in u\"タた\":\n self.trans[char] = u\"ta\"\n for char in u\"チち\":\n self.trans[char] = u\"chi\"\n for char in u\"ツつ\":\n self.trans[char] = u\"tsu\"\n for char in u\"テて\":\n self.trans[char] = u\"te\"\n for char in u\"トと\":\n self.trans[char] = u\"to\"\n for char in u\"ナな\":\n self.trans[char] = u\"na\"\n for char in u\"ニに\":\n self.trans[char] = u\"ni\"\n for char in u\"ヌぬ\":\n self.trans[char] = u\"nu\"\n for char in u\"ネね\":\n self.trans[char] = u\"ne\"\n for char in u\"ノの\":\n self.trans[char] = u\"no\"\n for char in u\"ハは\":\n self.trans[char] = u\"ha\"\n for char in u\"ヒひ\":\n self.trans[char] = u\"hi\"\n for char in u\"フふ\":\n self.trans[char] = u\"fu\"\n for char in u\"ヘへ\":\n self.trans[char] = u\"he\"\n for char in u\"ホほ\":\n self.trans[char] = u\"ho\"\n for char in u\"マま\":\n self.trans[char] = u\"ma\"\n for char in u\"ミみ\":\n self.trans[char] = u\"mi\"\n for char in u\"ムむ\":\n self.trans[char] = u\"mu\"\n for char in u\"メめ\":\n self.trans[char] = u\"me\"\n for char in u\"モも\":\n self.trans[char] = u\"mo\"\n for char in u\"ラら\":\n self.trans[char] = u\"ra\"\n for char in u\"リり\":\n self.trans[char] = u\"ri\"\n for char in u\"ルる\":\n self.trans[char] = u\"ru\"\n for char in u\"レれ\":\n self.trans[char] = u\"re\"\n for char in u\"ロろ\":\n self.trans[char] = u\"ro\"\n for char in u\"ワわ\":\n self.trans[char] = u\"wa\"\n for char in u\"ヰゐ\":\n self.trans[char] = u\"wi\"\n for char in u\"ヱゑ\":\n self.trans[char] = u\"we\"\n for char in u\"ヲを\":\n self.trans[char] = u\"wo\"\n for char in u\"ンん\":\n self.trans[char] = u\"n\"\n for char in u\"ガが\":\n self.trans[char] = u\"ga\"\n for char in u\"ギぎ\":\n self.trans[char] = u\"gi\"\n for char in u\"グぐ\":\n self.trans[char] = u\"gu\"\n for char in u\"ゲげ\":\n self.trans[char] = u\"ge\"\n for char in u\"ゴご\":\n self.trans[char] = u\"go\"\n for char in u\"ザざ\":\n self.trans[char] = u\"za\"\n for char in u\"ジじ\":\n self.trans[char] = u\"ji\"\n for char in u\"ズず\":\n self.trans[char] = u\"zu\"\n for char in u\"ゼぜ\":\n self.trans[char] = u\"ze\"\n for char in u\"ゾぞ\":\n self.trans[char] = u\"zo\"\n for char in u\"ダだ\":\n self.trans[char] = u\"da\"\n for char in u\"ヂぢ\":\n self.trans[char] = u\"dji\"\n for char in u\"ヅづ\":\n self.trans[char] = u\"dzu\"\n for char in u\"デで\":\n self.trans[char] = u\"de\"\n for char in u\"ドど\":\n self.trans[char] = u\"do\"\n for char in u\"バば\":\n self.trans[char] = u\"ba\"\n for char in u\"ビび\":\n self.trans[char] = u\"bi\"\n for char in u\"ブぶ\":\n self.trans[char] = u\"bu\"\n for char in u\"ベべ\":\n self.trans[char] = u\"be\"\n for char in u\"ボぼ\":\n self.trans[char] = u\"bo\"\n for char in u\"パぱ\":\n self.trans[char] = u\"pa\"\n for char in u\"ピぴ\":\n self.trans[char] = u\"pi\"\n for char in u\"プぷ\":\n self.trans[char] = u\"pu\"\n for char in u\"ペぺ\":\n self.trans[char] = u\"pe\"\n for char in u\"ポぽ\":\n self.trans[char] = u\"po\"\n for char in u\"ヴゔ\":\n self.trans[char] = u\"vu\"\n self.trans[u\"ヷ\"] = u\"va\"\n self.trans[u\"ヸ\"] = u\"vi\"\n self.trans[u\"ヹ\"] = u\"ve\"\n self.trans[u\"ヺ\"] = u\"vo\"\n\n # Japanese and Chinese punctuation and typography\n for char in u\"・·\":\n self.trans[char] = u\" \"\n for char in u\"〃『』《》\":\n self.trans[char] = u'\"'\n for char in u\"「」〈〉〘〙〚〛\":\n self.trans[char] = u\"'\"\n for char in u\"(〔\":\n self.trans[char] = u\"(\"\n for char in u\")〕\":\n self.trans[char] = u\")\"\n for char in 
u\"[【〖\":\n self.trans[char] = u\"[\"\n for char in u\"]】〗\":\n self.trans[char] = u\"]\"\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. '\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in u\"•◦\":\n self.trans[char] = u\"_\"\n for char in u\"※*\":\n self.trans[char] = u\"*\"\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in u\",、\":\n self.trans[char] = u\",\"\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in u\"ეჱ\":\n self.trans[char] = u\"e\"\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in u\"ყ\":\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in u\"წ\":\n self.trans[char] = u\"ts'\"\n for char in u\"ჭ\":\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in u\"पप\":\n self.trans[char] = u\"p\"\n self.trans['अ'] = 'a'\n for char in u\"आा\":\n self.trans[char] = u\"aa\"\n self.trans['प'] = 'pa'\n for char in u\"इि\":\n self.trans[char] = u\"i\"\n for char in u\"ईी\":\n self.trans[char] = u\"ii\"\n for char in u\"उु\":\n self.trans[char] = u\"u\"\n for char in u\"ऊू\":\n self.trans[char] = u\"uu\"\n for char in u\"एे\":\n self.trans[char] = u\"e\"\n for char in u\"ऐै\":\n self.trans[char] = u\"ai\"\n for char in u\"ओो\":\n self.trans[char] = u\"o\"\n for char in u\"औौ\":\n self.trans[char] = u\"au\"\n for char in u\"ऋृर\":\n self.trans[char] = u\"r\"\n for char in u\"ॠॄ\":\n self.trans[char] = u\"rr\"\n for char in u\"ऌॢल\":\n self.trans[char] = u\"l\"\n for char in u\"ॡॣ\":\n self.trans[char] = u\"ll\"\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in u\"टत\":\n self.trans[char] = u\"t\"\n for char in u\"ठथ\":\n self.trans[char] = u\"th\"\n for char in u\"डद\":\n self.trans[char] = u\"d\"\n for char in u\"ढध\":\n self.trans[char] = u\"dh\"\n for char in u\"णन\":\n self.trans[char] = u\"n\"\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in u\"षस\":\n self.trans[char] = u\"s\"\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in u\"क़\":\n self.trans[char] = u\"q\"\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in u\"डढ\":\n 
self.trans[char] = u\"r\"\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in u\"ख्\":\n self.trans[char] = u\"khn\"\n self.trans['त'] = 'tn'\n for char in u\"द्\":\n self.trans[char] = u\"dn\"\n self.trans['श'] = 'cn'\n for char in u\"ह्\":\n self.trans[char] = u\"fn\"\n for char in u\"अँ\":\n self.trans[char] = u\"m\"\n for char in u\"॒॑\":\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in u\"Տ\":\n self.trans[char] = u\"T'\"\n for char in u\"տ\":\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in u\"க்\":\n self.trans[char] = u\"k\"\n for char in u\"ஙண்ந்ன்\":\n self.trans[char] = u\"n\"\n self.trans['ச'] = 'c'\n for char in u\"ஞ்\":\n self.trans[char] = u\"ñ\"\n for char in u\"ட்\":\n self.trans[char] = u\"th\"\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in u\"ம்\":\n self.trans[char] = u\"m\"\n for char in u\"ய்\":\n self.trans[char] = u\"y\"\n for char in u\"ர்ழ்ற\":\n self.trans[char] = u\"r\"\n for char in u\"ல்ள\":\n self.trans[char] = u\"l\"\n for char in u\"வ்\":\n self.trans[char] = u\"v\"\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in u\"க்ஷ\":\n self.trans[char] = u\"x\"\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n 
self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in u\"আা\":\n self.trans[char] = u\"a\"\n for char in u\"ইিঈী\":\n self.trans[char] = u\"i\"\n for char in u\"উুঊূ\":\n self.trans[char] = u\"u\"\n for char in u\"ঋৃ\":\n self.trans[char] = u\"ri\"\n for char in u\"এেয়\":\n self.trans[char] = u\"e\"\n for char in u\"ঐৈ\":\n self.trans[char] = u\"oi\"\n for char in u\"ওো\":\n self.trans[char] = u\"o\"\n for char in u\"ঔৌ\":\n self.trans[char] = \"ou\"\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in u\"টত\":\n self.trans[char] = u\"t\"\n for char in u\"ঠথ\":\n self.trans[char] = u\"th\"\n for char in u\"ডদ\":\n self.trans[char] = u\"d\"\n for char in u\"ঢধ\":\n self.trans[char] = u\"dh\"\n for char in u\"ণন\":\n self.trans[char] = u\"n\"\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in u\"য়\":\n self.trans[char] = u\"-\"\n for char in u\"ড়\":\n self.trans[char] = u\"r\"\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in u\"ขฃคฅฆ\":\n self.trans[char] = u\"kh\"\n self.trans['ง'] = 'ng'\n for char in u\"จฉชฌ\":\n self.trans[char] = u\"ch\"\n for char in u\"ซศษส\":\n self.trans[char] = u\"s\"\n for char in u\"ญย\":\n self.trans[char] = u\"y\"\n for char in u\"ฎด\":\n self.trans[char] = u\"d\"\n for char in u\"ฏต\":\n self.trans[char] = u\"t\"\n for char in u\"ฐฑฒถทธ\":\n self.trans[char] = u\"th\"\n for char in u\"ณน\":\n self.trans[char] = u\"n\"\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in u\"ผพภ\":\n self.trans[char] = u\"ph\"\n for char in u\"ฝฟ\":\n self.trans[char] = u\"f\"\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in u\"ลฬ\":\n self.trans[char] = u\"l\"\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in u\"หฮ\":\n self.trans[char] = u\"h\"\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in u\"อวโิ\":\n self.trans[char] = u\"o\"\n for char in u\"ะัา\":\n self.trans[char] = u\"a\"\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in u\"เ็\":\n self.trans[char] = u\"e\"\n self.trans['แ'] = 'ae'\n for char in u\"ใไ\":\n self.trans[char] = u\"ai\"\n for char in u\"่้๊๋็์\":\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n 
self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans[u\"ಅ\"] = u\"a\"\n for char in u\"ಆಾ\":\n self.trans[char] = u\"aa\"\n for char in u\"ಇಿ\":\n self.trans[char] = u\"i\"\n for char in u\"ಈೀ\":\n self.trans[char] = u\"ii\"\n for char in u\"ಉು\":\n self.trans[char] = u\"u\"\n for char in u\"ಊೂ\":\n self.trans[char] = u\"uu\"\n for char in u\"ಋೂ\":\n self.trans[char] = u\"r'\"\n for char in u\"ಎೆ\":\n self.trans[char] = u\"e\"\n for char in u\"ಏೇ\":\n self.trans[char] = u\"ee\"\n for char in u\"ಐೈ\":\n self.trans[char] = u\"ai\"\n for char in u\"ಒೊ\":\n self.trans[char] = u\"o\"\n for char in u\"ಓೋ\":\n self.trans[char] = u\"oo\"\n for char in u\"ಔೌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ಂ\"] = u\"m'\"\n self.trans[u\"ಃ\"] = u\"h'\"\n self.trans[u\"ಕ\"] = u\"k\"\n self.trans[u\"ಖ\"] = u\"kh\"\n self.trans[u\"ಗ\"] = u\"g\"\n self.trans[u\"ಘ\"] = u\"gh\"\n self.trans[u\"ಙ\"] = u\"ng\"\n self.trans[u\"ಚ\"] = u\"c\"\n self.trans[u\"ಛ\"] = u\"ch\"\n self.trans[u\"ಜ\"] = u\"j\"\n self.trans[u\"ಝ\"] = u\"ny\"\n self.trans[u\"ಟ\"] = u\"tt\"\n self.trans[u\"ಠ\"] = u\"tth\"\n self.trans[u\"ಡ\"] = u\"dd\"\n self.trans[u\"ಢ\"] = u\"ddh\"\n self.trans[u\"ಣ\"] = u\"nn\"\n self.trans[u\"ತ\"] = u\"t\"\n self.trans[u\"ಥ\"] = u\"th\"\n self.trans[u\"ದ\"] = u\"d\"\n self.trans[u\"ಧ\"] = u\"dh\"\n self.trans[u\"ನ\"] = u\"n\"\n self.trans[u\"ಪ\"] = u\"p\"\n self.trans[u\"ಫ\"] = u\"ph\"\n self.trans[u\"ಬ\"] = u\"b\"\n self.trans[u\"ಭ\"] = u\"bh\"\n self.trans[u\"ಮ\"] = u\"m\"\n self.trans[u\"ಯ\"] = u\"y\"\n self.trans[u\"ರ\"] = u\"r\"\n self.trans[u\"ಲ\"] = u\"l\"\n self.trans[u\"ವ\"] = u\"v\"\n self.trans[u\"ಶ\"] = u\"sh\"\n self.trans[u\"ಷ\"] = u\"ss\"\n self.trans[u\"ಸ\"] = u\"s\"\n self.trans[u\"ಹ\"] = u\"h\"\n self.trans[u\"ಳ\"] = u\"ll\"\n self.trans[u\"೦\"] = u\"0\"\n self.trans[u\"೧\"] = u\"1\"\n self.trans[u\"೨\"] = u\"2\"\n self.trans[u\"೩\"] = u\"3\"\n self.trans[u\"೪\"] = u\"4\"\n self.trans[u\"೫\"] = u\"5\"\n self.trans[u\"೬\"] = u\"6\"\n self.trans[u\"೭\"] = u\"7\"\n self.trans[u\"೮\"] = u\"8\"\n self.trans[u\"೯\"] = u\"9\"\n # Telugu\n self.trans['అ'] = 'a'\n for char in u\"ఆా\":\n self.trans[char] = u\"aa\"\n for char in u\"ఇి\":\n self.trans[char] = u\"i\"\n for char in u\"ఈీ\":\n self.trans[char] = u\"ii\"\n for char in u\"ఉు\":\n self.trans[char] = u\"u\"\n for char in u\"ఊూ\":\n self.trans[char] = u\"uu\"\n for char in u\"ఋృ\":\n self.trans[char] = u\"r'\"\n for char in u\"ౠౄ\":\n self.trans[char] = u'r\"'\n self.trans[u\"ఌ\"] = u\"l'\"\n self.trans[u\"ౡ\"] = u'l\"'\n for char in 
u\"ఎె\":\n self.trans[char] = u\"e\"\n for char in u\"ఏే\":\n self.trans[char] = u\"ee\"\n for char in u\"ఐై\":\n self.trans[char] = u\"ai\"\n for char in u\"ఒొ\":\n self.trans[char] = u\"o\"\n for char in u\"ఓో\":\n self.trans[char] = u\"oo\"\n for char in u\"ఔౌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ం\"] = u\"'\"\n self.trans[u\"ః\"] = u'\"'\n self.trans[u\"క\"] = u\"k\"\n self.trans[u\"ఖ\"] = u\"kh\"\n self.trans[u\"గ\"] = u\"g\"\n self.trans[u\"ఘ\"] = u\"gh\"\n self.trans[u\"ఙ\"] = u\"ng\"\n self.trans[u\"చ\"] = u\"ts\"\n self.trans[u\"ఛ\"] = u\"tsh\"\n self.trans[u\"జ\"] = u\"j\"\n self.trans[u\"ఝ\"] = u\"jh\"\n self.trans[u\"ఞ\"] = u\"ñ\"\n for char in u\"టత\":\n self.trans[char] = u\"t\"\n for char in u\"ఠథ\":\n self.trans[char] = u\"th\"\n for char in u\"డద\":\n self.trans[char] = u\"d\"\n for char in u\"ఢధ\":\n self.trans[char] = u\"dh\"\n for char in u\"ణన\":\n self.trans[char] = u\"n\"\n self.trans[u\"ప\"] = u\"p\"\n self.trans[u\"ఫ\"] = u\"ph\"\n self.trans[u\"బ\"] = u\"b\"\n self.trans[u\"భ\"] = u\"bh\"\n self.trans[u\"మ\"] = u\"m\"\n self.trans[u\"య\"] = u\"y\"\n for char in u\"రఱ\":\n self.trans[char] = u\"r\"\n for char in u\"లళ\":\n self.trans[char] = u\"l\"\n self.trans[u\"వ\"] = u\"v\"\n self.trans[u\"శ\"] = u\"sh\"\n for char in u\"షస\":\n self.trans[char] = u\"s\"\n self.trans[u\"హ\"] = u\"h\"\n self.trans[u\"్\"] = \"\"\n for char in u\"ంఁ\":\n self.trans[char] = u\"^\"\n self.trans[u\"ః\"] = u\"-\"\n self.trans[u\"౦\"] = u\"0\"\n self.trans[u\"౧\"] = u\"1\"\n self.trans[u\"౨\"] = u\"2\"\n self.trans[u\"౩\"] = u\"3\"\n self.trans[u\"౪\"] = u\"4\"\n self.trans[u\"౫\"] = u\"5\"\n self.trans[u\"౬\"] = u\"6\"\n self.trans[u\"౭\"] = u\"7\"\n self.trans[u\"౮\"] = u\"8\"\n self.trans[u\"౯\"] = u\"9\"\n self.trans[u\"౹\"] = u\"1/4\"\n self.trans[u\"౺\"] = u\"1/2\"\n self.trans[u\"౻\"] = u\"3/4\"\n self.trans[u\"౼\"] = u\"1/16\"\n self.trans[u\"౽\"] = u\"1/8\"\n self.trans[u\"౾\"] = u\"3/16\"\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans[u\"ກ\"] = \"k\"\n for char in u\"ຂຄ\":\n self.trans[char] = \"kh\"\n self.trans[u\"ງ\"] = \"ng\"\n self.trans[u\"ຈ\"] = \"ch\"\n for char in u\"ສຊ\":\n self.trans[char] = \"s\"\n self.trans[u\"ຍ\"] = \"ny\"\n self.trans[u\"ດ\"] = \"d\"\n self.trans[u\"ຕ\"] = \"t\"\n for char in u\"ຖທ\":\n self.trans[char] = \"th\"\n self.trans[u\"ນ\"] = \"n\"\n self.trans[u\"ບ\"] = \"b\"\n self.trans[u\"ປ\"] = \"p\"\n for char in u\"ຜພ\":\n self.trans[char] = \"ph\"\n for char in u\"ຝຟ\":\n self.trans[char] = \"f\"\n for char in u\"ມໝ\":\n self.trans[char] = \"m\"\n self.trans[u\"ຢ\"] = \"y\"\n for char in u\"ຣຼ\":\n self.trans[char] = \"r\"\n for char in u\"ລຼ\":\n self.trans[char] = \"l\"\n self.trans[u\"ວ\"] = \"v\"\n self.trans['ຮ'] = 'h'\n self.trans[u\"ອ\"] = \"'\"\n for char in u\"ະັ\":\n self.trans[char] = \"a\"\n self.trans[u\"ິ\"] = \"i\"\n self.trans[u\"ຶ\"] = \"ue\"\n self.trans[u\"ຸ\"] = \"u\"\n self.trans[u\"ເ\"] = u\"é\"\n self.trans[u\"ແ\"] = u\"è\"\n for char in u\"ໂົາໍ\":\n self.trans[char] = \"o\"\n self.trans[u\"ຽ\"] = \"ia\"\n self.trans[u\"ເຶ\"] = \"uea\"\n self.trans[u\"ຍ\"] = \"i\"\n for char in u\"ໄໃ\":\n self.trans[char] = \"ai\"\n self.trans[u\"ຳ\"] = \"am\"\n self.trans[u\"າ\"] = \"aa\"\n self.trans[u\"ີ\"] = \"ii\"\n self.trans[u\"ື\"] = \"yy\"\n self.trans[u\"ູ\"] = \"uu\"\n self.trans[u\"ເ\"] = \"e\"\n self.trans[u\"ແ\"] = \"ei\"\n self.trans[u\"໐\"] = \"0\"\n self.trans[u\"໑\"] = \"1\"\n self.trans[u\"໒\"] = \"2\"\n self.trans[u\"໓\"] = 
\"3\"\n self.trans[u\"໔\"] = \"4\"\n self.trans[u\"໕\"] = \"5\"\n self.trans[u\"໖\"] = \"6\"\n self.trans[u\"໗\"] = \"7\"\n self.trans[u\"໘\"] = \"8\"\n self.trans[u\"໙\"] = \"9\"\n # Chinese -- note: incomplete\n for char in u\"埃挨哎唉哀皑癌蔼矮艾碍爱隘\":\n self.trans[char] = u\"ai\"\n for char in u\"鞍氨安俺按暗岸胺案\":\n self.trans[char] = u\"an\"\n for char in u\"肮昂盎\":\n self.trans[char] = u\"ang\"\n for char in u\"凹敖熬翱袄傲奥懊澳\":\n self.trans[char] = u\"ao\"\n for char in u\"芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸\":\n self.trans[char] = u\"ba\"\n for char in u\"白柏百摆佰败拜稗\":\n self.trans[char] = u\"bai\"\n for char in u\"斑班搬扳般颁板版扮拌伴瓣半办绊\":\n self.trans[char] = u\"ban\"\n for char in u\"邦帮梆榜膀绑棒磅蚌镑傍谤\":\n self.trans[char] = u\"bang\"\n for char in u\"苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆\":\n self.trans[char] = u\"bao\"\n for char in u\"杯碑悲卑北辈背贝钡倍狈备惫焙被\":\n self.trans[char] = u\"bei\"\n for char in u\"奔苯本笨\":\n self.trans[char] = u\"ben\"\n for char in u\"崩绷甭泵蹦迸\":\n self.trans[char] = u\"beng\"\n for char in u\"逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛\":\n self.trans[char] = u\"bi\"\n for char in u\"鞭边编贬扁便变卞辨辩辫遍\":\n self.trans[char] = u\"bian\"\n for char in u\"标彪膘表\":\n self.trans[char] = u\"biao\"\n for char in u\"鳖憋别瘪\":\n self.trans[char] = u\"bie\"\n for char in u\"彬斌濒滨宾摈\":\n self.trans[char] = u\"bin\"\n for char in u\"兵冰柄丙秉饼炳病并\":\n self.trans[char] = u\"bing\"\n for char in u\"玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳\":\n self.trans[char] = u\"bo\"\n for char in u\"哺补埠不布步簿部怖\":\n self.trans[char] = u\"bu\"\n for char in u\"猜裁材才财睬踩采彩菜蔡\":\n self.trans[char] = u\"cai\"\n for char in u\"餐参蚕残惭惨灿\":\n self.trans[char] = u\"can\"\n for char in u\"苍舱仓沧藏\":\n self.trans[char] = u\"cang\"\n for char in u\"操糙槽曹草\":\n self.trans[char] = u\"cao\"\n for char in u\"厕策侧册测\":\n self.trans[char] = u\"ce\"\n for char in u\"层蹭\":\n self.trans[char] = u\"ceng\"\n for char in u\"插叉茬茶查碴搽察岔差诧\":\n self.trans[char] = u\"cha\"\n for char in u\"拆柴豺\":\n self.trans[char] = u\"chai\"\n for char in u\"搀掺蝉馋谗缠铲产阐颤\":\n self.trans[char] = u\"chan\"\n for char in u\"昌猖场尝常长偿肠厂敞畅唱倡\":\n self.trans[char] = u\"chang\"\n for char in u\"超抄钞朝嘲潮巢吵炒\":\n self.trans[char] = u\"chao\"\n for char in u\"车扯撤掣彻澈\":\n self.trans[char] = u\"che\"\n for char in u\"郴臣辰尘晨忱沉陈趁衬\":\n self.trans[char] = u\"chen\"\n for char in u\"撑称城橙成呈乘程惩澄诚承逞骋秤\":\n self.trans[char] = u\"cheng\"\n for char in u\"吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽\":\n self.trans[char] = u\"chi\"\n for char in u\"充冲虫崇宠\":\n self.trans[char] = u\"chong\"\n for char in u\"抽酬畴踌稠愁筹仇绸瞅丑臭\":\n self.trans[char] = u\"chou\"\n for char in u\"初出橱厨躇锄雏滁除楚储矗搐触处\":\n self.trans[char] = u\"chu\"\n self.trans['揣'] = 'chuai'\n for char in u\"川穿椽传船喘串\":\n self.trans[char] = u\"chuan\"\n for char in u\"疮窗幢床闯创\":\n self.trans[char] = u\"chuang\"\n for char in u\"吹炊捶锤垂\":\n self.trans[char] = u\"chui\"\n for char in u\"春椿醇唇淳纯蠢\":\n self.trans[char] = u\"chun\"\n for char in u\"戳绰\":\n self.trans[char] = u\"chuo\"\n for char in u\"疵茨磁雌辞慈瓷词此刺赐次\":\n self.trans[char] = u\"ci\"\n for char in u\"聪葱囱匆从丛\":\n self.trans[char] = u\"cong\"\n self.trans['凑'] = 'cou'\n for char in u\"粗醋簇促\":\n self.trans[char] = u\"cu\"\n for char in u\"蹿篡窜\":\n self.trans[char] = u\"cuan\"\n for char in u\"摧崔催脆瘁粹淬翠\":\n self.trans[char] = u\"cui\"\n for char in u\"村存寸\":\n self.trans[char] = u\"cun\"\n for char in u\"磋撮搓措挫错\":\n self.trans[char] = u\"cuo\"\n for char in u\"搭达答瘩打大\":\n self.trans[char] = u\"da\"\n for char in u\"呆歹傣戴带殆代贷袋待逮怠\":\n self.trans[char] = u\"dai\"\n for char in u\"耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋\":\n self.trans[char] = u\"dan\"\n for char in u\"当挡党荡档\":\n self.trans[char] = u\"dang\"\n for char in 
u\"刀捣蹈倒岛祷导到稻悼道盗\":\n self.trans[char] = u\"dao\"\n for char in u\"德得的\":\n self.trans[char] = u\"de\"\n for char in u\"蹬灯登等瞪凳邓\":\n self.trans[char] = u\"deng\"\n for char in u\"堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔\":\n self.trans[char] = u\"di\"\n for char in u\"颠掂滇碘点典靛垫电佃甸店惦奠淀殿\":\n self.trans[char] = u\"dian\"\n for char in u\"碉叼雕凋刁掉吊钓调\":\n self.trans[char] = u\"diao\"\n for char in u\"跌爹碟蝶迭谍叠\":\n self.trans[char] = u\"die\"\n for char in u\"丁盯叮钉顶鼎锭定订\":\n self.trans[char] = u\"ding\"\n self.trans['丢'] = 'diu'\n for char in u\"东冬董懂动栋侗恫冻洞\":\n self.trans[char] = u\"dong\"\n for char in u\"兜抖斗陡豆逗痘\":\n self.trans[char] = u\"dou\"\n for char in u\"都督毒犊独读堵睹赌杜镀肚度渡妒\":\n self.trans[char] = u\"du\"\n for char in u\"端短锻段断缎\":\n self.trans[char] = u\"duan\"\n for char in u\"堆兑队对\":\n self.trans[char] = u\"dui\"\n for char in u\"墩吨蹲敦顿囤钝盾遁\":\n self.trans[char] = u\"dun\"\n for char in u\"掇哆多夺垛躲朵跺舵剁惰堕\":\n self.trans[char] = u\"duo\"\n for char in u\"蛾峨鹅俄额讹娥恶厄扼遏鄂饿\":\n self.trans[char] = u\"e\"\n for char in u\"恩嗯\":\n self.trans[char] = u\"en\"\n for char in u\"而儿耳尔饵洱二贰\":\n self.trans[char] = u\"er\"\n for char in u\"发罚筏伐乏阀法珐\":\n self.trans[char] = u\"fa\"\n for char in u\"藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛\":\n self.trans[char] = u\"fan\"\n for char in u\"坊芳方肪房防妨仿访纺放\":\n self.trans[char] = u\"fang\"\n for char in u\"菲非啡飞肥匪诽吠肺废沸费\":\n self.trans[char] = u\"fei\"\n for char in u\"芬酚吩氛分纷坟焚汾粉奋份忿愤粪\":\n self.trans[char] = u\"fen\"\n for char in u\"丰封枫蜂峰锋风疯烽逢冯缝讽奉凤\":\n self.trans[char] = u\"feng\"\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in u\"夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋复傅付阜父腹负富讣附妇缚咐\":\n self.trans[char] = u\"fu\"\n for char in u\"噶嘎\":\n self.trans[char] = u\"ga\"\n for char in u\"该改概钙盖溉\":\n self.trans[char] = u\"gai\"\n for char in u\"干甘杆柑竿肝赶感秆敢赣\":\n self.trans[char] = u\"gan\"\n for char in u\"冈刚钢缸肛纲岗港杠\":\n self.trans[char] = u\"gang\"\n for char in u\"篙皋高膏羔糕搞镐稿告\":\n self.trans[char] = u\"gao\"\n for char in u\"哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各\":\n self.trans[char] = u\"ge\"\n self.trans['给'] = 'gei'\n for char in u\"根跟\":\n self.trans[char] = u\"gen\"\n for char in u\"耕更庚羹埂耿梗\":\n self.trans[char] = u\"geng\"\n for char in u\"工攻功恭龚供躬公宫弓巩汞拱贡共\":\n self.trans[char] = u\"gong\"\n for char in u\"钩勾沟苟狗垢构购够\":\n self.trans[char] = u\"gou\"\n for char in u\"辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇\":\n self.trans[char] = u\"gu\"\n for char in u\"刮瓜剐寡挂褂\":\n self.trans[char] = u\"gua\"\n for char in u\"乖拐怪\":\n self.trans[char] = u\"guai\"\n for char in u\"棺关官冠观管馆罐惯灌贯\":\n self.trans[char] = u\"guan\"\n for char in u\"光广逛\":\n self.trans[char] = u\"guang\"\n for char in u\"瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽\":\n self.trans[char] = u\"gui\"\n for char in u\"辊滚棍\":\n self.trans[char] = u\"gun\"\n for char in u\"锅郭国果裹过\":\n self.trans[char] = u\"guo\"\n self.trans['哈'] = 'ha'\n for char in u\"骸孩海氦亥害骇\":\n self.trans[char] = u\"hai\"\n for char in u\"酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉\":\n self.trans[char] = u\"han\"\n for char in u\"夯杭航\":\n self.trans[char] = u\"hang\"\n for char in u\"壕嚎豪毫郝好耗号浩\":\n self.trans[char] = u\"hao\"\n for char in u\"呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺\":\n self.trans[char] = u\"he\"\n for char in u\"嘿黑\":\n self.trans[char] = u\"hei\"\n for char in u\"痕很狠恨\":\n self.trans[char] = u\"hen\"\n for char in u\"哼亨横衡恒\":\n self.trans[char] = u\"heng\"\n for char in u\"轰哄烘虹鸿洪宏弘红\":\n self.trans[char] = u\"hong\"\n for char in u\"喉侯猴吼厚候后\":\n self.trans[char] = u\"hou\"\n for char in u\"呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户\":\n self.trans[char] = u\"hu\"\n for char in u\"花哗华猾滑画划化话\":\n self.trans[char] = u\"hua\"\n for char in u\"槐徊怀淮坏\":\n self.trans[char] = u\"huai\"\n for char in 
u\"欢环桓还缓换患唤痪豢焕涣宦幻\":\n self.trans[char] = u\"huan\"\n for char in u\"荒慌黄磺蝗簧皇凰惶煌晃幌恍谎\":\n self.trans[char] = u\"huang\"\n for char in u\"灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘\":\n self.trans[char] = u\"hui\"\n for char in u\"荤昏婚魂浑混\":\n self.trans[char] = u\"hun\"\n for char in u\"豁活伙火获或惑霍货祸\":\n self.trans[char] = u\"huo\"\n for char in u\"击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪\":\n self.trans[char] = u\"ji\"\n for char in u\"嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁\":\n self.trans[char] = u\"jia\"\n for char in u\"歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健舰剑饯渐溅涧建\":\n self.trans[char] = u\"jian\"\n for char in u\"僵姜将浆江疆蒋桨奖讲匠酱降\":\n self.trans[char] = u\"jiang\"\n for char in u\"蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖\":\n self.trans[char] = u\"jiao\"\n for char in u\"揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届\":\n self.trans[char] = u\"jie\"\n for char in u\"巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲\":\n self.trans[char] = u\"jin\"\n for char in u\"荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净\":\n self.trans[char] = u\"jing\"\n for char in u\"囧炯窘\":\n self.trans[char] = u\"jiong\"\n for char in u\"揪究纠玖韭久灸九酒厩救旧臼舅咎就疚\":\n self.trans[char] = u\"jiu\"\n for char in u\"鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧\":\n self.trans[char] = u\"ju\"\n for char in u\"捐鹃娟倦眷卷绢\":\n self.trans[char] = u\"juan\"\n for char in u\"撅攫抉掘倔爵觉决诀绝\":\n self.trans[char] = u\"jue\"\n for char in u\"均菌钧军君峻俊竣浚郡骏\":\n self.trans[char] = u\"jun\"\n for char in u\"喀咖卡咯\":\n self.trans[char] = u\"ka\"\n for char in u\"开揩楷凯慨\":\n self.trans[char] = u\"kai\"\n for char in u\"刊堪勘坎砍看\":\n self.trans[char] = u\"kan\"\n for char in u\"康慷糠扛抗亢炕\":\n self.trans[char] = u\"kang\"\n for char in u\"考拷烤靠\":\n self.trans[char] = u\"kao\"\n for char in u\"坷苛柯棵磕颗科壳咳可渴克刻客课\":\n self.trans[char] = u\"ke\"\n for char in u\"肯啃垦恳\":\n self.trans[char] = u\"ken\"\n for char in u\"坑吭\":\n self.trans[char] = u\"keng\"\n for char in u\"空恐孔控\":\n self.trans[char] = u\"kong\"\n for char in u\"抠口扣寇\":\n self.trans[char] = u\"kou\"\n for char in u\"枯哭窟苦酷库裤\":\n self.trans[char] = u\"ku\"\n for char in u\"夸垮挎跨胯\":\n self.trans[char] = u\"kua\"\n for char in u\"块筷侩快\":\n self.trans[char] = u\"kuai\"\n for char in u\"宽款\":\n self.trans[char] = u\"kuan\"\n for char in u\"匡筐狂框矿眶旷况\":\n self.trans[char] = u\"kuang\"\n for char in u\"亏盔岿窥葵奎魁傀馈愧溃\":\n self.trans[char] = u\"kui\"\n for char in u\"坤昆捆困\":\n self.trans[char] = u\"kun\"\n for char in u\"括扩廓阔\":\n self.trans[char] = u\"kuo\"\n for char in u\"垃拉喇蜡腊辣啦\":\n self.trans[char] = u\"la\"\n for char in u\"莱来赖\":\n self.trans[char] = u\"lai\"\n for char in u\"蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥\":\n self.trans[char] = u\"lan\"\n for char in u\"琅榔狼廊郎朗浪\":\n self.trans[char] = u\"lang\"\n for char in u\"捞劳牢老佬姥酪烙涝\":\n self.trans[char] = u\"lao\"\n for char in u\"勒乐\":\n self.trans[char] = u\"le\"\n for char in u\"雷镭蕾磊累儡垒擂肋类泪\":\n self.trans[char] = u\"lei\"\n for char in u\"棱楞冷\":\n self.trans[char] = u\"leng\"\n for char in u\"厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力璃哩\":\n self.trans[char] = u\"li\"\n self.trans['俩'] = 'lia'\n for char in u\"联莲连镰廉怜涟帘敛脸链恋炼练\":\n self.trans[char] = u\"lian\"\n for char in u\"粮凉梁粱良两辆量晾亮谅\":\n self.trans[char] = u\"liang\"\n for char in u\"撩聊僚疗燎寥辽潦了撂镣廖料\":\n self.trans[char] = u\"liao\"\n for char in u\"列裂烈劣猎\":\n self.trans[char] = u\"lie\"\n for char in u\"琳林磷霖临邻鳞淋凛赁吝拎\":\n self.trans[char] = u\"lin\"\n for char in u\"玲菱零龄铃伶羚凌灵陵岭领另令\":\n self.trans[char] = u\"ling\"\n for char in u\"溜琉榴硫馏留刘瘤流柳六\":\n self.trans[char] = u\"liu\"\n for char in u\"龙聋咙笼窿隆垄拢陇\":\n self.trans[char] = u\"long\"\n for char in u\"楼娄搂篓漏陋\":\n self.trans[char] = u\"lou\"\n for char in u\"芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸\":\n self.trans[char] = u\"lu\"\n 
for char in u\"峦挛孪滦卵乱\":\n self.trans[char] = u\"luan\"\n for char in u\"掠略\":\n self.trans[char] = u\"lue\"\n for char in u\"抡轮伦仑沦纶论\":\n self.trans[char] = u\"lun\"\n for char in u\"萝螺罗逻锣箩骡裸落洛骆络漯\":\n self.trans[char] = u\"luo\"\n for char in u\"驴吕铝侣旅履屡缕虑氯律率滤绿\":\n self.trans[char] = u\"lv\"\n for char in u\"妈麻玛码蚂马骂嘛吗\":\n self.trans[char] = u\"ma\"\n for char in u\"埋买麦卖迈脉\":\n self.trans[char] = u\"mai\"\n for char in u\"瞒馒蛮满蔓曼慢漫谩\":\n self.trans[char] = u\"man\"\n for char in u\"芒茫盲氓忙莽\":\n self.trans[char] = u\"mang\"\n for char in u\"猫茅锚毛矛铆卯茂冒帽貌贸\":\n self.trans[char] = u\"mao\"\n self.trans['么'] = 'me'\n for char in u\"玫枚梅酶霉煤没眉媒镁每美昧寐妹媚\":\n self.trans[char] = u\"mei\"\n for char in u\"门闷们\":\n self.trans[char] = u\"men\"\n for char in u\"萌蒙檬盟锰猛梦孟\":\n self.trans[char] = u\"meng\"\n for char in u\"眯醚靡糜迷谜弥米秘觅泌蜜密幂\":\n self.trans[char] = u\"mi\"\n for char in u\"棉眠绵冕免勉娩缅面\":\n self.trans[char] = u\"mian\"\n for char in u\"苗描瞄藐秒渺庙妙\":\n self.trans[char] = u\"miao\"\n for char in u\"蔑灭\":\n self.trans[char] = u\"mie\"\n for char in u\"民抿皿敏悯闽\":\n self.trans[char] = u\"min\"\n for char in u\"明螟鸣铭名命\":\n self.trans[char] = u\"ming\"\n self.trans['谬'] = 'miu'\n for char in u\"摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌\":\n self.trans[char] = u\"mo\"\n for char in u\"谋牟某\":\n self.trans[char] = u\"mou\"\n for char in u\"拇牡亩姆母墓暮幕募慕木目睦牧穆\":\n self.trans[char] = u\"mu\"\n for char in u\"拿哪呐钠那娜纳\":\n self.trans[char] = u\"na\"\n for char in u\"氖乃奶耐奈\":\n self.trans[char] = u\"nai\"\n for char in u\"南男难\":\n self.trans[char] = u\"nan\"\n self.trans['囊'] = 'nang'\n for char in u\"挠脑恼闹淖\":\n self.trans[char] = u\"nao\"\n self.trans['呢'] = 'ne'\n for char in u\"馁内\":\n self.trans[char] = u\"nei\"\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in u\"妮霓倪泥尼拟你匿腻逆溺\":\n self.trans[char] = u\"ni\"\n for char in u\"蔫拈年碾撵捻念\":\n self.trans[char] = u\"nian\"\n for char in u\"娘酿\":\n self.trans[char] = u\"niang\"\n for char in u\"鸟尿\":\n self.trans[char] = u\"niao\"\n for char in u\"捏聂孽啮镊镍涅\":\n self.trans[char] = u\"nie\"\n self.trans['您'] = 'nin'\n for char in u\"柠狞凝宁拧泞\":\n self.trans[char] = u\"ning\"\n for char in u\"牛扭钮纽\":\n self.trans[char] = u\"niu\"\n for char in u\"脓浓农弄\":\n self.trans[char] = u\"nong\"\n for char in u\"奴努怒\":\n self.trans[char] = u\"nu\"\n self.trans['暖'] = 'nuan'\n for char in u\"虐疟\":\n self.trans[char] = u\"nue\"\n for char in u\"挪懦糯诺\":\n self.trans[char] = u\"nuo\"\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in u\"欧鸥殴藕呕偶沤\":\n self.trans[char] = u\"ou\"\n for char in u\"啪趴爬帕怕琶\":\n self.trans[char] = u\"pa\"\n for char in u\"拍排牌徘湃派\":\n self.trans[char] = u\"pai\"\n for char in u\"攀潘盘磐盼畔判叛\":\n self.trans[char] = u\"pan\"\n for char in u\"乓庞旁耪胖\":\n self.trans[char] = u\"pang\"\n for char in u\"抛咆刨炮袍跑泡\":\n self.trans[char] = u\"pao\"\n for char in u\"呸胚培裴赔陪配佩沛\":\n self.trans[char] = u\"pei\"\n for char in u\"喷盆\":\n self.trans[char] = u\"pen\"\n for char in u\"砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰\":\n self.trans[char] = u\"peng\"\n for char in u\"坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬\":\n self.trans[char] = u\"pi\"\n for char in u\"篇偏片骗\":\n self.trans[char] = u\"pian\"\n for char in u\"飘漂瓢票\":\n self.trans[char] = u\"piao\"\n for char in u\"撇瞥\":\n self.trans[char] = u\"pie\"\n for char in u\"拼频贫品聘\":\n self.trans[char] = u\"pin\"\n for char in u\"乒坪苹萍平凭瓶评屏\":\n self.trans[char] = u\"ping\"\n for char in u\"坡泼颇婆破魄迫粕剖\":\n self.trans[char] = u\"po\"\n for char in u\"扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮\":\n self.trans[char] = u\"pu\"\n for char in u\"期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄弃汽泣讫\":\n self.trans[char] = u\"qi\"\n for 
char in u\"掐恰洽\":\n self.trans[char] = u\"qia\"\n for char in u\"牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉\":\n self.trans[char] = u\"qian\"\n for char in u\"枪呛腔羌墙蔷强抢\":\n self.trans[char] = u\"qiang\"\n for char in u\"橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍\":\n self.trans[char] = u\"qiao\"\n for char in u\"切茄且怯窃\":\n self.trans[char] = u\"qie\"\n for char in u\"钦侵亲秦琴勤芹擒禽寝沁\":\n self.trans[char] = u\"qin\"\n for char in u\"青轻氢倾卿清擎晴氰情顷请庆\":\n self.trans[char] = u\"qing\"\n for char in u\"琼穷\":\n self.trans[char] = u\"qiong\"\n for char in u\"秋丘邱球求囚酋泅\":\n self.trans[char] = u\"qiu\"\n for char in u\"趋区蛆曲躯屈驱渠取娶龋趣去\":\n self.trans[char] = u\"qu\"\n for char in u\"圈颧权醛泉全痊拳犬券劝\":\n self.trans[char] = u\"quan\"\n for char in u\"缺炔瘸却鹊榷确雀\":\n self.trans[char] = u\"que\"\n for char in u\"裙群\":\n self.trans[char] = u\"qun\"\n for char in u\"然燃冉染\":\n self.trans[char] = u\"ran\"\n for char in u\"瓤壤攘嚷让\":\n self.trans[char] = u\"rang\"\n for char in u\"饶扰绕\":\n self.trans[char] = u\"rao\"\n for char in u\"惹热\":\n self.trans[char] = u\"re\"\n for char in u\"壬仁人忍韧任认刃妊纫\":\n self.trans[char] = u\"ren\"\n for char in u\"扔仍\":\n self.trans[char] = u\"reng\"\n self.trans['日'] = 'ri'\n for char in u\"戎茸蓉荣融熔溶容绒冗\":\n self.trans[char] = u\"rong\"\n for char in u\"揉柔肉\":\n self.trans[char] = u\"rou\"\n for char in u\"茹蠕儒孺如辱乳汝入褥\":\n self.trans[char] = u\"ru\"\n for char in u\"软阮\":\n self.trans[char] = u\"ruan\"\n for char in u\"蕊瑞锐\":\n self.trans[char] = u\"rui\"\n for char in u\"闰润\":\n self.trans[char] = u\"run\"\n for char in u\"若弱\":\n self.trans[char] = u\"ruo\"\n for char in u\"撒洒萨\":\n self.trans[char] = u\"sa\"\n for char in u\"腮鳃塞赛\":\n self.trans[char] = u\"sai\"\n for char in u\"三叁伞散\":\n self.trans[char] = u\"san\"\n for char in u\"桑嗓丧\":\n self.trans[char] = u\"sang\"\n for char in u\"搔骚扫嫂\":\n self.trans[char] = u\"sao\"\n for char in u\"瑟色涩\":\n self.trans[char] = u\"se\"\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in u\"莎砂杀刹沙纱傻啥煞\":\n self.trans[char] = u\"sha\"\n for char in u\"筛晒\":\n self.trans[char] = u\"shai\"\n for char in u\"珊苫杉山删煽衫闪陕擅赡膳善汕扇缮\":\n self.trans[char] = u\"shan\"\n for char in u\"墒伤商赏晌上尚裳\":\n self.trans[char] = u\"shang\"\n for char in u\"梢捎稍烧芍勺韶少哨邵绍\":\n self.trans[char] = u\"shao\"\n for char in u\"奢赊蛇舌舍赦摄射慑涉社设\":\n self.trans[char] = u\"she\"\n for char in u\"砷申呻伸身深娠绅神沈审婶甚肾慎渗\":\n self.trans[char] = u\"shen\"\n for char in u\"声生甥牲升绳省盛剩胜圣\":\n self.trans[char] = u\"sheng\"\n for char in u\"师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝势是嗜噬适仕侍释饰氏市恃室视试\":\n self.trans[char] = u\"shi\"\n for char in u\"收手首守寿授售受瘦兽\":\n self.trans[char] = u\"shou\"\n for char in u\"蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕\":\n self.trans[char] = u\"shu\"\n for char in u\"刷耍\":\n self.trans[char] = u\"shua\"\n for char in u\"摔衰甩帅\":\n self.trans[char] = u\"shuai\"\n for char in u\"栓拴\":\n self.trans[char] = u\"shuan\"\n for char in u\"霜双爽\":\n self.trans[char] = u\"shuang\"\n for char in u\"谁水睡税\":\n self.trans[char] = u\"shui\"\n for char in u\"吮瞬顺舜\":\n self.trans[char] = u\"shun\"\n for char in u\"说硕朔烁\":\n self.trans[char] = u\"shuo\"\n for char in u\"斯撕嘶思私司丝死肆寺嗣四伺似饲巳\":\n self.trans[char] = u\"si\"\n for char in u\"松耸怂颂送宋讼诵\":\n self.trans[char] = u\"song\"\n for char in u\"搜艘擞\":\n self.trans[char] = u\"sou\"\n for char in u\"嗽苏酥俗素速粟僳塑溯宿诉肃\":\n self.trans[char] = u\"su\"\n for char in u\"酸蒜算\":\n self.trans[char] = u\"suan\"\n for char in u\"虽隋随绥髓碎岁穗遂隧祟\":\n self.trans[char] = u\"sui\"\n for char in u\"孙损笋\":\n self.trans[char] = u\"sun\"\n for char in u\"蓑梭唆缩琐索锁所\":\n self.trans[char] = u\"suo\"\n for char in u\"塌他它她塔獭挞蹋踏\":\n 
self.trans[char] = u\"ta\"\n for char in u\"胎苔抬台泰酞太态汰\":\n self.trans[char] = u\"tai\"\n for char in u\"坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭\":\n self.trans[char] = u\"tan\"\n for char in u\"汤塘搪堂棠膛唐糖倘躺淌趟烫\":\n self.trans[char] = u\"tang\"\n for char in u\"掏涛滔绦萄桃逃淘陶讨套\":\n self.trans[char] = u\"tao\"\n self.trans['特'] = 'te'\n for char in u\"藤腾疼誊\":\n self.trans[char] = u\"teng\"\n for char in u\"梯剔踢锑提题蹄啼体替嚏惕涕剃屉\":\n self.trans[char] = u\"ti\"\n for char in u\"兲天添填田甜恬舔腆\":\n self.trans[char] = u\"tian\"\n for char in u\"挑条迢眺跳\":\n self.trans[char] = u\"tiao\"\n for char in u\"贴铁帖\":\n self.trans[char] = u\"tie\"\n for char in u\"厅听烃汀廷停亭庭挺艇\":\n self.trans[char] = u\"ting\"\n for char in u\"通桐酮瞳同铜彤童桶捅筒统痛\":\n self.trans[char] = u\"tong\"\n for char in u\"偷投头透\":\n self.trans[char] = u\"tou\"\n for char in u\"凸秃突图徒途涂屠土吐兔\":\n self.trans[char] = u\"tu\"\n for char in u\"湍团\":\n self.trans[char] = u\"tuan\"\n for char in u\"推颓腿蜕褪退\":\n self.trans[char] = u\"tui\"\n for char in u\"吞屯臀\":\n self.trans[char] = u\"tun\"\n for char in u\"拖托脱鸵陀驮驼椭妥拓唾\":\n self.trans[char] = u\"tuo\"\n for char in u\"挖哇蛙洼娃瓦袜\":\n self.trans[char] = u\"wa\"\n for char in u\"歪外\":\n self.trans[char] = u\"wai\"\n for char in u\"豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞\":\n self.trans[char] = u\"wan\"\n for char in u\"汪王亡枉网往旺望忘妄\":\n self.trans[char] = u\"wang\"\n for char in u\"威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫\":\n self.trans[char] = u\"wei\"\n for char in u\"瘟温蚊文闻纹吻稳紊问\":\n self.trans[char] = u\"wen\"\n for char in u\"嗡翁瓮\":\n self.trans[char] = u\"weng\"\n for char in u\"挝蜗涡窝我斡卧握沃\":\n self.trans[char] = u\"wo\"\n for char in u\"巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误\":\n self.trans[char] = u\"wu\"\n for char in u\"昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系隙戏细\":\n self.trans[char] = u\"xi\"\n for char in u\"瞎虾匣霞辖暇峡侠狭下厦夏吓\":\n self.trans[char] = u\"xia\"\n for char in u\"掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线\":\n self.trans[char] = u\"xian\"\n for char in u\"相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象\":\n self.trans[char] = u\"xiang\"\n for char in u\"萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效\":\n self.trans[char] = u\"xiao\"\n for char in u\"楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑\":\n self.trans[char] = u\"xie\"\n for char in u\"薪芯锌欣辛新忻心信衅\":\n self.trans[char] = u\"xin\"\n for char in u\"星腥猩惺兴刑型形邢行醒幸杏性姓\":\n self.trans[char] = u\"xing\"\n for char in u\"兄凶胸匈汹雄熊\":\n self.trans[char] = u\"xiong\"\n for char in u\"休修羞朽嗅锈秀袖绣\":\n self.trans[char] = u\"xiu\"\n for char in u\"墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续\":\n self.trans[char] = u\"xu\"\n for char in u\"轩喧宣悬旋玄选癣眩绚\":\n self.trans[char] = u\"xuan\"\n for char in u\"靴薛学穴雪血\":\n self.trans[char] = u\"xue\"\n for char in u\"勋熏循旬询寻驯巡殉汛训讯逊迅\":\n self.trans[char] = u\"xun\"\n for char in u\"压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶\":\n self.trans[char] = u\"ya\"\n for char in u\"焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验\":\n self.trans[char] = u\"yan\"\n for char in u\"殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾\":\n self.trans[char] = u\"yang\"\n for char in u\"邀腰妖瑶摇尧遥窑谣姚咬舀药要耀\":\n self.trans[char] = u\"yao\"\n for char in u\"椰噎耶爷野冶也页掖业叶曳腋夜液\":\n self.trans[char] = u\"ye\"\n for char in u\"一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎\":\n self.trans[char] = u\"yi\"\n for char in u\"茵荫因殷音阴姻吟银淫寅饮尹引隐印\":\n self.trans[char] = u\"yin\"\n for char in u\"英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映\":\n self.trans[char] = u\"ying\"\n self.trans['哟'] = 'yo'\n for char in u\"拥佣臃痈庸雍踊蛹咏泳涌永恿勇用\":\n self.trans[char] = u\"yong\"\n for char in u\"幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂\":\n self.trans[char] = u\"you\"\n for char in u\"淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻峪御愈欲狱育誉浴寓裕预豫驭\":\n self.trans[char] = u\"yu\"\n for char in u\"鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院\":\n self.trans[char] = u\"yuan\"\n for char in u\"曰约越跃钥岳粤月悦阅\":\n 
self.trans[char] = u\"yue\"\n for char in u\"耘云郧匀陨允运蕴酝晕韵孕\":\n self.trans[char] = u\"yun\"\n for char in u\"匝砸杂\":\n self.trans[char] = u\"za\"\n for char in u\"栽哉灾宰载再在\":\n self.trans[char] = u\"zai\"\n for char in u\"咱攒暂赞\":\n self.trans[char] = u\"zan\"\n for char in u\"赃脏葬\":\n self.trans[char] = u\"zang\"\n for char in u\"遭糟凿藻枣早澡蚤躁噪造皂灶燥\":\n self.trans[char] = u\"zao\"\n for char in u\"责择则泽\":\n self.trans[char] = u\"ze\"\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in u\"增憎曾赠\":\n self.trans[char] = u\"zeng\"\n for char in u\"扎喳渣札轧铡闸眨栅榨咋乍炸诈\":\n self.trans[char] = u\"zha\"\n for char in u\"摘斋宅窄债寨\":\n self.trans[char] = u\"zhai\"\n for char in u\"瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽\":\n self.trans[char] = u\"zhan\"\n for char in u\"樟章彰漳张掌涨杖丈帐账仗胀瘴障\":\n self.trans[char] = u\"zhang\"\n for char in u\"招昭找沼赵照罩兆肇召\":\n self.trans[char] = u\"zhao\"\n for char in u\"遮折哲蛰辙者锗蔗这浙\":\n self.trans[char] = u\"zhe\"\n for char in u\"珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳\":\n self.trans[char] = u\"zhen\"\n for char in u\"蒸挣睁征狰争怔整拯正政帧症郑证\":\n self.trans[char] = u\"zheng\"\n for char in u\"芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置帜峙制智秩稚质炙痔滞治窒\":\n self.trans[char] = u\"zhi\"\n for char in u\"中盅忠钟衷终种肿重仲众\":\n self.trans[char] = u\"zhong\"\n for char in u\"舟周州洲诌粥轴肘帚咒皱宙昼骤\":\n self.trans[char] = u\"zhou\"\n for char in u\"珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻\":\n self.trans[char] = u\"zhu\"\n for char in u\"抓爪\":\n self.trans[char] = u\"zhua\"\n self.trans['拽'] = 'zhuai'\n for char in u\"专砖转撰赚篆\":\n self.trans[char] = u\"zhuan\"\n for char in u\"桩庄装妆撞壮状\":\n self.trans[char] = u\"zhuang\"\n for char in u\"椎锥追赘坠缀\":\n self.trans[char] = u\"zhui\"\n for char in u\"谆准\":\n self.trans[char] = u\"zhun\"\n for char in u\"捉拙卓桌琢茁酌啄着灼浊\":\n self.trans[char] = u\"zhuo\"\n for char in u\"兹咨资姿滋淄孜紫仔籽滓子自渍字\":\n self.trans[char] = u\"zi\"\n for char in u\"鬃棕踪宗综总纵\":\n self.trans[char] = u\"zong\"\n for char in u\"邹走奏揍\":\n self.trans[char] = u\"zou\"\n for char in u\"租足卒族祖诅阻组\":\n self.trans[char] = u\"zu\"\n for char in u\"钻纂\":\n self.trans[char] = u\"zuan\"\n for char in u\"嘴醉最罪\":\n self.trans[char] = u\"zui\"\n for char in u\"尊遵\":\n self.trans[char] = u\"zun\"\n for char in u\"昨左佐柞做作坐座\":\n self.trans[char] = u\"zuo\"\n # from: https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans[u\"ଂ\"] = \"anusvara\"\n self.trans[u\"ઇ\"] = \"i\"\n self.trans[u\"എ\"] = \"e\"\n self.trans[u\"ગ\"] = \"ga\"\n self.trans[u\"ਜ\"] = \"ja\"\n self.trans[u\"ഞ\"] = \"nya\"\n self.trans[u\"ଢ\"] = \"ddha\"\n self.trans[u\"ધ\"] = \"dha\"\n self.trans[u\"ਬ\"] = \"ba\"\n self.trans[u\"മ\"] = \"ma\"\n self.trans[u\"ଲ\"] = \"la\"\n self.trans[u\"ષ\"] = \"ssa\"\n self.trans[u\"਼\"] = \"nukta\"\n self.trans[u\"ാ\"] = \"aa\"\n self.trans[u\"ୂ\"] = \"uu\"\n self.trans[u\"ે\"] = \"e\"\n self.trans[u\"ੌ\"] = \"au\"\n self.trans[u\"ൎ\"] = \"reph\"\n self.trans[u\"ੜ\"] = \"rra\"\n self.trans[u\"՞\"] = \"?\"\n self.trans[u\"ୢ\"] = \"l\"\n self.trans[u\"૧\"] = \"1\"\n self.trans[u\"੬\"] = \"6\"\n self.trans[u\"൮\"] = \"8\"\n self.trans[u\"୲\"] = \"quarter\"\n self.trans[u\"ൾ\"] = \"ll\"\n self.trans[u\"ਇ\"] = \"i\"\n self.trans[u\"ഉ\"] = \"u\"\n self.trans[u\"ઌ\"] = \"l\"\n self.trans[u\"ਗ\"] = \"ga\"\n self.trans[u\"ങ\"] = \"nga\"\n self.trans[u\"ଝ\"] = \"jha\"\n self.trans[u\"જ\"] = \"ja\"\n self.trans[u\"؟\"] = \"?\"\n self.trans[u\"ਧ\"] = \"dha\"\n self.trans[u\"ഩ\"] = \"nnna\"\n self.trans[u\"ଭ\"] = \"bha\"\n self.trans[u\"બ\"] = \"ba\"\n self.trans[u\"ഹ\"] = \"ha\"\n self.trans[u\"ଽ\"] = \"avagraha\"\n self.trans[u\"઼\"] = \"nukta\"\n self.trans[u\"ੇ\"] = 
\"ee\"\n self.trans[u\"୍\"] = \"virama\"\n self.trans[u\"ૌ\"] = \"au\"\n self.trans[u\"੧\"] = \"1\"\n self.trans[u\"൩\"] = \"3\"\n self.trans[u\"୭\"] = \"7\"\n self.trans[u\"૬\"] = \"6\"\n self.trans[u\"൹\"] = \"mark\"\n self.trans[u\"ਖ਼\"] = \"khha\"\n self.trans[u\"ਂ\"] = \"bindi\"\n self.trans[u\"ഈ\"] = \"ii\"\n self.trans[u\"ઍ\"] = \"e\"\n self.trans[u\"ଌ\"] = \"l\"\n self.trans[u\"ഘ\"] = \"gha\"\n self.trans[u\"ઝ\"] = \"jha\"\n self.trans[u\"ଡ଼\"] = \"rra\"\n self.trans[u\"ਢ\"] = \"ddha\"\n self.trans[u\"ന\"] = \"na\"\n self.trans[u\"ભ\"] = \"bha\"\n self.trans[u\"ବ\"] = \"ba\"\n self.trans[u\"ਲ\"] = \"la\"\n self.trans[u\"സ\"] = \"sa\"\n self.trans[u\"ઽ\"] = \"avagraha\"\n self.trans[u\"଼\"] = \"nukta\"\n self.trans[u\"ੂ\"] = \"uu\"\n self.trans[u\"ൈ\"] = \"ai\"\n self.trans[u\"્\"] = \"virama\"\n self.trans[u\"ୌ\"] = \"au\"\n self.trans[u\"൨\"] = \"2\"\n self.trans[u\"૭\"] = \"7\"\n self.trans[u\"୬\"] = \"6\"\n self.trans[u\"ੲ\"] = \"iri\"\n self.trans[u\"ഃ\"] = \"visarga\"\n self.trans[u\"ં\"] = \"anusvara\"\n self.trans[u\"ଇ\"] = \"i\"\n self.trans[u\"ഓ\"] = \"oo\"\n self.trans[u\"ଗ\"] = \"ga\"\n self.trans[u\"ਝ\"] = \"jha\"\n self.trans[u\"?\"] = \"?\"\n self.trans[u\"ണ\"] = \"nna\"\n self.trans[u\"ઢ\"] = \"ddha\"\n self.trans[u\"ଧ\"] = \"dha\"\n self.trans[u\"ਭ\"] = \"bha\"\n self.trans[u\"ള\"] = \"lla\"\n self.trans[u\"લ\"] = \"la\"\n self.trans[u\"ଷ\"] = \"ssa\"\n self.trans[u\"ൃ\"] = \"r\"\n self.trans[u\"ૂ\"] = \"uu\"\n self.trans[u\"େ\"] = \"e\"\n self.trans[u\"੍\"] = \"virama\"\n self.trans[u\"ୗ\"] = \"mark\"\n self.trans[u\"ൣ\"] = \"ll\"\n self.trans[u\"ૢ\"] = \"l\"\n self.trans[u\"୧\"] = \"1\"\n self.trans[u\"੭\"] = \"7\"\n self.trans[u\"൳\"] = \"1/4\"\n self.trans[u\"୷\"] = \"sixteenths\"\n self.trans[u\"ଆ\"] = \"aa\"\n self.trans[u\"ઋ\"] = \"r\"\n self.trans[u\"ഊ\"] = \"uu\"\n self.trans[u\"ਐ\"] = \"ai\"\n self.trans[u\"ଖ\"] = \"kha\"\n self.trans[u\"છ\"] = \"cha\"\n self.trans[u\"ച\"] = \"ca\"\n self.trans[u\"ਠ\"] = \"ttha\"\n self.trans[u\"ଦ\"] = \"da\"\n self.trans[u\"ફ\"] = \"pha\"\n self.trans[u\"പ\"] = \"pa\"\n self.trans[u\"ਰ\"] = \"ra\"\n self.trans[u\"ଶ\"] = \"sha\"\n self.trans[u\"ഺ\"] = \"ttta\"\n self.trans[u\"ੀ\"] = \"ii\"\n self.trans[u\"ો\"] = \"o\"\n self.trans[u\"ൊ\"] = \"o\"\n self.trans[u\"ୖ\"] = \"mark\"\n self.trans[u\"୦\"] = \"0\"\n self.trans[u\"૫\"] = \"5\"\n self.trans[u\"൪\"] = \"4\"\n self.trans[u\"ੰ\"] = \"tippi\"\n self.trans[u\"୶\"] = \"eighth\"\n self.trans[u\"ൺ\"] = \"nn\"\n self.trans[u\"ଁ\"] = \"candrabindu\"\n self.trans[u\"അ\"] = \"a\"\n self.trans[u\"ઐ\"] = \"ai\"\n self.trans[u\"ക\"] = \"ka\"\n self.trans[u\"ਸ਼\"] = \"sha\"\n self.trans[u\"ਛ\"] = \"cha\"\n self.trans[u\"ଡ\"] = \"dda\"\n self.trans[u\"ઠ\"] = \"ttha\"\n self.trans[u\"ഥ\"] = \"tha\"\n self.trans[u\"ਫ\"] = \"pha\"\n self.trans[u\"ર\"] = \"ra\"\n self.trans[u\"വ\"] = \"va\"\n self.trans[u\"ୁ\"] = \"u\"\n self.trans[u\"ી\"] = \"ii\"\n self.trans[u\"ੋ\"] = \"oo\"\n self.trans[u\"ૐ\"] = \"om\"\n self.trans[u\"ୡ\"] = \"ll\"\n self.trans[u\"ૠ\"] = \"rr\"\n self.trans[u\"੫\"] = \"5\"\n self.trans[u\"ୱ\"] = \"wa\"\n self.trans[u\"૰\"] = \"sign\"\n self.trans[u\"൵\"] = \"quarters\"\n self.trans[u\"ਫ਼\"] = \"fa\"\n self.trans[u\"ઁ\"] = \"candrabindu\"\n self.trans[u\"ਆ\"] = \"aa\"\n self.trans[u\"ઑ\"] = \"o\"\n self.trans[u\"ଐ\"] = \"ai\"\n self.trans[u\"ഔ\"] = \"au\"\n self.trans[u\"ਖ\"] = \"kha\"\n self.trans[u\"ડ\"] = \"dda\"\n self.trans[u\"ଠ\"] = \"ttha\"\n self.trans[u\"ത\"] = \"ta\"\n self.trans[u\"ਦ\"] = \"da\"\n self.trans[u\"ର\"] = \"ra\"\n self.trans[u\"ഴ\"] = 
\"llla\"\n self.trans[u\"ુ\"] = \"u\"\n self.trans[u\"ୀ\"] = \"ii\"\n self.trans[u\"ൄ\"] = \"rr\"\n self.trans[u\"ૡ\"] = \"ll\"\n self.trans[u\"ୠ\"] = \"rr\"\n self.trans[u\"੦\"] = \"0\"\n self.trans[u\"૱\"] = \"sign\"\n self.trans[u\"୰\"] = \"isshar\"\n self.trans[u\"൴\"] = \"1/2\"\n self.trans[u\"ਁ\"] = \"bindi\"\n self.trans[u\"આ\"] = \"aa\"\n self.trans[u\"ଋ\"] = \"r\"\n self.trans[u\"ഏ\"] = \"ee\"\n self.trans[u\"ખ\"] = \"kha\"\n self.trans[u\"ଛ\"] = \"cha\"\n self.trans[u\"ട\"] = \"tta\"\n self.trans[u\"ਡ\"] = \"dda\"\n self.trans[u\"દ\"] = \"da\"\n self.trans[u\"ଫ\"] = \"pha\"\n self.trans[u\"യ\"] = \"ya\"\n self.trans[u\"શ\"] = \"sha\"\n self.trans[u\"ി\"] = \"i\"\n self.trans[u\"ੁ\"] = \"u\"\n self.trans[u\"ୋ\"] = \"o\"\n self.trans[u\"ੑ\"] = \"udaat\"\n self.trans[u\"૦\"] = \"0\"\n self.trans[u\"୫\"] = \"5\"\n self.trans[u\"൯\"] = \"9\"\n self.trans[u\"ੱ\"] = \"addak\"\n self.trans[u\"ൿ\"] = \"k\"\n self.trans[u\"ആ\"] = \"aa\"\n self.trans[u\"ଊ\"] = \"uu\"\n self.trans[u\"એ\"] = \"e\"\n self.trans[u\"ਔ\"] = \"au\"\n self.trans[u\"ഖ\"] = \"kha\"\n self.trans[u\"ଚ\"] = \"ca\"\n self.trans[u\"ટ\"] = \"tta\"\n self.trans[u\"ਤ\"] = \"ta\"\n self.trans[u\"ദ\"] = \"da\"\n self.trans[u\"ପ\"] = \"pa\"\n self.trans[u\"ય\"] = \"ya\"\n self.trans[u\"ശ\"] = \"sha\"\n self.trans[u\"િ\"] = \"i\"\n self.trans[u\"െ\"] = \"e\"\n self.trans[u\"൦\"] = \"0\"\n self.trans[u\"୪\"] = \"4\"\n self.trans[u\"૯\"] = \"9\"\n self.trans[u\"ੴ\"] = \"onkar\"\n self.trans[u\"ଅ\"] = \"a\"\n self.trans[u\"ਏ\"] = \"ee\"\n self.trans[u\"କ\"] = \"ka\"\n self.trans[u\"ઔ\"] = \"au\"\n self.trans[u\"ਟ\"] = \"tta\"\n self.trans[u\"ഡ\"] = \"dda\"\n self.trans[u\"ଥ\"] = \"tha\"\n self.trans[u\"ત\"] = \"ta\"\n self.trans[u\"ਯ\"] = \"ya\"\n self.trans[u\"റ\"] = \"rra\"\n self.trans[u\"ଵ\"] = \"va\"\n self.trans[u\"ਿ\"] = \"i\"\n self.trans[u\"ു\"] = \"u\"\n self.trans[u\"ૄ\"] = \"rr\"\n self.trans[u\"ൡ\"] = \"ll\"\n self.trans[u\"੯\"] = \"9\"\n self.trans[u\"൱\"] = \"100\"\n self.trans[u\"୵\"] = \"sixteenth\"\n self.trans[u\"અ\"] = \"a\"\n self.trans[u\"ਊ\"] = \"uu\"\n self.trans[u\"ഐ\"] = \"ai\"\n self.trans[u\"ક\"] = \"ka\"\n self.trans[u\"ଔ\"] = \"au\"\n self.trans[u\"ਚ\"] = \"ca\"\n self.trans[u\"ഠ\"] = \"ttha\"\n self.trans[u\"થ\"] = \"tha\"\n self.trans[u\"ତ\"] = \"ta\"\n self.trans[u\"ਪ\"] = \"pa\"\n self.trans[u\"ര\"] = \"ra\"\n self.trans[u\"વ\"] = \"va\"\n self.trans[u\"ീ\"] = \"ii\"\n self.trans[u\"ૅ\"] = \"e\"\n self.trans[u\"ୄ\"] = \"rr\"\n self.trans[u\"ൠ\"] = \"rr\"\n self.trans[u\"ਜ਼\"] = \"za\"\n self.trans[u\"੪\"] = \"4\"\n self.trans[u\"൰\"] = \"10\"\n self.trans[u\"୴\"] = \"quarters\"\n self.trans[u\"ਅ\"] = \"a\"\n self.trans[u\"ഋ\"] = \"r\"\n self.trans[u\"ઊ\"] = \"uu\"\n self.trans[u\"ଏ\"] = \"e\"\n self.trans[u\"ਕ\"] = \"ka\"\n self.trans[u\"ഛ\"] = \"cha\"\n self.trans[u\"ચ\"] = \"ca\"\n self.trans[u\"ଟ\"] = \"tta\"\n self.trans[u\"ਥ\"] = \"tha\"\n self.trans[u\"ഫ\"] = \"pha\"\n self.trans[u\"પ\"] = \"pa\"\n self.trans[u\"ଯ\"] = \"ya\"\n self.trans[u\"ਵ\"] = \"va\"\n self.trans[u\"ି\"] = \"i\"\n self.trans[u\"ോ\"] = \"oo\"\n self.trans[u\"ୟ\"] = \"yya\"\n self.trans[u\"൫\"] = \"5\"\n self.trans[u\"૪\"] = \"4\"\n self.trans[u\"୯\"] = \"9\"\n self.trans[u\"ੵ\"] = \"yakash\"\n self.trans[u\"ൻ\"] = \"n\"\n self.trans[u\"ઃ\"] = \"visarga\"\n self.trans[u\"ം\"] = \"anusvara\"\n self.trans[u\"ਈ\"] = \"ii\"\n self.trans[u\"ઓ\"] = \"o\"\n self.trans[u\"ഒ\"] = \"o\"\n self.trans[u\"ਘ\"] = \"gha\"\n self.trans[u\"ଞ\"] = \"nya\"\n self.trans[u\"ણ\"] = \"nna\"\n self.trans[u\"ഢ\"] = \"ddha\"\n self.trans[u\"ਲ਼\"] = 
\"lla\"\n self.trans[u\"ਨ\"] = \"na\"\n self.trans[u\"ମ\"] = \"ma\"\n self.trans[u\"ળ\"] = \"lla\"\n self.trans[u\"ല\"] = \"la\"\n self.trans[u\"ਸ\"] = \"sa\"\n self.trans[u\"¿\"] = \"?\"\n self.trans[u\"ା\"] = \"aa\"\n self.trans[u\"ૃ\"] = \"r\"\n self.trans[u\"ൂ\"] = \"uu\"\n self.trans[u\"ੈ\"] = \"ai\"\n self.trans[u\"ૣ\"] = \"ll\"\n self.trans[u\"ൢ\"] = \"l\"\n self.trans[u\"੨\"] = \"2\"\n self.trans[u\"୮\"] = \"8\"\n self.trans[u\"൲\"] = \"1000\"\n self.trans[u\"ਃ\"] = \"visarga\"\n self.trans[u\"ଉ\"] = \"u\"\n self.trans[u\"ઈ\"] = \"ii\"\n self.trans[u\"ਓ\"] = \"oo\"\n self.trans[u\"ଙ\"] = \"nga\"\n self.trans[u\"ઘ\"] = \"gha\"\n self.trans[u\"ഝ\"] = \"jha\"\n self.trans[u\"ਣ\"] = \"nna\"\n self.trans[u\"ન\"] = \"na\"\n self.trans[u\"ഭ\"] = \"bha\"\n self.trans[u\"ଜ\"] = \"ja\"\n self.trans[u\"ହ\"] = \"ha\"\n self.trans[u\"સ\"] = \"sa\"\n self.trans[u\"ഽ\"] = \"avagraha\"\n self.trans[u\"ૈ\"] = \"ai\"\n self.trans[u\"്\"] = \"virama\"\n self.trans[u\"୩\"] = \"3\"\n self.trans[u\"૨\"] = \"2\"\n self.trans[u\"൭\"] = \"7\"\n self.trans[u\"ੳ\"] = \"ura\"\n self.trans[u\"ൽ\"] = \"l\"\n self.trans[u\"ઉ\"] = \"u\"\n self.trans[u\"ଈ\"] = \"ii\"\n self.trans[u\"ഌ\"] = \"l\"\n self.trans[u\"ઙ\"] = \"nga\"\n self.trans[u\"ଘ\"] = \"gha\"\n self.trans[u\"ജ\"] = \"ja\"\n self.trans[u\"ਞ\"] = \"nya\"\n self.trans[u\"ନ\"] = \"na\"\n self.trans[u\"ബ\"] = \"ba\"\n self.trans[u\"ਮ\"] = \"ma\"\n self.trans[u\"હ\"] = \"ha\"\n self.trans[u\"ସ\"] = \"sa\"\n self.trans[u\"ਾ\"] = \"aa\"\n self.trans[u\"ૉ\"] = \"o\"\n self.trans[u\"ୈ\"] = \"ai\"\n self.trans[u\"ൌ\"] = \"au\"\n self.trans[u\"૩\"] = \"3\"\n self.trans[u\"୨\"] = \"2\"\n self.trans[u\"൬\"] = \"6\"\n self.trans[u\"੮\"] = \"8\"\n self.trans[u\"ർ\"] = \"rr\"\n self.trans[u\"ଃ\"] = \"visarga\"\n self.trans[u\"ഇ\"] = \"i\"\n self.trans[u\"ਉ\"] = \"u\"\n self.trans[u\"ଓ\"] = \"o\"\n self.trans[u\"ഗ\"] = \"ga\"\n self.trans[u\"ਙ\"] = \"nga\"\n self.trans[u\"ઞ\"] = \"nya\"\n self.trans[u\"ଣ\"] = \"nna\"\n self.trans[u\"ധ\"] = \"dha\"\n self.trans[u\"મ\"] = \"ma\"\n self.trans[u\"ଳ\"] = \"lla\"\n self.trans[u\"ഷ\"] = \"ssa\"\n self.trans[u\"ਹ\"] = \"ha\"\n self.trans[u\"ਗ਼\"] = \"ghha\"\n self.trans[u\"ા\"] = \"aa\"\n self.trans[u\"ୃ\"] = \"r\"\n self.trans[u\"േ\"] = \"ee\"\n self.trans[u\"ൗ\"] = \"mark\"\n self.trans[u\"ଢ଼\"] = \"rha\"\n self.trans[u\"ୣ\"] = \"ll\"\n self.trans[u\"൧\"] = \"1\"\n self.trans[u\"੩\"] = \"3\"\n self.trans[u\"૮\"] = \"8\"\n self.trans[u\"୳\"] = \"half\"\n for char in self.trans:\n value = self.trans[char]\n if value == \"?\":\n continue\n while value.encode(encoding, 'replace').decode(encoding) == \"?\" and value in self.trans:\n assert value != self.trans[value], \"%r == self.trans[%r]!\" % (value, value)\n value = self.trans[value]\n self.trans[char] = value", "def __unicode__(self):\n matrix_ = self._repr_matrix(self.matrix[:20, :5])\n lines = matrix_.split('\\n')\n headers = [repr(self)[1:-1]]\n if self._item_ids.size:\n col_headers = [('%-8s' % unicode(item)[:8]) for item in self._item_ids[:5]]\n headers.append(' ' + (' '.join(col_headers)))\n\n if self._user_ids.size:\n for (i, line) in enumerate(lines):\n lines[i] = ('%-8s' % unicode(self._user_ids[i])[:8]) + line\n for (i, line) in enumerate(headers):\n if i > 0:\n headers[i] = ' ' * 8 + line\n lines = headers + lines\n if self.matrix.shape[1] > 5 and self.matrix.shape[0] > 0:\n lines[1] += ' ...'\n if self.matrix.shape[0] > 20:\n lines.append('...')\n\n return '\\n'.join(line.rstrip() for line in lines)", "def __repr__(self):\n return 
self.textual_representation().encode(\"utf-8\")", "def to_transfac(self):\n m = \"%s\\t%s\\t%s\\n\" % (\"DE\", self.id, \"unknown\")\n for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())):\n m += \"%i\\t%s\\t%s\\n\" % (i, \"\\t\".join([str(int(x)) for x in row]), cons)\n m += \"XX\"\n return m", "def test_i18n03(self):\n output = self.engine.render_to_string('i18n03', {'anton': b'\\xc3\\x85'})\n self.assertEqual(output, 'Å')", "def test_i18n03(self):\n output = self.engine.render_to_string('i18n03', {'anton': b'\\xc3\\x85'})\n self.assertEqual(output, 'Å')", "def __unicode__(self):\n return unicode(self).encode('utf-8')", "def unicode2ascii(_unicrap):\n xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',\n 0xc6:'Ae', 0xc7:'C',\n 0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E',\n 0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',\n 0xd0:'Th', 0xd1:'N',\n 0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',\n 0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',\n 0xdd:'Y', 0xde:'th', 0xdf:'ss',\n 0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',\n 0xe6:'ae', 0xe7:'c',\n 0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e',\n 0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',\n 0xf0:'th', 0xf1:'n',\n 0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',\n 0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',\n 0xfd:'y', 0xfe:'th', 0xff:'y',\n 0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',\n 0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',\n 0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',\n 0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',\n 0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:\"'\",\n 0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',\n 0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',\n 0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',\n 0xd7:'*', 0xf7:'/'\n }\n\n s = \"\"\n for i in _unicrap:\n ordi = ord(i)\n if ordi in xlate:\n s += xlate[ordi]\n elif ordi >= 0x80:\n pass\n else:\n s += str(i)\n return s", "def history():", "def decode(self, s):", "def decode(self, s):", "def parse(self,value):\r\n\t\treturn unicode(value)", "def __unicode__(self):\r\n return \"'\" + self.text + \"' logged on \" + log_date.strftime('%a, %d, %B, %Y at %X')", "def __str__(self):\n return unicode(self).encode('utf-8')", "def __unicode__(self):\n return unicode(self.asPyDict())", "def test_annotate_text_utf32_directly_index_into_unicode():\n test_string = \"a \\u00e3 \\u0201 \\U0001f636 b\"\n result = analyze.analyze_syntax(test_string, encoding=\"UTF32\")\n tokens = result[\"tokens\"]\n\n assert tokens[0][\"text\"][\"content\"] == \"a\"\n offset = tokens[0][\"text\"].get(\"beginOffset\", 0)\n assert test_string[offset] == tokens[0][\"text\"][\"content\"]\n\n assert tokens[1][\"text\"][\"content\"] == \"\\u00e3\"\n offset = tokens[1][\"text\"].get(\"beginOffset\", 0)\n assert test_string[offset] == tokens[1][\"text\"][\"content\"]\n\n assert tokens[2][\"text\"][\"content\"] == \"\\u0201\"\n offset = tokens[2][\"text\"].get(\"beginOffset\", 0)\n assert test_string[offset] == tokens[2][\"text\"][\"content\"]\n\n # Temporarily disabled\n # assert tokens[3]['text']['content'] == u'\\U0001f636'\n # offset = tokens[3]['text'].get('beginOffset', 0)\n # assert test_string[offset] == tokens[3]['text']['content']\n\n # assert tokens[4]['text']['content'] == u'b'\n # offset = tokens[4]['text'].get('beginOffset', 0)\n # assert test_string[offset] == tokens[4]['text']['content']", "def latin1_to_ascii(self, unicrap):\n xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',\n 0xc6: 
'Ae', 0xc7: 'C',\n 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E',\n 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',\n 0xd0: 'Th', 0xd1: 'N',\n 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',\n 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',\n 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',\n 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',\n 0xe6: 'ae', 0xe7: 'c',\n 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e',\n 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',\n 0xf0: 'th', 0xf1: 'n',\n 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',\n 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',\n 0xfd: 'y', 0xfe: 'th', 0xff: 'y',\n 0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',\n 0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',\n 0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',\n 0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',\n 0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: \"'\",\n 0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',\n 0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',\n 0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',\n 0xd7: '*', 0xf7: '/'\n }\n\n r = ''\n for i in unicrap:\n if xlate.has_key(ord(i)):\n r += xlate[ord(i)]\n elif ord(i) >= 0x80:\n pass\n else:\n r += str(i)\n return r", "def get_session_history_as_string(self) -> str:\n return \"-\".join(self.get_session_history_as_list())", "def test_asciitable_m_unicode(self):\n input = '''\n╒════════╤════════╤════════╤════════╤════════╤════════╤════════╕\n│ type │ tota │ used │ fr ee │ shar │ buff │ avai │\n│ │ l │ │ │ ed │ _cac │ labl │\n│ │ │ │ │ │ he │ e │\n╞════════╪════════╪════════╪════════╪════════╪════════╪════════╡\n│ Mem │ 3861 │ 2228 │ 3364 │ 1183 │ 2743 │ 3389 │\n│ │ 332 │ 20 │ 176 │ 2 │ 36 │ 588 │\n├────────┼────────┼────────┼────────┼────────┼────────┼────────┤\n│ Swap │ 2097 │ 0 │ 2097 │ │ │ │\n│ │ 148 │ │ 148 │ │ │ │\n│ │ │ │ kb │ │ │ │\n├────────┼────────┼────────┼────────┼────────┼────────┼────────┤\n│ last │ last │ last │ ab cde │ │ │ final │\n╘════════╧════════╧════════╧════════╧════════╧════════╧════════╛\n '''\n expected = [\n {\n \"type\": \"Mem\",\n \"tota_l\": \"3861\\n332\",\n \"used\": \"2228\\n20\",\n \"fr_ee\": \"3364\\n176\",\n \"shar_ed\": \"1183\\n2\",\n \"buff_cac_he\": \"2743\\n36\",\n \"avai_labl_e\": \"3389\\n588\"\n },\n {\n \"type\": \"Swap\",\n \"tota_l\": \"2097\\n148\",\n \"used\": \"0\",\n \"fr_ee\": \"2097\\n148\\nkb\",\n \"shar_ed\": None,\n \"buff_cac_he\": None,\n \"avai_labl_e\": None\n },\n {\n \"type\": \"last\",\n \"tota_l\": \"last\",\n \"used\": \"last\",\n \"fr_ee\": \"ab cde\",\n \"shar_ed\": None,\n \"buff_cac_he\": None,\n \"avai_labl_e\": \"final\"\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def e(d):\n return d.encode('UTF-8')", "def beginning_checker(self, translit):\n tr_new = re.sub(r'(\\A|·)夫', r'\\1弗', translit)\n tr_new = re.sub(r'(\\A|·)耶', r'\\1叶', tr_new)\n return tr_new", "def __unicode__(self):\r\n return unicode(repr(self))", "def __bytes__(self):\n from pandas.core.config import get_option\n\n encoding = get_option(\"display.encoding\")\n return self.__unicode__().encode(encoding, 'replace')", "def get_history(hdr):\n return hdr['HISTORY']", "def toString():", "def display_unicode(self, string):\n if string is None:\n return ''\n return string.decode(\"utf16\", \"ignore\").encode(\"ascii\", 'backslashreplace')", "def __str__(self):\r\n return unicode(self.header)", "def trans_ascii (data):\n quality = []\n asl = data[2]\n for char in 
asl:\n change= ord (char)-64\n quality.append(change)\n \n return quality", "def _tostr(t):\n\treturn t.__unicode__()", "def get_trans_dict(self):\n translated = dict([(k,v) for (k,v) in self._trans_dict.items() if k is not v])\n frm = \" \".join([ c + ' |' for c in translated.keys()])\n to = \" \".join([ c + ' |' for c in translated.values()])\n\n return \"code: \\t{}\\nactual:\\t{}\".format(frm, to)", "def MS_match2string(ms_match):\n return \"{} {} {}\".format(ms_match[0].__repr__(), ms_match[1], ms_match[2])", "def codec(cls) -> str:\n return 'UTF8'", "def to_unicode(session):\n return six.text_type(session.data)", "def read_message_history(_) -> int:\n return 1 << 16", "def read_message_history(_) -> int:\n return 1 << 16", "def snapshot(self):\n text = \"\"\n text += \"{}:\\n{}\\n\".format('chi', np.array2string(self.chi))\n return text", "def report_index(self, index):\n i = 0\n for k, data in self.matches[index].items():\n if i != 0:\n print\n print fmt(\"['%c': charset - chars]\" % k, MAGENTA)\n print fmt(sorted([x for x in data[\"charset\"]]), WHITE)\n print fmt(data[\"chars\"], WHITE)\n i = 1", "def __unicode__(self):\n return unicode(\"ServerInfo (pk:%d, msg:%s, timestamp:%s)\" %\n (self.pk, self.team_msg, str(self.timestamp)))", "def __bytes__(self):\n return unicode(self).encode('utf-8')", "def enlabel(mi_, ma_):\n\treturn \"Unicode characters from {} to {} codepoints\".format(mi_, ma_)", "def __unicode__(self):\n return unicode(self.GetCalendarString())", "def __unicode__(self):\n return unicode(self.GetCalendarString())", "def test_assembleReverseVideo(self):\n self.assertEqual(\n irc.assembleFormattedText(A.reverseVideo[\"hello\"]), \"\\x0f\\x16hello\"\n )", "def test_add_outcome_unicode_id(self):\n self.protocol.addSuccess(self.unicode_test)\n expected = compat._b(\"successful: \") + compat._u(\n '\\u2603').encode('utf8') + compat._b(\"\\n\")\n self.assertEqual(expected, self.io.getvalue())", "def __str__(self):\n\n return str(self.match.groups()) + ' ' + self.line", "def decodeUtf8(self, arrayBuffer):", "def decodeUtf8(self, arrayBuffer):", "def __unicode__(self):\n\t\treturn u'-'.join(self._string_values())", "def load_history_strings(self) -> Iterable[str]:\n while False:\n yield", "def logP(self, history, word):", "def test_milestone_add_utf8_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute(u'milestone add \\xa9tat_final \"%s\"' #\\xc2\\xa9\n % self._test_date)\n rv, output = self._execute('milestone list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def history():\n return apology(\"TODO\")", "def __repr__(self):\n return repr(self._translation)", "def toString(self): #$NON-NLS-1$\r", "def history(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_history(self)", "def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local):\n ucp = (ucs.encode('unicode_escape')[2:]\n .decode('ascii')\n .upper()\n .lstrip('0'))\n url = \"http://codepoints.net/U+{}\".format(ucp)\n name = unicodedata.name(ucs)\n return (u\"libc,ours={},{} [--o{}o--] name={} val={} {}\"\n \" \".format(wcwidth_libc, wcwidth_local, ucs, name, ord(ucs), url))", "def get_match_str(self, match):\n return \"\"", "def escapeDecode(s: unicode) -> unicode:\n ...", "def get(self):\n res = ''\n for hist in self.history:\n res += (str(hist) + '\\n')\n return res", "def astr(obj):\n\treturn unicode(obj).encode(\"ascii\", \"replace\")", "def 
get_history(self):\r\n\r\n return self.board_history", "def _translate_string(self, data):\n data = data.encode('iso-8859-1', errors='replace')\n\n for index, char in enumerate(data):\n yield self._meta.characters - 1 - self._ct[char]", "def DFAtoRE(self):\n\n pass", "def pack_ascii(self):\n\n out = ''\n for w in sorted(self.all_words()):\n assert isinstance(self.value[w], LOTHypothesis), \"*** Can only pack Lexicons with FunctionNode values\"\n out += \"%s:%s;\" % (w, self.value[w].grammar.pack_ascii(self.value[w].value) )\n return out", "def format_tb(tb):\n from traceback import format_tb\n return [e.decode('utf-8', 'surrogateescape') for e in format_tb(tb)]", "def __str__(self):\n return functools.reduce(\n lambda acc, v: acc + str(v[0]) + \" : \" + str(v[1][1]) + \" - lifetime \" + str(v[1][0]) + os.linesep,\n self.store.items(), \"\")", "def __repr__(self):\n r = '<Character id:%s[%s] name:_%s_>' % (self.characterID,\n self.accessSystem,\n self.get('name'))\n if isinstance(r, unicode): r = r.encode('utf_8', 'replace')\n return r", "def _convert_to_text(self):\n if type(self.data) is not list:\n return -1\n out = str()\n for element in self.data:\n out += chr(int(element))\n return (out)", "def getMatchHistory(self, **kwargs):\n return self.makeRequest('GetMatchHistory', **kwargs)", "def __repr__(self):\n\t\tret = \"\"\n\t\tfor i, x in enumerate(self.squares):\n\n\t\t\tret += \"\\t\"\n\t\t\tfor j in range(32): ret += u\"\\u2015\"\n\t\t\tret += \"\\n\\t|\"\n\t\t\tfor y in x:\n\t\t\t\tret += str(y)\n\t\t\t\tret += \" | \"\n\n\t\t\tret += str(i+1) + \"\\n\"\n\n\t\tret += \"\\t\"\n\t\tfor i in range(32): ret += u\"\\u2015\"\n\t\tret += \"\\n \"\n\n\t\tfor l in self.letters:\n\t\t\tret += l+\" \"\n\t\treturn ret", "def __unicode__(self):\n try:\n return unicode(self.srs)\n except:\n return unicode(self.wkt)", "def _translate(self):\r\n\r\n for place, pseudo_binary in self.letters.items():\r\n for letter in self.alphabet:\r\n\r\n with open(os.path.join(self.training_data_folder, letter + '.json'), 'r', encoding = 'utf-8') as js:\r\n data = json.loads(js.read())\r\n\r\n if pseudo_binary in data:\r\n self.result[place] = letter\r\n break\r\n\r\n else:\r\n self.result[place] = '-'\r\n\r\n if not self.devmode:\r\n return 'Not solved'\r\n\r\n return ''.join(self.result.values())", "def printunichars(row):\n print(\"Title:\")\n print(row[0].encode('utf-8'))\n print(\"Body:\")\n print(row[1].encode('utf-8'))\n print(\"Ref:\")\n print(row[2].encode('utf-8'))\n print(\"Url:\")\n print(row[3].encode('utf-8'))", "def to_ascii(self):\n code = self.build()\n for i, line in enumerate(code):\n code[i] = line.replace('1', '|').replace('0', '_')\n return '\\n'.join(code)", "def afficher_damier_ascii(infojeu):\n lignes = []\n lignes += list(\"Légende: 1=\"+ str(infojeu[\"joueurs\"][0][\"nom\"])+\n ', 2='+str(infojeu[\"joueurs\"][1][\"nom\"]) + \"\\n\")\n lignes += list(\" \"+\"-\"*35+\"\\n\")\n for i in range(1, 10):\n lignes += str(10-i) + \" | \"\n for j in range(1, 9):\n strplayer = \".\"\n if [j, 10-i] == infojeu[\"joueurs\"][0][\"pos\"]:\n strplayer = \"1\"\n elif [j, 10-i] == infojeu[\"joueurs\"][1][\"pos\"]:\n strplayer = \"2\"\n if [j+1, 10-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(strplayer + \" | \")\n elif [j+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(strplayer + \" | \")\n else:\n lignes += list(strplayer + \" \")\n if [9, 10-i] == infojeu[\"joueurs\"][0][\"pos\"]:\n lignes += list(\"1 |\")\n elif [9, 10-i] == infojeu[\"joueurs\"][1][\"pos\"]:\n lignes += 
list(\"2 |\")\n else:\n lignes += list(\". |\")\n if i != 9:\n lignes += list(\"\\n |\")\n for k in range(1, 9):\n if i != 9:\n if [k, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"----\")\n elif [k-1, 10-i] in infojeu[\"murs\"][\"horizontaux\"] and \\\n [k+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(\"---|\")\n elif [k-1, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"--- \")\n elif [k+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(\" |\")\n else:\n lignes += list(\" \")\n if i != 9:\n if [8, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"---|\")\n else:\n lignes += list(\" |\")\n lignes += list(\"\\n\")\n lignes += list(\"--|\"+ \"-\"*35+\"\\n\")\n lignes += list(\" | 1 2 3 4 5 6 7 8 9\")\n lignes = ''.join(lignes)\n print(lignes)" ]
[ "0.5806015", "0.5806015", "0.5598331", "0.5412658", "0.53824925", "0.53676164", "0.5249685", "0.5227061", "0.51851416", "0.5137522", "0.5127437", "0.51141953", "0.5091751", "0.50783116", "0.50761265", "0.50514686", "0.5038033", "0.50119203", "0.50102234", "0.4991699", "0.49879238", "0.49410284", "0.49403694", "0.49358243", "0.49342594", "0.4932376", "0.4921558", "0.49180776", "0.49047673", "0.49047673", "0.48963758", "0.4888165", "0.48881337", "0.48878843", "0.48878843", "0.4884661", "0.48752967", "0.4875263", "0.4862594", "0.48624685", "0.48592597", "0.4846422", "0.48460856", "0.48218948", "0.47911304", "0.47774503", "0.47692806", "0.47688124", "0.47657", "0.47642416", "0.47584194", "0.47558638", "0.47540748", "0.47507128", "0.4749021", "0.47464868", "0.47459644", "0.47395045", "0.47395045", "0.4737176", "0.4726637", "0.47233507", "0.47224358", "0.47182873", "0.47139177", "0.47139177", "0.47137585", "0.46888408", "0.46885374", "0.46882522", "0.46882522", "0.46849585", "0.4667443", "0.46660528", "0.46642864", "0.46635398", "0.46635398", "0.46635398", "0.4661404", "0.46594378", "0.4655334", "0.46477458", "0.46443662", "0.46433654", "0.46431193", "0.46353057", "0.46349835", "0.4633152", "0.46322602", "0.46286187", "0.46245843", "0.4623599", "0.4617016", "0.4610995", "0.46097925", "0.4603352", "0.45998582", "0.45989782", "0.4598684", "0.45956576", "0.45900467" ]
0.0
-1
takes first row of two-row belief np array and converts it to dict indexed by label of positive beliefs
def np_to_belief(np_array,labels): return dict((l,np_array[0,i]) for i,l in enumerate(labels))
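For clarity, a minimal, hypothetical usage sketch of the np_to_belief function above, assuming a 2xN NumPy belief array whose first row holds the positive beliefs and a label list of matching length; the array values and label names below are invented for illustration only:

import numpy as np

def np_to_belief(np_array, labels):
    # Index the first (positive-belief) row by position and key it by label.
    return dict((l, np_array[0, i]) for i, l in enumerate(labels))

# Invented example data: row 0 = positive beliefs, row 1 = negative beliefs.
beliefs = np.array([[0.9, 0.2, 0.7],
                    [0.1, 0.8, 0.3]])
labels = ["rain", "wind", "fog"]

result = np_to_belief(beliefs, labels)
print(result)  # maps each label to its positive belief: rain -> 0.9, wind -> 0.2, fog -> 0.7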
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_original_labels(array, threshold=0.5, initialization_value=999):\r\n \r\n binarized, belief = get_binarized_and_belief(array=array, threshold=threshold)\r\n \r\n #sanity check\r\n if binarized.shape != belief.shape:\r\n raise ValueError('Sanity check did not pass.')\r\n \r\n # initialize with a crazy label we will be sure is gone in the end\r\n slice_all_but_last_channel = tuple([slice(None) for _ in array.shape[:-1]] + [0])\r\n original_labels = initialization_value * np.ones_like(array[slice_all_but_last_channel])\r\n \r\n # the outer keys correspond to the binarized values\r\n # the inner keys correspond to the order of indices comingn from argsort(ascending) on suspicion, i.e. \r\n # how far the binarized sigmoid outputs were from the original sigmoid outputs \r\n # for example, (2, 1, 0) means the suspicion from least to greatest was: 'WT', 'TC', 'ET'\r\n # (recall that the order of the last three channels is expected to be: 'ET', 'TC', and 'WT')\r\n mapper = {(0, 0, 0): 0, \r\n (1, 1, 1): 4,\r\n (0, 1, 1): 1,\r\n (0, 0, 1): 2,\r\n (0, 1, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 1,\r\n (1, 2, 0): 1,\r\n (0, 2, 1): 0,\r\n (0, 1, 2): 1}, \r\n (1, 1, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 4,\r\n (1, 2, 0): 4,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4},\r\n (1, 0, 1): {(2, 0, 1): 4,\r\n (2, 1, 0): 2, \r\n (1, 0, 2): 2,\r\n (1, 2, 0): 2,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4}, \r\n (1, 0, 0): {(2, 0, 1): 0,\r\n (2, 1, 0): 0, \r\n (1, 0, 2): 0,\r\n (1, 2, 0): 0,\r\n (0, 2, 1): 4,\r\n (0, 1, 2): 4}}\r\n \r\n \r\n \r\n done_replacing = False\r\n \r\n for binary_key, inner in mapper.items():\r\n mask1 = check_subarray(array1=binarized, array2=np.array(binary_key))\r\n if isinstance(inner, int):\r\n original_labels, done_replacing = replace_initializations(done_replacing=done_replacing, \r\n array=original_labels, \r\n mask=mask1, \r\n replacement_value=inner, \r\n initialization_value=initialization_value)\r\n else:\r\n for inner_key, inner_value in inner.items():\r\n mask2 = np.logical_and(mask1, check_subarray(array1=belief, array2=np.array(inner_key)))\r\n original_labels, done_replacing = replace_initializations(done_replacing=done_replacing,\r\n array=original_labels, \r\n mask=mask2, \r\n replacement_value=inner_value, \r\n initialization_value=initialization_value)\r\n \r\n if not done_replacing:\r\n raise ValueError('About to return so should have been done replacing but told otherwise.')\r\n \r\n return original_labels.astype(np.uint8)", "def create_label_map():\n\n cnt = 1\n tmp_array = np.array([10, 15, 25, 30, 40, 47, 57, 63, 69, 74, 81])\n dictionary = dict()\n dictionary[1] = 1\n for idx, val in enumerate(tmp_array):\n for j in range(cnt + 1, val):\n dictionary[j] = int(idx + 2)\n cnt = j\n return dictionary", "def elan_annotation_to_binary(annotation_data):\n label_dict = {}\n for annotation in annotation_data:\n label = 1 if annotation[2] == 'Engaged' else 0\n label_dict[\"{0},{1}\".format(annotation[0], annotation[1])] = label\n return label_dict", "def init_label_dict(num_classes):\n label_dict={}\n for i in range(num_classes):\n label_dict[i]=(0,0,0)\n return label_dict", "def feature_dict(sent, i):\n # WORK HERE!!\n return {}", "def field_labels(label_row, datum_row):\n return dict(zip(label_row, datum_row))", "def one_hot_vocab_encoding(w2vp: W2VPreprocessor \n ) -> Dict[str, np.ndarray]:\n return {\n w: i for i, w in enumerate(w2vp.vocabulary)\n }", "def coherent_subsequent_states(Infomap_labels):\r\n unique_labels= 
np.unique(Infomap_labels)\r\n dictionary= {}\r\n for i in range(len(unique_labels)):\r\n label_index=[]\r\n for j in range(len(Infomap_labels)):\r\n if unique_labels[i]==Infomap_labels[j]:\r\n label_index.append(j)\r\n subsequent=groupSequence(label_index)\r\n \r\n dictionary[i]=subsequent\r\n \r\n return dictionary", "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def make_represented_genders(metric_df, label_lang):\n return dict(metric_df[['bias_value', 'bias_label']].drop_duplicates().to_dict('split')['data'])", "def calculate_class_weights(label_data):\n neg, pos = np.bincount(label_data)\n weight_for_0 = 1 / neg\n weight_for_1 = 1 / pos\n return {0: weight_for_0, 1: weight_for_1}", "def finalLabels(self) -> Tuple[ndarray, Dict[str, int]]:\n test = set([])\n resultTwo: Dict[str, int] = {}\n result = np.empty((self.dataSize,), dtype=int)\n for i, cluster in enumerate(self.clusters):\n for prototypeIdx in cluster:\n prototypeHash = hashSequence(self.data[prototypeIdx])\n resultTwo[prototypeHash] = self.classes[i]\n result[prototypeIdx] = i\n test.add(prototypeIdx)\n for i, nonPrototypeIdx in enumerate(self._getNonPrototypeIndices(self.clusters)):\n nonPrototypeHash = hashSequence(self.data[nonPrototypeIdx])\n resultTwo[nonPrototypeHash] = self.classes[self.labels[i]]\n result[nonPrototypeIdx] = self.labels[i]\n test.add(nonPrototypeIdx)\n return result, resultTwo", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def featurize(vector,features):\n dictionary = collections.defaultdict(lambda:0)\n for feature in iter(set(features)):\n dictionary[feature] = [vector[key][feature] if feature in vector[key] else 0 for key in vector] #populates vectors with zeroes where there's no value in an industry for an n-gram.\n return dictionary", "def _get_classify_labels(df):\n labels = np.ones((len(df), 1), dtype=dtype) * 2\n labels[df['A-coref']] = 0\n labels[df['B-coref']] = 1\n return labels", "def get_label_map(labels):\n label_map = dict()\n for i,v in enumerate(np.ravel(labels.data)):\n if v in label_map.keys():\n label_map.get(v).append(i)\n else:\n label_map[v] = [i]\n return label_map", "def encode_ST_labels(labels):\n return np.array([1 if sentiment == 'bullish' else 0 for sentiment in labels])", "def get_training_labels():\n\n\tmapping = dict()\n\tmapping[constants.ASCause.apsp] = 0\n\tmapping[constants.ASCause.bl] = 1\n\tmapping[constants.ASCause.ce] = 2\n\tmapping[constants.ASCause.dfl] = 3\n\tmapping[constants.ASCause.lrssi] = 4\n\tmapping[constants.ASCause.pwr_state] = 5\n\treturn mapping", "def feature_sign_dict(three_feature_list):\n\n feature_dict = {}\n\n for i in list(range(1, 11)):\n feature_dict[-i] = three_feature_list[0]\n\n feature_dict[0] = three_feature_list[1]\n\n for i in list(range(1, 11)):\n feature_dict[i] = three_feature_list[2]\n\n return feature_dict", "def get_mappings():\n original_dict = ClassifierDataset.get_labels()\n return dict(zip(original_dict.values(), original_dict.keys()))", "def process_label(intents, w2v,class_id_startpoint=0):\n class_dict = {}\n label_vec = []\n class_id = class_id_startpoint\n \n for line in intents:\n # check whether all the words in w2v dict\n line=line[0]\n label = line.split(' ')\n for w in label:\n 
if not w in w2v.vocab:\n print('not in w2v dict', w)\n\n # compute label vec\n label_sum = np.sum([w2v[w] for w in label], axis = 0)\n label_vec.append(label_sum)\n # store class names => index\n class_dict[' '.join(label)] = class_id\n class_id = class_id + 1\n #print('=====label vec', label_vec)\n return class_dict, np.asarray(label_vec)", "def one_hot_encode(label, label_values):\n semantic_map = []\n for colour in label_values:\n equality = np.equal(label, colour)\n class_map = np.all(equality, axis = -1)\n semantic_map.append(class_map)\n semantic_map = np.stack(semantic_map, axis=-1)\n\n return semantic_map", "def process_label(self, foreground_labels):\n # Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}\n unique_nonnegative_indices = np.unique(foreground_labels)\n mapped_labels = foreground_labels.copy()\n for k in range(unique_nonnegative_indices.shape[0]):\n mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k\n foreground_labels = mapped_labels\n return foreground_labels", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! 
sequence is too short\")", "def labels_b(self):\n return self._labels_b", "def predict(self, row):\n label_vote = dict()\n for i in range(len(self.forest)):\n result = self.forest[i].predict(row)\n label = max(result, key=result.get)\n \n if label_vote.get(label, None) is None:\n label_vote[label] = 0\n\n label_vote[label] += 1\n \n return max(label_vote, key=result.get)", "def normalize_labels(labels):\n new_labels = np.array([-1] * len(labels))\n labels = np.array(labels)\n label_dict = dict()\n for i, label in enumerate(set(labels)):\n new_labels[np.where(labels == label)] = i\n label_dict[i] = label\n return label_dict, new_labels", "def get_binarized_and_belief(array, threshold=0.5):\r\n \r\n # check assumption above\r\n if (np.amax(array) > 1.0) or (np.amin(array) < 0.0):\r\n raise ValueError('Voxel value fed to lambda in converting to original labels was out of range.')\r\n \r\n # obtain binarized output\r\n binarized = binarize(array=array, threshold=threshold)\r\n \r\n # we will sort from least to greatest, so least suspicion is what we will believe\r\n raw_suspicion = np.absolute(array - binarized)\r\n \r\n belief = np.argsort(raw_suspicion, axis=-1)\r\n \r\n return binarized, belief", "def array2(self):\n print \"array2\"\n msgbox(whoami())\n #research\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\n labelnode=slicer.mrmlScene.GetNodeByID(inputLabelID)\n i = labelnode.GetImageData()\n shape = list(i.GetDimensions())\n shape.reverse()\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\n labels=[]\n val=[[0,0,0] for i in range(a.max()+1)]\n for i in xrange(2,a.max()+1):\n w =numpy.transpose(numpy.where(a==i))\n # labels.append(w.mean(axis=0))\n val[i]=[0,0,0]\n val[i][0]=w[int(round(w.shape[0]/2))][2]\n val[i][1]=w[int(round(w.shape[0]/2))][1]\n val[i][2]=w[int(round(w.shape[0]/2))][0]\n if val[i] not in self.previousValues:\n labels.append(val[i])\n self.previousValues.append(val[i])\n return labels", "def array2(self):\r\n profbox(whoami())\r\n # research\r\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\r\n labelnode = slicer.mrmlScene.GetNodeByID(inputLabelID)\r\n i = labelnode.GetImageData()\r\n shape = list(i.GetDimensions())\r\n shape.reverse()\r\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\r\n labels = []\r\n val = [[0, 0, 0] for i in range(a.max() + 1)]\r\n for i in xrange(2, a.max() + 1):\r\n w = numpy.transpose(numpy.where(a == i))\r\n # labels.append(w.mean(axis=0))\r\n val[i] = [0, 0, 0]\r\n val[i][0] = w[int(round(w.shape[0] / 2))][2]\r\n val[i][1] = w[int(round(w.shape[0] / 2))][1]\r\n val[i][2] = w[int(round(w.shape[0] / 2))][0]\r\n if val[i] not in self.previousValues:\r\n labels.append(val[i])\r\n self.previousValues.append(val[i])\r\n return labels", "def get_gold_pred_idx_dict(self, y_true, y_pred):\n gold_pred_idx_dict = defaultdict(lambda: defaultdict(list))\n gold_pred_ct_dict = defaultdict(lambda: defaultdict(int)) \n\n for gold_idx in range(3,self.nerTags.size):\n gold_filter = (y_true == gold_idx).astype(\"int\") # 1/0 all rows with that gold_idx\n for pred_idx in range(3,self.nerTags.size):\n pred_filter = (y_pred == pred_idx).astype(\"int\") # 1/0 all rows with that ner_idx\n match_ner_idx = np.nonzero(np.all([gold_filter, pred_filter],axis=0).astype(\"int\"))[0]\n gold_pred_idx_dict[gold_idx][pred_idx] = match_ner_idx \n gold_pred_ct_dict[gold_idx][pred_idx] = match_ner_idx.shape[0] \n\n return gold_pred_idx_dict, gold_pred_ct_dict", 
"def extract_belief(ontology, state, threshold=0.3):\n\n \"\"\"\n need to check if the value at argmax is bigger than a threshold\n \"\"\"\n\n request_idx = np.argmax([item[1] for item in state[\"request\"].items()])\n frequency_idx = np.argmax([item[1] for item in state[\"frequency\"].items()])\n illness_type_idx = np.argmax([item[1] for item in state[\"type\"].items()])\n symptom_idx = np.argmax([item[1] for item in state[\"symptom\"].items()])\n escalation_idx = np.argmax([item[1]\n for item in state[\"escalation\"].items()])\n duration_idx = np.argmax([item[1] for item in state[\"duration\"].items()])\n confirmation_idx = np.argmax([item[1]\n for item in state[\"confirmation\"].items()])\n\n # then it is neccessary to map the indices back to words\n\n request = is_plausible(state[\"request\"].items(\n ), ontology, \"request\", request_idx, threshold)\n frequency = is_plausible(\n state[\"frequency\"].items(), ontology, \"frequency\", frequency_idx, threshold)\n illness_type = is_plausible(\n state[\"type\"].items(), ontology, \"type\", illness_type_idx, threshold)\n symptom = is_plausible(state[\"symptom\"].items(\n ), ontology, \"symptom\", symptom_idx, threshold)\n\n escalation = is_plausible(state[\"escalation\"].items(\n ), ontology, \"escalation\", escalation_idx, threshold)\n\n duration = is_plausible(state[\"duration\"].items(\n ), ontology, \"duration\", duration_idx, threshold)\n confirmation = is_plausible(state[\"confirmation\"].items(\n ), ontology, \"confirmation\", confirmation_idx, threshold)\n\n if request is not None:\n request = \"request \"+request\n if frequency is not None:\n frequency = \"frequency \"+frequency\n if illness_type is not None:\n illness_type = \"type \"+illness_type\n if symptom is not None:\n symptom = \"symptom \"+symptom\n if escalation is not None:\n escalation = \"escalation \"+escalation\n if duration is not None:\n duration = \"duration \"+duration\n if confirmation is not None:\n confirmation = \"confirmation \"+confirmation\n values = [request, frequency, illness_type,\n symptom, escalation, duration, confirmation]\n values = [val for val in values if val is not None]\n return sorted(values)", "def GetClassWeights(Labels,Weights):\n ClassWeight = {}\n for Class in np.unique(Labels):\n TotalWeight = np.sum(Weights[Labels == Class])\n CWeight = np.sum(Weights)/(len(np.unique(Labels))*TotalWeight)\n ClassWeight.update({int(Class):CWeight})\n\n print(ClassWeight)\n return ClassWeight", "def get_label_ix_mapping(labels):\n return {label: i for i, label in enumerate(labels)}", "def _to_dict(self, features, real_classes, weights):\n d = {}\n for i in range(len(features)):\n if real_classes[i, 0] in d.keys():\n if weights is None:\n d[real_classes[i, 0]].append(features[i])\n else:\n d[real_classes[i, 0]].append(np.concatenate((features[i], [weights[i]])))\n else:\n if weights is None:\n d.update({real_classes[i, 0]: [features[i]]})\n else:\n d.update({real_classes[i, 0]: [np.concatenate((features[i], [weights[i]]))]})\n for k in d.keys():\n d[k] = np.array(d[k])\n return d", "def map_label_colors(array, ignore_vals=[0]):\n colset = [(166, 206, 227),\n (31, 120, 180),\n (178, 223, 138),\n (51, 160, 44),\n (251, 154, 153),\n (227, 26, 28),\n (253, 191, 111),\n (255, 127, 0),\n (202, 178, 214),\n (106, 61, 154),\n (255, 255, 153),\n (177, 89, 40)]\n levels = np.unique(array)\n levels = [l for l in levels if l not in ignore_vals]\n if len(levels) == 0:\n return\n if len(levels) == 1:\n return({levels[0]: colset[0]})\n step = len(colset) / (len(levels) 
- 1)\n\n col_idx = np.arange(0, len(colset), step)\n colors = {}\n for idx in range(len(levels)):\n colors[levels[idx]] = colset[col_idx[idx]]\n return colors", "def viterbi(self, word_seq):\n # Initialize scores\n scores = [{}]\n path = {}\n # Populate scores\n for i in range(0, len(word_seq)):\n for label in self.label_type_map:\n scores[i][label] = 0\n scores.append({})\n self.initialize(scores, word_seq, path)\n path = self.iterate(scores, word_seq, path)\n return self.identify(scores, word_seq, path)", "def getClassCounts(b):\n c = {k:0 for k in labels.keys()}\n for r in b:\n c[r[0]] += 1\n return c", "def word2features(self,sent, i):\n word = sent[i][0]\n #postag = sent[i][1]\n\n features = {\n 'bias': 1.0,\n 'word.lower()': word.lower(),\n 'word.isupper()': word.isupper(),\n 'word.istitle()': word.istitle(),\n 'word.isdigit()': word.isdigit(),\n 'word.shape()':self.shape(word),\n 'word.isalnum()':word.isalnum(),\n 'word.isalpha()':word.isalpha(),\n # 'postag': postag,\n # 'postag[:2]': postag[:2],\n }\n if i > 0:\n word1 = sent[i - 1][0]\n #postag1 = sent[i - 1][1]\n features.update({\n '-1:word.lower()': word1.lower(),\n '-1:word.istitle()': word1.istitle(),\n '-1:word.isupper()': word1.isupper(),\n '-1:word.isdigit()': word1.isdigit(),\n '-1:word.isalnum()':word1.isalnum(),\n '-1:word.isalpha()':word1.isalpha(),\n # '-1:postag': postag1,\n # '-1:postag[:2]': postag1[:2],\n })\n else:\n features['BOS'] = True\n\n if i > 1:\n word2 = sent[i - 2][0]\n #postag2 = sent[i - 2][1]\n features.update({\n '-2:word.lower()': word2.lower(),\n '-2:word.istitle()': word2.istitle(),\n '-2:word.isupper()': word2.isupper(),\n '-2:word.isdigit()': word2.isdigit(),\n '-2:word.isalnum()': word2.isalnum(),\n '-2:word.isalpha()': word2.isalpha(),\n # '-2:postag': postag2,\n # '-2:postag[:2]': postag2[:2],\n })\n else:\n features['BOS1'] = True\n if i > 2:\n word3 = sent[i - 3][0]\n #postag3 = sent[i - 3][1]\n features.update({\n '-3:word.lower()': word3.lower(),\n '-3:word.istitle()': word3.istitle(),\n '-3:word.isupper()': word3.isupper(),\n '-3:word.isdigit()': word3.isdigit(),\n '-3:word.isalnum()': word3.isalnum(),\n '-3:word.isalpha()': word3.isalpha(),\n # '-3:postag': postag3,\n # '-3:postag[:2]': postag3[:2],\n })\n else:\n features['BOS2'] = True\n\n if i > 3:\n word4 = sent[i - 4][0]\n #postag4 = sent[i - 4][1]\n features.update({\n '-4:word.lower()': word4.lower(),\n '-4:word.istitle()': word4.istitle(),\n '-4:word.isupper()': word4.isupper(),\n '-4:word.isdigit()': word4.isdigit(),\n '-4:word.isalnum()': word4.isalnum(),\n '-4:word.isalpha()': word4.isalpha(),\n # '-4:postag': postag4,\n # '-4:postag[:2]': postag4[:2],\n })\n else:\n features['BOS2'] = True\n\n if i < len(sent) - 1:\n word1 = sent[i + 1][0]\n features.update({\n '+1:word.lower()': word1.lower(),\n '+1:word.istitle()': word1.istitle(),\n '+1:word.isupper()': word1.isupper(),\n '+1:word.isdigit()': word1.isdigit(),\n '+1:word.isalnum()': word1.isalnum(),\n '+1:word.isalpha()': word1.isalpha(),\n # '+1:postag': postag1,\n # '+1:postag[:2]': postag1[:2],\n })\n else:\n features['EOS'] = True\n if i < len(sent) - 2:\n word12 = sent[i + 2][0]\n #postag12 = sent[i + 2][1]\n features.update({\n '+2:word.lower()': word12.lower(),\n '+2:word.istitle()': word12.istitle(),\n '+2:word.isupper()': word12.isupper(),\n '+2:word.isdigit()': word12.isdigit(),\n '+2:word.isalnum()': word12.isalnum(),\n '+2:word.isalpha()': word12.isalpha(),\n # '+2:postag': postag12,\n # '+2:postag[:2]': postag12[:2],\n })\n else:\n features['EOS2'] = True\n 
if i < len(sent) - 3:\n word13 = sent[i + 3][0]\n #postag13 = sent[i + 3][1]\n features.update({\n '+3:word.lower()': word13.lower(),\n '+3:word.istitle()': word13.istitle(),\n '+3:word.isupper()': word13.isupper(),\n '+3:word.isdigit()': word13.isdigit(),\n '+3:word.isalnum()': word13.isalnum(),\n '+3:word.isalpha()': word13.isalpha(),\n # '+3:postag': postag13,\n # '+3:postag[:2]': postag13[:2],\n })\n else:\n features['EOS2'] = True\n if i < len(sent) - 4:\n word14 = sent[i + 4][0]\n #postag14 = sent[i + 4][1]\n features.update({\n '+4:word.lower()': word14.lower(),\n '+4:word.istitle()': word14.istitle(),\n '+4:word.isupper()': word14.isupper(),\n '+4:word.isdigit()': word14.isdigit(),\n '+4:word.isalnum()': word14.isalnum(),\n '+4:word.isalpha()': word14.isalpha(),\n # '+4:postag': postag14,\n # '+4:postag[:2]': postag14[:2],\n })\n else:\n features['EOS2'] = True\n return features", "def infer_data_labels(X_labels, cluster_labels):\r\n #Empty array of len(X)\r\n predicted_labels = np.zeros(len(X_labels)).astype(np.uint8)\r\n \r\n for i, cluster in enumerate(X_labels):\r\n for key, value in cluster_labels.items():\r\n if cluster in value:\r\n predicted_labels[i] = key\r\n \r\n return predicted_labels", "def predict(w, b, image_matrix):\n\tm = image_matrix.shape[1] \t\t\t\t\t# grab set size again\n\tprediction_labels = np.zeros((3, m))\t\t# init vector\n\n\tactivation_layer = sigmoid(np.dot(w.T, image_matrix) + b) # computer sigmoid on prediction data\n\n\t# iterates over the activation layer, rounding to the nearest integer, and assigning value to prediction label array\n\tfor i in range(activation_layer.shape[1]):\t# covers each data set\n\t\tfor j in range(3): \t\t\t\t\t\t# covers label value within each data set\n\t\t\tif activation_layer[j, i] > 0.5:\t\t# rounding activation value to nearest int (0 or 1)\n\t\t\t\tprediction_labels[j, i] = 1\t\t# assigning such value to respective location in the prediction label array\n\t\t\telse:\t\t\t\t\t\t\t\t\n\t\t\t\tprediction_labels[j, i] = 0\t\t# if lower than 0.5, the label is set to False; 0\n\n\t# sanity check\n\tassert(prediction_labels.shape == (3, m))\n\n\treturn prediction_labels", "def classify(self, vector):\n return {}", "def test_dict_labels_sorted(self):\n le = {3: \"a\", 2: \"c\", 1: \"b\"}\n oz = ClassificationScoreVisualizer(GaussianNB(), encoder=le)\n npt.assert_array_equal(oz._labels(), [\"b\", \"c\", \"a\"])", "def CopyToDict(self):\n return {'labels': self.labels}", "def label_data(data):\n if data == 'cat': return [1, 0]\n elif data == 'dog': return [0, 1]", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def feat_dict(pos_feat,text):\n dict = {}\n bigrams = ngrams(word_tokenize(text),2)\n trigrams = ngrams(word_tokenize(text),3)\n \n for feat in pos_feat:\n dict[feat]=features(feat,text,bigrams,[],[])\n return dict", "def get_bone_data(eng):\n path = cf.pose_dict_path;\n b_dict = eng.load(path,'B');\n b = np.array(b_dict['B']);\n new_b = np.zeros((b.shape[0],16));\n for i,p in enumerate(b):\n new_b[i] = hm_to_mp(p);\n b_dict['B'] = matlab.double(new_b.tolist());\n return b_dict;", "def _get_labels(touches):\n \n out = touches.copy(deep=True)\n # pandas df.min() ignores NaN values\n first_touch = 
touches[['stop_loss', 'take_profit']].min(axis=1)\n for loc, t in first_touch.items():\n if pd.isnull(t):\n out.loc[loc, 'label'] = 0\n elif t == touches.loc[loc, 'stop_loss']:\n out.loc[loc, 'label'] = -1\n else:\n out.loc[loc, 'label'] = 1\n return out", "def labeled_dicoms(self):\n return [sorted(self.data)[i-1][1:] for i in self.labeled]", "def _row_to_labels(row):\n labels = {}\n label_keys = ['name', 'qty', 'range_end', 'unit', 'comment']\n for key in label_keys:\n labels[key] = row[key]\n return labels", "def make_contingency_tables(\n y: np.ndarray, flagged_A: np.ndarray, flagged_B: np.ndarray\n) -> Dict[int, np.ndarray]:\n\n y = np.array(y).astype(np.int64).flatten()\n flagged_A = np.array(flagged_A).astype(np.bool_).flatten()\n flagged_B = np.array(flagged_B).astype(np.bool_).flatten()\n\n if len(flagged_A) != len(y) or len(flagged_B) != len(y):\n raise ValueError(\n f\"Expected arrays y, flagged_A, and flagged_B of the same length: \\\n got {len(y)}, {len(flagged_A)}, and {len(flagged_B)}.\"\n )\n\n contingency_tables = {}\n for class_id in np.unique(y):\n\n items_flagged_A = flagged_A[y == class_id]\n items_flagged_B = flagged_B[y == class_id]\n\n a = (~items_flagged_A & ~items_flagged_B).sum()\n b = (~items_flagged_A & items_flagged_B).sum()\n c = (items_flagged_A & ~items_flagged_B).sum()\n d = (items_flagged_A & items_flagged_B).sum()\n\n table = np.array([[a, b], [c, d]])\n contingency_tables[class_id] = table\n\n return contingency_tables", "def predict(self, unknown):\n for title in unknown:\n for ind in range(len((unknown[list(unknown.keys())[0]]))):\n unknown[title][ind] = (unknown[title][ind] - self.normalization_n[ind]) / (self.normalization_d[ind])\n print(unknown)\n unknown_labels = {}\n for title in unknown:\n neighbors = self.k_neighbors(unknown[title], self.dataset, self.k)\n unknown_labels[title] = self.rate(neighbors, self.labels)\n return unknown_labels", "def _decode_from_preds(spec: _Spec, preds: _Array) -> Dict[str, _DataPoint]:\n result = {}\n for name in preds.keys():\n _, loc, typ = spec[name]\n data = preds[name]\n if typ == _Type.SCALAR:\n pass\n elif typ == _Type.MASK:\n data = (data > 0.0) * 1.0\n elif typ in [_Type.MASK_ONE, _Type.CATEGORICAL]:\n cat_size = data.shape[-1]\n best = jnp.argmax(data, -1)\n data = hk.one_hot(best, cat_size)\n elif typ == _Type.POINTER:\n data = jnp.argmax(data, -1)\n else:\n raise ValueError('Invalid type')\n result[name] = probing.DataPoint(\n name=name, location=loc, type_=typ, data=data)\n return result", "def get_label_scores_mapping(labels, scores):\n return {label: scores[i] for i, label in enumerate(labels)}", "def _all_labels_to_bitmasks(all_labels):\n l_dict = {}\n for i, label in enumerate(all_labels):\n l_dict[label.name] = 1<<i\n return l_dict", "def labels_b_v(self):\n return self._labels_b_v", "def one_hot_map(ref_map: np.ndarray) -> np.ndarray:\n ref_map += abs(BG_CLASS)\n one_hot_ref_map = np.zeros(shape=[ref_map.shape[ROW_AXIS], ref_map.shape[COLUMNS_AXIS],\n ref_map.max() + abs(BG_CLASS)])\n rows, columns = list(range(ref_map.shape[ROW_AXIS])), list(range((ref_map.shape[COLUMNS_AXIS])))\n for i, j in product(rows, columns):\n one_hot_ref_map[i, j, ref_map[i, j].astype(int)] = CLASS_LABEL\n return one_hot_ref_map", "def _convert_to_onehot_labels(seg_label, num_classes):\n\n batch_size = seg_label.size(0)\n onehot_labels = seg_label.new_zeros((batch_size, num_classes))\n for i in range(batch_size):\n hist = seg_label[i].float().histc(\n bins=num_classes, min=0, max=num_classes - 1)\n onehot_labels[i] = 
hist > 0\n return onehot_labels", "def prepare_val_res(score, threshold):\n new_whale = 'new_whale'\n\n res ={}\n\n train_arr = np.array(train)\n\n for i,p in enumerate(val):\n t = []\n s = set()\n a = score[i,:]\n\n top_label_probs = {}\n cond = a > threshold\n cond_index = np.where(cond)[0]\n cond_images = train_arr[cond_index]\n for j, img in enumerate(cond_images):\n if tagged[img] in top_label_probs:\n top_label_probs[tagged[img]] += a[cond_index[j]]\n else:\n top_label_probs[tagged[img]] = a[cond_index[j]]\n\n sorted_top_label_probs = sort_dict_by_values(top_label_probs)\n\n t = []\n for lb, _ in sorted_top_label_probs:\n t.append(lb)\n\n if len(t) < 5:\n t.append(new_whale)\n\n for index in np.argsort(a)[::-1]:\n if tagged[train_arr[index]] not in t:\n t.append(tagged[train_arr[index]])\n if len(t) >= 5:\n break\n\n assert len(t) >= 5\n\n res[p[:-4]+'.jpg'] = t[:5]\n\n return res", "def select_features(features_list, data):\n\n '''Initalize arrays'''\n f_index = []\n f_dic = {}\n t_index = []\n t_dic = {}\n\n '''\n double loop over both coloum headings and config\n -> very ugly, if time improve\n '''\n for x in range(0, len(data[1].columns.names)):\n for y in range(0, len(features_list)):\n\n if data[1].columns.names[x] == features_list[y]\\\n and features_list[y][:1] != '!' \\\n and features_list[y][:1] != '#':\n f_index = np.append(f_index, x)\n f_dic.update({features_list[y]: x})\n\n if features_list[y][:1] == '!' \\\n and features_list[y][1:] == data[1].columns.names[x]:\n t_index = np.append(t_index, x)\n t_dic.update({features_list[y][1:]: x})\n\n return f_index.astype(int), f_dic, t_index.astype(int), t_dic", "def _classify(tree, x):\n # YOUR CODE HERE\n # begin answer\n feature_name=list(tree.keys())[0] #first element\n secondDict=tree[feature_name] \n key=x.loc[feature_name] #extract value from x\n for key_val in secondDict:\n feature_val=key_val[0]\n valueOfKey=secondDict[(feature_val, key>=feature_val)]\n if isinstance(valueOfKey,dict):\n label=_classify(valueOfKey,x)\n else:\n label=valueOfKey\n return label\n # end answer", "def one_hot_encode(x_: ArrayLike) -> tuple[IntArray, dict[str, int]]:\n x: np.ndarray = np.copy(x_)\n if x.ndim == 1:\n x = x[:, np.newaxis]\n shape = x.shape\n has_na = np.any(pd.isna(x))\n if x.dtype == object:\n x = x.astype(str)\n categories, codes = np.unique(x, return_inverse=True)\n num_classes = len(categories)\n encoded_x = np.zeros((x.size, num_classes), dtype=np.uint8)\n encoded_x[np.arange(x.size), codes.astype(np.uint8).ravel()] = 1\n encoded_x = encoded_x.reshape(*shape, num_classes)\n if has_na:\n # remove NaN column\n categories = categories[:-1]\n encoded_x = encoded_x[:, :, :-1]\n mapping = {\n _category_name(category): code for code, category in enumerate(categories)\n }\n return encoded_x, mapping", "def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return", "def label_to_onehot(labels):\n label_dict = {'THEORETICAL': 0, 'ENGINEERING': 1, 'EMPIRICAL': 2, 'OTHERS': 3}\n onehot = [0, 0, 0, 0]\n for l in labels.split():\n onehot[label_dict[l]] = 1\n return onehot", "def eigenvalue_label_dict_am(am_max,verbose=False):\n\n 
eigenvalue_label_dict = {\n float(J*(J+1)): float(J)\n for J in mcscript.utils.value_range(am_max%1,am_max,1)\n }\n return eigenvalue_label_dict", "def woe_iv_categ(self):\n df = self.predictors.copy()\n df['target'] = self.target.copy()\n IV_dict = {}\n woe_dict = {}\n\n for col in self.predictors.columns:\n # binning values\n bins = np.linspace(df[col].min()-0.1, df[col].max()+0.1, len(set(df[col]))) # each bin should have at least 5% of the observation\n groups = df.groupby(np.digitize(df[col], bins))\n df[col] = pd.cut(df[col], bins)\n\n # getting class counts for each bin\n count_series = df.groupby([col, 'target']).size()\n new_df = count_series.to_frame(name = 'size').reset_index()\n\n new_df['size'] = new_df['size'] + 0.5\n df1 = new_df[new_df['target']==0].reset_index(drop=True)\n df2 = new_df[new_df['target']==1].reset_index(drop=True)\n df1['size1'] = df2['size']\n new_df = df1.drop(columns=['target'])\n sum_ = new_df['size'].sum()\n sum1 = new_df['size1'].sum()\n # Calculate woe and IV\n new_df['woe'] = np.log((new_df['size']/sum_)/(new_df['size1']/sum1))\n new_df['IV'] = ((new_df['size']/sum_) - (new_df['size1']/sum1)) * new_df['woe']\n new_df = new_df.replace([np.inf, -np.inf], np.nan)\n new_df.dropna(inplace=True)\n woe_dict[col] = new_df.drop(columns=['size','size1'])\n IV_dict[col] = new_df['IV'].sum()\n return woe_dict, IV_dict", "def calculateBeliefs(self):\n\n belief = {}\n\n for question in self.getQuestions():\n q = str(question.id)\n belief[q] = self.HELPER_init_belief()\n\n #print belief[q]\n for answer in self.getQuestionCompletedAnswers(question):\n #print q\n #print str(answer.question.id)\n assert str(answer.question.id) == q\n w_skill = answer.worker.inference_results['EM']['skill']\n # answer.value must be \"0\" or \"1\"\n assert answer.value == \"0\" or answer.value == \"1\"\n #print answer.value, w_skill\n belief[q] = self.HELPER_update_belief(belief[q], answer.value, w_skill)\n #print belief[q]\n\n #print \"Question beliefs:\", belief\n #print \"##################\"\n return belief", "def extractImpact(data):\n return {key : array([hellingerDistance(i.px, data['{}'].px) for i in val]) for key, val in data.items() if key != '{}'}", "def _preprocess(*elements):\n output_dict = {}\n for idx, elem in enumerate(elements):\n uint8_img = elem['image']\n patch = data_provider.full_image_to_patch(uint8_img, patch_size, num_channels)\n label = tf.one_hot(idx, num_classes)\n output_dict[idx] = {'images': patch, 'labels': label}\n return output_dict", "def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l", "def get_feature_set_SC2(tweet, sentimentvalues):\n pos_tag_freq = {}\n additional_freq = {}\n for phrase in tweet.tagged_words:\n for word in phrase:\n try:\n tag = word['pos']\n pos_tag_freq[tag] = pos_tag_freq.get(tag, 0) + 1\n if tag in ADJECTIVES:\n additional_freq['adjectives'] = additional_freq.get(tag, 0) + 1\n elif tag in ADVERBS: \n additional_freq['adverbs'] = additional_freq.get(tag, 0) + 1\n elif tag in PRONOUNS:\n additional_freq['pronoun'] = 1\n except KeyError:\n continue\n for key in pos_tag_freq.keys():\n pos_tag_freq[key] = pos_tag_freq[key]*1.0\n #number of adjectives in sentence, number of adverbs in sentence(except ikke), pronoun in sentence(binary) \n #Number of exclamation marks, number of emoticons,\n emoticons = tweet.nrof_happyemoticons+tweet.nrof_sademoticons\n if emoticons>0:\n additional_freq['emoticons'] = emoticons*1.0\n if tweet.nrof_exclamations>0:\n 
additional_freq['exclamations'] = tweet.nrof_exclamations*1.0\n \n #Add lexicon values\n #total subjectivity score from word polarities, total objectivity score, number of subjective words, number of objective words, e\n sub_score = sentimentvalues[0]+sentimentvalues[1]\n obj_score = sentimentvalues[2]\n if sub_score>0:\n additional_freq[\"sub_score\"] = sub_score+1.0\n if obj_score>0:\n additional_freq[\"obj_score\"] = obj_score+1.0\n \n #Concatenate the dicts\n features= dict(pos_tag_freq.items() + additional_freq.items())\n \n return features", "def select_neighborhood(similarity_matrix, utility_matrix, target_user, target_business):\n items_dict = {}\n new_matrix = utility_matrix[target_user].dropna()\n\n for business in new_matrix.index:\n if new_matrix[business] and similarity_matrix[business][target_business] > 0:\n items_dict[business] = similarity_matrix[business][target_business]\n\n return pd.Series(items_dict)", "def get_belief_vector(b: dict, t: int):\n\n my_list = [k[1] for k in b.keys() if k[1] == t]\n\n # number of vertices + capture\n nu = len(my_list)\n # set of capture + vertices V_c = [0, 1, ... n]\n V_c = ext.get_idx_vertices(nu)[0]\n\n belief = []\n for v in V_c:\n beta = b.get((v, t))\n belief.append(beta)\n\n return belief", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def prediction_b(self):\r\n return self._prediction_b", "def pollster_predictions(poll_rows):\r\n\r\n\r\n\r\n dict = {} #Create an empty dictionary\r\n dict = 
dict.fromkeys(unique_column_values(poll_rows, \"Pollster\"))\r\n\r\n #Extract all the unique Pollster values and assign them as specific keys\r\n \r\n for keys in dict: #Iterate through each key of the dictionary\r\n list = [] #Create an empty list\r\n for poll_row in poll_rows: #Iterate through each row\r\n if (most_recent_poll_row(poll_rows, keys, poll_row[\"State\"]) != None):\r\n list.append(most_recent_poll_row(poll_rows, keys, poll_row[\"State\"]))\r\n dict[keys] = state_edges(list)\r\n return dict\r\n\r\n #Using only the most recent poll for a state, the pollsters become the key\r\n #of the dictionary, and are assigned to the value of the state and its edge\r", "def indices_to_one_hot(data, nb_classes):\n\ttargets = np.array(data).reshape(-1)\n\treturn np.eye(nb_classes)[targets]", "def count_classes(labels):\n class_dict = {}\n for image in labels:\n for row in image:\n for label in row:\n if label not in class_dict:\n class_dict[label] = 1\n else:\n class_dict[label] += 1\n return class_dict", "def label_mat(mat):\n # Index and range of average used for labeling.\n gather_avg_25_i = 2\n avg_range = 25\n # Labels for rising and falling price.\n rising_i = 1\n falling_i = 0\n num_classes = 2\n labels = np.zeros([mat.shape[0] - avg_range + 1, num_classes])\n for i in range(mat.shape[0] - avg_range + 1):\n # If average 25 day price rises after 24 days assign rising label, else\n # assign falling label.\n if mat[i, gather_avg_25_i] < mat[i + avg_range - 1, gather_avg_25_i]:\n labels[i, rising_i] = 1.0\n else:\n labels[i, falling_i] = 1.0\n return labels", "def lyft_labels():\n\n return {\n 0: 'None',\n 7: 'Roads',\n 10: 'Vehicles'\n }", "def read_orig_values(self):\n\n self.ovmap = {}\n\n for line in open(self.mname, 'r'):\n featval, bits = line.strip().split(',')\n feat, val = featval.split(':')\n\n for i, b in enumerate(bits):\n f = '{0}:b{1}'.format(feat, i + 1)\n v = self.fvmap.dir[(f, '1')]\n\n if v not in self.ovmap:\n self.ovmap[v] = [feat]\n\n if -v not in self.ovmap:\n self.ovmap[-v] = [feat]\n\n self.ovmap[v if b == '1' else -v].append(val)", "def label_name_to_one_hot(self, label_name):\n label_name_to_int = {n: i for i, n in enumerate(BOX_LABELS)}\n label_id = label_name_to_int[label_name]\n one_hot = np.zeros(shape=[len(BOX_LABELS)], dtype=np.float32)\n one_hot[label_id] = 1\n return one_hot", "def get_bow_dummies(self):\n\n # Get an np matrix of zeros based on defined dim\n zero_matrix = np.zeros(self.dim, np.int)\n\n # Create a dataframe containing feature columns and 0's\n zero_df = pd.DataFrame(zero_matrix, columns=self.features)\n\n # Get a dictionary of index and features per doc\n doc_features_dict = self.index_feats_dict()\n doc_ids = doc_features_dict.keys()\n doc_feats = doc_features_dict.values()\n\n print(zero_df)\n # For each row in zero_df, indicate 1 for every\n # feature word present in a doc of a dataframe\n for index, feats in zip(doc_ids, doc_feats):\n zero_df.ix[index, feats] = 1", "def adj_dict(self):\n adj_dict = {i: [] for i in self.indices}\n for coeff in self.interactions[1:]:\n for _inds, value in coeff.items():\n for i in _inds:\n _inds_list = list(_inds)\n _inds_list.remove(i)\n adj_dict[i].append([_inds_list, value])\n return adj_dict", "def dictionary(cleaned_data,threshold):\n news = []\n for date in cleaned_data:\n for headlines in cleaned_data[date]:\n news.append(headlines)\n\n word_freq = nltk.FreqDist(itertools.chain(*news))\n id_to_word = ['<pad>'] + [word for word, cnt in word_freq.items() if cnt >= threshold] + ['<unk>']\n word_to_id 
= {word:idx for idx, word in enumerate(id_to_word)}\n \n return id_to_word, word_to_id", "def binarize_labels(labels):\n labels = np.where(labels == 0, labels, 1)\n\n return labels", "def hausdorff_2d_distance(pred: np.ndarray, target: np.ndarray) -> Dict:\n assert len(pred.shape) == 2 or len(target.shape) == 2\n labels = np.unique(target)\n labels = labels[labels != 0]\n scores = {}\n for label in labels:\n mask_pred = pred == label\n mask_gt = target == label\n label = str(int(label))\n mask_pred = mask_pred.astype(int)\n mask_gt = mask_gt.astype(int)\n gt_empty = np.sum(mask_gt) == 0\n pred_empty = np.sum(mask_pred) == 0\n\n # hausdorff not defined if both are empty ( 0/0 situation)\n if gt_empty and pred_empty:\n scores[label] = 1.0\n else:\n hausdorff1 = directed_hausdorff(mask_pred, mask_gt)[0]\n hausdorff2 = directed_hausdorff(mask_gt, mask_pred)[0]\n hausdorff = max(hausdorff1, hausdorff2)\n scores[label] = hausdorff\n return scores", "def _get_labels(self, label_vector):\n return () if label_vector is None else \\\n list(OrderedDict.fromkeys([label for term in label_vector \\\n for label, power in term if power != 0]))", "def denseFeature(self, feat):\n return {'feat': feat}", "def tag_with_features(self, efeats):\n if len(efeats)==3:\n print \"d\"\n\n # build array of dicts\n state_dicts = []\n for e_phi in efeats: \n state_dicts = self.viterbi1(e_phi, state_dicts)\n \n \n # trace back\n yyhat, phis = self.traceback(efeats, state_dicts)\n assert len(efeats)==len(yyhat)#len(yyhat), \n\n return (yyhat, phis)", "def createDecimatedDict(data, adjacency_dict_GRID, N_adj_threshold):\n boundaryPointsCounter = 0\n boundaryPointsDict = {}\n indexList = list(data.keys())\n for i in indexList:\n data[i]['E_adj']= adjacency_dict_GRID[i]\n if len(data[i]['E_adj']) >= N_adj_threshold:\n data[i]['E_type'] = 'Int'\n # elif len(data[i]['E_adj']) < N_adj_threshold:\n # data[i]['E_type'] = 'Ext'\n else:\n data[i]['E_type'] = 'Boundary'\n boundaryPointsDict[i] = data[i]['Coordinate']\n boundaryPointsCounter+=1\n ratio = boundaryPointsCounter/len(indexList)\n print('OrigSize:', len(indexList))\n print('BpoundarySize:', boundaryPointsCounter)\n\n return data, boundaryPointsDict, ratio", "def get_upos_1h_labels() -> Dict[str, List[int]]:\n global upos_1h_labels\n if upos_1h_labels is not None:\n return upos_1h_labels\n else:\n # Use pandas library to generate upos one-hort encoded values\n upos_1h = pd.get_dummies(UPOS_TAGS).values\n upos_1h_labels: Dict[str, List[int]] = {UPOS_TAGS[i]: v for (i, v) in enumerate(upos_1h)}\n return upos_1h_labels", "def class_bias(y_true, majority_mask, kept_mask, class_labels):\n majority_contingency_tables = make_contingency_tables(\n y_true,\n majority_mask,\n kept_mask,\n )\n\n chi2_spd = {}\n for c in class_labels:\n class_c_table = majority_contingency_tables.get(c)\n if class_c_table is None:\n chi2_val = None\n spd_val = None\n else:\n chi2_val = np.mean(chi2_p_value(class_c_table))\n spd_val = np.mean(spd(class_c_table))\n chi2_spd[c] = (chi2_val, spd_val)\n\n return chi2_spd", "def convert_dict_to_arr(features: {}, labels: {}) -> ([], []):\n\n features_arr = []\n labels_arr = []\n\n for id, featuresList in features.items():\n\n labels_arr.append([labels.get(str(id))[0], labels.get(str(id))[1], id])\n\n # Elementary features\n v = featuresList[\"volume\"]\n a = featuresList[\"area\"]\n c = featuresList[\"compactness\"]\n bb = featuresList[\"bbox_volume\"]\n d = featuresList[\"diameter\"]\n e = featuresList[\"eccentricity\"]\n elem_features = [v, a, c, bb, d, 
e]\n \n # Global features\n a3, d1, d2, d3, d4 = [], [], [], [], []\n for x in featuresList[\"A3\"][0]:\n a3.append(x)\n for x in featuresList[\"D1\"][0]:\n d1.append(x)\n for x in featuresList[\"D2\"][0]:\n d2.append(x)\n for x in featuresList[\"D3\"][0]:\n d3.append(x)\n for x in featuresList[\"D4\"][0]:\n d4.append(x)\n glob_features = np.concatenate((a3, d1, d2, d3, d4))\n features_arr.append(np.concatenate((elem_features, glob_features)))\n\n np.savetxt(s.SAVED_DATA + 'features_arr.txt', np.asarray(features_arr), delimiter=',')\n\n return np.asarray(features_arr), np.asarray(labels_arr)", "def _get_bbox_regression_labels(bbox_target_data, num_classes):\r\n\r\n clss = bbox_target_data[:, 0]\r\n # print (\"===============class size: \" + str(clss))\r\n bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32) #clss.size = 128 ---> bbox_targets = 128 * 84, moi roi la 1*84 dimesion\r\n bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)\r\n inds = np.where(clss > 0)[0]\r\n for ind in inds:\r\n cls = clss[ind]\r\n start = 4 * cls\r\n end = start + 4\r\n start=int(start)\r\n\tend=int(end)\r\n\tbbox_targets[ind, start:end] = bbox_target_data[ind, 1:] #gan gia tri tai class tuong ung la bbox_target_data, con lai la so 0\r\n bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS\r\n return bbox_targets, bbox_inside_weights", "def predict_labels(weights, data):\n y_pred = np.dot(data, weights)\n y_pred[np.where(y_pred <= 0)] = -1\n y_pred[np.where(y_pred > 0)] = 1\n \n return y_pred", "def predict_labels(weights, data):\n y_pred = np.dot(data, weights)\n y_pred[np.where(y_pred <= 0)] = -1\n y_pred[np.where(y_pred > 0)] = 1\n \n return y_pred" ]
[ "0.6541998", "0.6310633", "0.5895067", "0.566295", "0.5637139", "0.5578173", "0.55679846", "0.55309737", "0.54985076", "0.5497383", "0.54970396", "0.5491054", "0.5483852", "0.5483852", "0.54637516", "0.54519457", "0.5446611", "0.5395349", "0.53934413", "0.53921664", "0.5384632", "0.5378295", "0.53603697", "0.53526324", "0.5343171", "0.5340845", "0.53375775", "0.5334531", "0.5333955", "0.53243893", "0.531366", "0.5299509", "0.5292755", "0.52734447", "0.5259324", "0.52494496", "0.5236199", "0.5231756", "0.5214111", "0.5211945", "0.52062774", "0.51990604", "0.5194088", "0.51814514", "0.5169462", "0.5165812", "0.5165084", "0.51645344", "0.5155692", "0.51533955", "0.515334", "0.51531047", "0.5149964", "0.51436806", "0.51396054", "0.51369417", "0.51353306", "0.5128463", "0.5116446", "0.510963", "0.5102719", "0.5102552", "0.50985664", "0.5092865", "0.50845414", "0.5083663", "0.5076534", "0.5073436", "0.5064911", "0.50602543", "0.504968", "0.5042543", "0.50390345", "0.5037368", "0.5033511", "0.50287855", "0.50244296", "0.50208217", "0.50106084", "0.50076044", "0.50060284", "0.5005377", "0.5002273", "0.50019306", "0.49926254", "0.49905682", "0.49887922", "0.4986016", "0.49851197", "0.49776876", "0.49749702", "0.4974027", "0.49728385", "0.49681294", "0.49534374", "0.49481252", "0.4945341", "0.49431553", "0.49419755", "0.49419755" ]
0.78097486
0
takes a list of votes and predicts based on threshold returns true iff fraction of true votes >= f
def thresh_vote(lst, f): if len(lst) == 0: # guess 0 by default (appropriate for our dataset) q = 0 else: q = float(sum(lst)) / len(lst) return q >= f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binary_predict(probs, threshold = 0.5):\n return (probs >= threshold) * np.ones(len(probs))", "def sensitivity(\n targets: List[int], preds: List[float], threshold: float = 0.5\n) -> float:\n return recall(targets, preds, threshold)", "def get_predict(prediction, threshold):\n\n prediction[prediction < threshold] = 0\n prediction[prediction >= threshold] = 1\n \n return prediction", "def predictRating(toPredict, candidateList):\n\n ratingRelevantCandidates = []\n\n #Remove candidates with no rating specified\n for candidate in candidateList:\n currentCandidate = candidate[1]\n\n if float(currentCandidate['vote_avg']) > 0:\n ratingRelevantCandidates.append((float(currentCandidate['vote_avg']), candidate))\n\n #print(\"ratings::::::::\",currentCandidate['vote_avg'])\n\n #Remove outlier candidates based on rating\n ratingMean = np.mean([x[0] for x in ratingRelevantCandidates])\n print(\"ratingMean\", ratingMean)\n ratingSD = np.std([x[0] for x in ratingRelevantCandidates])\n print(\"ratingSD\", ratingSD)\n\n finalRatings = [x for x in ratingRelevantCandidates if (float(x[0]) < ratingMean + ratingSD)]#1.5 *\n finalRatings = [x for x in finalRatings if (float(x[0]) > ratingMean - ratingSD)]#.75 *\n\n finalRatingCandidatesWithWeight = []\n\n #Weight each candidate based on vote count, direct and actor popularity and matching score from part 1\n for candidate in finalRatings:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n voteCountPoints = int(candidate[1][1]['vote_count'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in finalRatings]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints \\\n + PREDICTION_VOTECOUNT_WEIGHT * voteCountPoints\n\n finalRatingCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRatingCandidateWeights = np.sum([float(x[0]) for x in finalRatingCandidatesWithWeight])\n sumRatingTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRatingCandidatesWithWeight])\n\n ratingPrediction = float(sumRatingTimesCandidateWeight / sumRatingCandidateWeights)\n\n return ratingPrediction", "def adjusted_classes(pred_prob, threshold):\n return [1 if y >= threshold else 0 for y in pred_prob]", "def predict_with_threshold(y_pred_proba, threshold):\n\n y_pred = [1 if x >= threshold else 0 for x in y_pred_proba]\n return pd.Series(data=y_pred, name='y_pred')", "def thresholding(pred,label,thres):\n \n conf =[]\n \n for i in thres:\n \n pr_th,lab_th = (pred>i),(label>i)\n conf += confusion(pr_th,lab_th)\n \n return np.array(conf).reshape(-1,4)", "def evaluate(labels, predictions):\n positive_count = 0\n positive = 0\n negative_count = 0\n negative = 0\n for i in range(len(labels)):\n if labels[i] == 1:\n positive_count+=1\n if predictions[i] == 1:\n positive +=1\n else:\n negative_count+=1\n if predictions[i] == 0:\n negative +=1\n\n sensitivity = positive / positive_count\n specificity = negative / negative_count\n\n return (sensitivity, specificity)", "def evaluate(labels, predictions):\n pos = 0\n neg = 0\n true_pos_rate = 0\n true_neg_rate = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n pos += 1\n else:\n neg += 1\n if predictions[i] == labels[i]:\n if predictions[i] == 1:\n true_pos_rate += 1\n else:\n true_neg_rate += 1\n \n sensitivity = true_pos_rate / 
pos\n specificity = true_neg_rate / neg\n\n return (sensitivity, specificity)", "def accuracy(targets: List[int], preds: Union[List[float], List[List[float]]], \n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)", "def accuracy(targets: List[int],\n preds: Union[List[float], List[List[float]]],\n threshold: float = 0.5) -> float:\n if type(preds[0]) == list: # multiclass\n hard_preds = [p.index(max(p)) for p in preds]\n else:\n hard_preds = [1 if p > threshold else 0 for p in preds] # binary prediction\n\n return accuracy_score(targets, hard_preds)", "def decide(el, il, model, threshold):\n\n if model == 0:\n return el >= threshold[0] and il >=threshold[1]\n elif model == 1:\n return el >= threshold[0] or il >= threshold[1]\n elif model == 2:\n return harmonic_mean([el, il]) >= harmonic_mean(threshold)\n else:\n return bool(round(random.random()))", "def _performance(Classifier, features, labels, threshold):\n correct = 0\n for index, vector in enumerate(features):\n result = _minimal_predict(Classifier, vector, threshold)\n if result == \"Positive\" and labels[index] == 1.0 or result == \"Negative\" and \\\n labels[index] == 0.0 or result == \"Neutral\":\n correct += 1\n Classifier.performance = correct / len(labels) * 100\n return Classifier.performance", "def specificity(\n targets: List[int], preds: List[float], threshold: float = 0.5\n) -> float:\n hard_preds = [1 if p > threshold else 0 for p in preds]\n tn, fp, _, _ = confusion_matrix(targets, hard_preds).ravel()\n return tn / float(tn + fp)", "def evaluate(self, threshold=0.5):\n pass", "def predict(self, X):\n z = self.transform(X)\n pred = z < self.threshold\n return pred", "def ensembleVote(x, classes, ensemble):\n votes = np.array([0 for kk in range(len(classes))])\n for i in ensemble:\n votes = votes + classProbs(x, ensemble[i][\"tree\"], classes)\n maxVote = 0\n loc = None\n for ind, vote in enumerate(votes):\n if vote > maxVote:\n maxVote = vote\n loc = ind\n prediction = classes[loc]\n return prediction", "def evaluate(labels, predictions):\n correct_positive = 0\n correct_negative = 0\n total_positive = 0\n total_negative = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n total_positive += 1\n if predictions[i] == 1:\n correct_positive += 1\n else:\n total_negative += 1\n if predictions[i] == 0:\n correct_negative += 1\n\n sensitivity = correct_positive / total_positive\n specificity = correct_negative / total_negative\n\n return sensitivity, specificity", "def get_thresholdtable_from_fpr(scores,labels, fpr_list):\n threshold_list = []\n live_scores = []\n for score, label in zip(scores,labels):\n if label == 0:\n live_scores.append(float(score))\n live_scores.sort(reverse=True)\n live_nums = len(live_scores)\n for fpr in fpr_list:\n i_sample = int(fpr * live_nums)\n i_sample = max(1, i_sample)\n threshold_list.append(live_scores[i_sample - 1])\n return threshold_list", "def classifier(x):\n return x[0] - x[1] + 4 < 0", "def round_using_t(prediction, threshold):\n return (prediction >= threshold).astype('int')", "def preds_proba_to_preds_class(preds_proba,threshold):\n return [True if pred > threshold else False for pred in preds_proba]", "def evaluate(labels, predictions):\n\n truePositiveCounter = 0\n trueNegativeCounter = 0\n truePositiveCorrect = 0\n trueNegativeCorrect = 0\n \n sensitivity = 0\n 
specificity = 0\n\n for i in range(len(labels)):\n if labels[i] == 1:\n truePositiveCounter += 1\n if(labels[i] == predictions[i]):\n truePositiveCorrect += 1\n elif labels[i] == 0:\n trueNegativeCounter += 1\n if(labels[i] == predictions[i]):\n trueNegativeCorrect += 1\n\n sensitivity = truePositiveCorrect / truePositiveCounter\n specificity = trueNegativeCorrect / trueNegativeCounter\n\n return sensitivity, specificity", "def partial_match_score(\n truth: List[Rationale], pred: List[Rationale], thresholds: List[float]\n) -> List[PartialMatchScore]:\n\n ann_to_rat = _keyed_rationale_from_list(truth)\n pred_to_rat = _keyed_rationale_from_list(pred)\n\n num_classifications = {k: len(v) for k, v in pred_to_rat.items()}\n num_truth = {k: len(v) for k, v in ann_to_rat.items()}\n ious: Dict[str, Dict[str, float]] = defaultdict(dict)\n for k in set(ann_to_rat.keys()) | set(pred_to_rat.keys()):\n for p in pred_to_rat.get(k, []):\n best_iou = 0.0\n for t in ann_to_rat.get(k, []):\n num = len(\n set(range(p.start_token, p.end_token))\n & set(range(t.start_token, t.end_token))\n )\n denom = len(\n set(range(p.start_token, p.end_token))\n | set(range(t.start_token, t.end_token))\n )\n iou = 0 if denom == 0 else num / denom\n if iou > best_iou:\n best_iou = iou\n ious[k][p] = best_iou\n\n scores: List[PartialMatchScore] = []\n for threshold in thresholds:\n threshold_tps: Dict[str, float] = {}\n for k, vs in ious.items():\n threshold_tps[k] = sum(int(x >= threshold) for x in vs.values())\n micro_r = (\n sum(threshold_tps.values()) / sum(num_truth.values())\n if sum(num_truth.values()) > 0\n else 0\n )\n micro_p = (\n sum(threshold_tps.values()) / sum(num_classifications.values())\n if sum(num_classifications.values()) > 0\n else 0\n )\n micro_f1 = _f1(micro_r, micro_p)\n macro_rs = list(\n threshold_tps.get(k, 0.0) / n if n > 0 else 0 for k, n in num_truth.items()\n )\n macro_ps = list(\n threshold_tps.get(k, 0.0) / n if n > 0 else 0\n for k, n in num_classifications.items()\n )\n macro_r = sum(macro_rs) / len(macro_rs) if len(macro_rs) > 0 else 0\n macro_p = sum(macro_ps) / len(macro_ps) if len(macro_ps) > 0 else 0\n macro_f1 = _f1(macro_r, macro_p)\n\n scores.append(\n PartialMatchScore(\n threshold=threshold,\n micro=InstanceScore(p=micro_p, r=micro_r, f1=micro_f1),\n macro=InstanceScore(p=macro_p, r=macro_r, f1=macro_f1),\n )\n )\n\n return scores", "def evaluateObjective(posts, threshold):\n partialSum = 0\n for post in posts:\n partialSum += max(np.sign(post[\"similarity\"] - threshold) * post[\"score\"], 0)\n return partialSum", "def tpr(positive, negative, fpr):\n threshold = np.percentile(np.asarray(negative), 100 - fpr)\n total_true_positives = sum(positive > threshold)\n\n return total_true_positives / len(positive)", "def get_tpr_from_threshold(scores,labels, threshold_list):\n tpr_list = []\n hack_scores = []\n for score, label in zip(scores,labels):\n if label == 1:\n hack_scores.append(float(score))\n hack_scores.sort(reverse=True)\n hack_nums = len(hack_scores)\n for threshold in threshold_list:\n hack_index = 0\n while hack_index < hack_nums:\n if hack_scores[hack_index] <= threshold:\n break\n else:\n hack_index += 1\n if hack_nums != 0:\n tpr = hack_index * 1.0 / hack_nums\n else:\n tpr = 0\n tpr_list.append(tpr)\n return tpr_list", "def predict(self, X, threshold=0.5):\n\n return [int(self._predict(Xi) >= threshold) for Xi in X]", "def pred_from_prob(a,threshold):\n bin_preds = np.zeros((np.size(a,0),))\n bin_preds[np.where(a[:,1]>threshold)]=1.0\n return bin_preds", "def 
get_optimal_threshhold(true_label, prediction, iterations=100, size=17):\n best_threshhold = [0.2]*size\n for t in range(size):\n best_fbeta = 0\n temp_threshhold = [0.2]*size\n for i in range(iterations):\n temp_value = i / float(iterations)\n temp_threshhold[t] = temp_value\n temp_fbeta = fbeta(true_label, prediction > temp_threshhold)\n if temp_fbeta > best_fbeta:\n best_fbeta = temp_fbeta\n best_threshhold[t] = temp_value\n return best_threshhold", "def recall(targets: List[int], preds: List[float], threshold: float = 0.5) -> float:\n hard_preds = [1 if p > threshold else 0 for p in preds]\n return recall_score(targets, hard_preds)", "def one_prediction(predictions, step_nb, threshold):\n number_sequences = step_nb//50\n total_prediction = 0\n for i in range(number_sequences):\n total_prediction += 1/predictions[i]\n return(total_prediction/step_nb)", "def scoring_function(self, model, y_true, y_predicted_probability):", "def build_predict(tf_prob, threshold=0.5):\n prediction = tf.cast(tf_prob, tf.float64)\n threshold = float(threshold)\n return tf.cast(tf.greater(prediction, threshold), tf.float32)", "def fpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[self.test_errors] >= threshold) / float(numpy.sum(self.test_errors))", "def f1_score_per_label(y_true, y_pred, threshold, eps=1e-9):\n \n y_pred = torch.ge(y_pred.float(), threshold).float()\n\n y_true = y_true.float()\n\n tp_l = (y_pred * y_true).sum(0).float()\n\n fp_l = (y_pred * (1 - y_true)).sum(0).float()\n\n fn_l = ((1 - y_pred) * y_true).sum(0).float()\n\n precision_label = tp_l.div(tp_l + fp_l + eps)\n\n recall_label = tp_l.div(tp_l + fn_l + eps)\n\n f1_label = (precision_label * recall_label).div(precision_label + recall_label + eps) * 2\n\n return f1_label, precision_label, recall_label", "def apply_thresholding(x):\n return x > threshold_otsu(x)", "def evaluate(labels, predictions):\n TP = 0\n actualP = 0\n TN = 0\n actualN = 0\n for label, prediction in zip(labels, predictions):\n if label ==1:\n actualP +=1\n if prediction == 1:\n TP +=1\n else:\n actualN +=1\n if prediction ==0:\n TN +=1\n \n sensitivity = float(TP/actualP)\n specificity = float(TN/actualN)\n return (sensitivity, specificity)", "def evaluate(labels, predictions):\n #labels and predictions\n truePos = 0\n trueNeg = 0\n for data in range(len(labels)):\n if((predictions[data] == 1) and (predictions[data] == labels[data])):\n truePos+=1\n elif((predictions[data] == 0) and (predictions[data] == labels[data])):\n trueNeg+=1\n sensitivity = truePos/(len(labels) + 1)\n specificity = trueNeg/(len(labels) + 1)\n return (sensitivity, specificity)\n \n\n #raise NotImplementedError", "def give_balanced_classes(reviews, votes, votes_threshold):\n if votes_threshold <= 0:\n print \"Needs positive threshold\"\n return\n\n negative_reviews_indices = []\n\n # Find all the funny reviews we can\n final_reviews = []\n final_labels = []\n for i, review in enumerate(reviews):\n if votes[i] >= votes_threshold:\n final_reviews.append(review)\n final_labels.append(1)\n elif votes[i] == 0:\n negative_reviews_indices.append(i)\n\n # We want balanced classes so take same number\n np.random.shuffle(negative_reviews_indices)\n num_positive_reviews = len(final_reviews)\n for i in range(num_positive_reviews):\n final_reviews.append(reviews[negative_reviews_indices[i]])\n final_labels.append(0)\n\n # Shuffle final reviews and labels\n combined_lists = zip(final_reviews, final_labels)\n np.random.shuffle(combined_lists)\n final_reviews[:], 
final_labels[:] = zip(*combined_lists)\n\n print \"Returning %d positive reviews and a total of %d reviews\" % (num_positive_reviews, len(final_reviews))\n\n return (final_reviews, final_labels)", "def precision_threshold(predictions, targets, threshold=0.7):\n number_of_examples_meeting_threshold = 0\n\n for pred, targ in zip(predictions, targets):\n total_positive_guesses = sum(pred)\n correct_positive_guesses = 0\n\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1:\n correct_positive_guesses += 1\n\n example_precision = correct_positive_guesses / total_positive_guesses\n if example_precision > threshold:\n number_of_examples_meeting_threshold += 1\n\n print(number_of_examples_meeting_threshold)\n examples_meeting_threshold_ratio = number_of_examples_meeting_threshold / len(predictions)\n print(examples_meeting_threshold_ratio)", "def result_poll(votes):\n return sum(votes) >= 2 / 3 * len(votes)", "def evaluation_detections(thresholds, bboxes_gt, bboxes_detected, num_instances):\r\n TP = np.zeros(len(thresholds), dtype=int)\r\n FP = np.zeros(len(thresholds), dtype=int)\r\n\r\n scores_detections = [[] for i in range(len(thresholds))]\r\n # scores_detections is pair of values [result, confidence] where result is true if the example is correctly\r\n # classified and confidence is the confidence of the prediction. It's used to compute the precision-recall\r\n # curve. Confidence score is random if the predicted scores do not belong to a detector.\r\n\r\n for key in bboxes_detected.keys():\r\n for bbox_noisy in bboxes_detected[key]:\r\n if key in bboxes_gt: # if we have detected stuff and it is in the gt\r\n scores = [bbox_iou(bbox_noisy[1:5], bbox[1:5]) for bbox in bboxes_gt[key]]\r\n max_score = max(scores)\r\n for i, threshold in enumerate(thresholds):\r\n if max_score > threshold:\r\n TP[i] += 1\r\n # we give correct boxes a slightly higher confidence score\r\n scores_detections[i].append([1, bbox_noisy[5]])\r\n else:\r\n FP[i] += 1\r\n scores_detections[i].append([0, bbox_noisy[5]])\r\n else: # if we have detected stuff and it is not in the gt\r\n for i, threshold in enumerate(thresholds):\r\n FP[i] += 1\r\n\r\n FN = num_instances - TP # number of instances not detected\r\n return TP, FP, FN, np.array(scores_detections)", "def perform_thresholding(f,M,type):\n if type == \"largest\":\n a = np.sort(np.ravel(abs(f)))[::-1] #sort a 1D copy of F in descending order\n T = a[M]\n y = f*(abs(f) > T)\n elif type == \"soft\":\n s = abs(f) - M\n s = (s + abs(s))/2\n y = np.sign(f)*s\n elif type == \"hard\":\n y = f*(abs(f) > M)\n return y", "def accuracy(targets: List[int], preds: List[float], threshold: float = 0.5) -> float:\n hard_preds = [1 if p > threshold else 0 for p in preds]\n return accuracy_score(targets, hard_preds)", "def _F_performance(self, score, thresholds, condition):\r\n if type(thresholds) is float:\r\n thresholds = self._compute_thresholds(thresholds)\r\n F = np.zeros(shape=(1, len(thresholds)))\r\n impostors = 0\r\n L = len(score)\r\n for count, thr in enumerate(thresholds):\r\n N = 0\r\n for idx in range(0, L):\r\n N += condition(score[idx], thr)\r\n F[0, count] = N / L\r\n return F[0]", "def build_predict_2(tf_prob, threshold=0.5):\n prediction = tf.cast(tf_prob, tf.float64)\n threshold = float(threshold)\n return tf.cast(tf.less_equal(prediction, threshold), tf.float32)", "def predStat(self,x,f):\n return f([tree.predict(x) for tree in self.forest])", "def precision(gt, pred, k):\n k = min(len(pred), k)\n den = min(len(gt), k)\n 
return sum([int(pred[i] in gt) for i in range(k)]) / den", "def evaluate(labels, predictions):\n\n # Positive and positive identified count\n pos = 0\n posid = 0\n\n # Negative and positive identified count\n neg = 0\n negid = 0\n\n for label, pred in zip(labels, predictions):\n if label == 1:\n pos += 1\n if pred == 1:\n posid += 1\n elif label == 0:\n neg += 1\n if pred == 0:\n negid += 1\n else:\n raise ValueError\n\n # `sensitivity` should be a floating-point value from 0 to 1\n # representing the \"true positive rate\": the proportion of\n # actual positive labels that were accurately identified.\n sens = float(posid / pos)\n\n # `specificity` should be a floating-point value from 0 to 1\n # representing the \"true negative rate\": the proportion of\n # actual negative labels that were accurately identified.\n spec = float(negid / neg)\n\n return (sens, spec)", "def find_TPR_threshold(y, scores, desired_TPR):\n for threshold in np.arange(1,0,-0.01):\n y_hat = (scores>=threshold).astype(int)\n confusion = confusion_matrix(y, y_hat)\n TN, FP, FN, TP = confusion.flat\n TPR = TP / (TP + FN)\n FPR = FP / (FP + TN)\n if TPR >= desired_TPR:\n return threshold, FPR", "def selectThreshold(yval, pval):\n bestEpsilon = 0\n bestF1 = 0\n F1 = 0\n\n stepsize = (pval.max()-pval.min())/1000\n for epsilon in np.arange(pval.min(), pval.max()+stepsize/2, stepsize):\n predictions = (pval < epsilon)\n tp = ((predictions == 1) & (yval == 1)).sum()\n fp = ((predictions == 1) & (yval == 0)).sum()\n fn = ((predictions == 0) & (yval == 1)).sum()\n prec = tp/(tp+fp)\n rec = tp/(tp+fn)\n F1 = 2*prec*rec/(prec+rec)\n\n if F1 > bestF1:\n bestF1 = F1\n bestEpsilon = epsilon\n\n return bestEpsilon, bestF1", "def _find_threshold(self, feature, y_train, num_class):\n assert len(num_class) == 2, \"This function only assumes work with binary classification.\"\n best_threshold = 0.0\n max_exact_classification = 0.0\n is_positive_negative = False\n sorted_feature = sorted(np.unique(feature))\n for i in range(len(sorted_feature)-1):\n # assume the value less than threshold is negative (0), greater than threshold is positive (1)\n threshold = (sorted_feature[i] + sorted_feature[i+1]) / 2\n left_partition = y_train[feature < threshold]\n right_partition = y_train[feature > threshold]\n negative_positive = ((len(left_partition[left_partition == 0]) + len(right_partition[right_partition == 1]))\n / len(feature))\n # assume the value less than threshold is positive (1), greater than threshold is negative. 
(0)\n positive_negative = ((len(left_partition[left_partition == 1]) + len(right_partition[right_partition == 0]))\n / len(feature))\n # make decision here\n is_positive_negative = positive_negative > negative_positive\n choose = positive_negative if is_positive_negative else negative_positive\n if max_exact_classification < choose:\n max_exact_classification = choose\n best_threshold = threshold\n return best_threshold, is_positive_negative", "def prediction_processing(predictions, labels, threshold, step_nb):\n new_labels = []\n new_predictions = []\n number_sequences = step_nb//50\n\n for k in range(len(labels)//number_sequences):\n total_prediction = 0\n isLabelTrue = labels[number_sequences*k]\n for i in range(number_sequences):\n total_prediction += (1/predictions[number_sequences*k+i])\n if not(isLabelTrue == (labels[number_sequences*k+i])):\n logger.error('Problem.')\n if total_prediction > threshold:\n total_prediction = False\n else:\n total_prediction = True\n new_labels.append(isLabelTrue)\n new_predictions.append(total_prediction)\n\n recall_1 = recall_score(new_labels, new_predictions)\n recall_0 = recall_score(new_labels, new_predictions, pos_label=0)\n precision_1 = precision_score(new_labels, new_predictions)\n precision_0 = precision_score(new_labels, new_predictions, pos_label=0)\n return((recall_1, recall_0, precision_1, precision_0), new_predictions, new_labels)", "def predict(data):\n engineered_set = feature_engineering(data)\n prediction = get_prediction(engineered_set, data)\n rounded_pred = round(prediction, 4)\n print(rounded_pred)\n if rounded_pred < 0.1:\n rounded_pred = 0.1\n\n return rounded_pred", "def predict(self, X):\n predictions = [c.predict(X) for c in self.classifiers]\n votes = np.zeros((len(X),self.label_count), dtype='i8')\n for row in xrange(len(X)):\n for model in xrange(self.model_count):\n for label in xrange(len(self.label_sets[model])):\n votes[row,self.label_sets[model][label]] += predictions[model][row][label]\n\n voters = np.zeros(self.label_count, dtype='i8')\n for label_set in self.label_sets:\n voters[label_set] += 1\n\n for row in xrange(len(X)):\n for label in xrange(self.label_count):\n votes[row, label] = int(float(votes[row, label]) / float(voters[label]) > 0.5)\n\n return votes", "def predict(self, x):\n return [1 if probability > self.decision_threshold else 0 for probability in self.predict_probs(x)]", "def fraction_exceeds(vector, threshold):\n return float(len(np.where(vector > threshold)[0])) / float(len(vector))", "def threshold(self, value):\r\n threshold = 0.5\r\n if value >= threshold:\r\n return 1\r\n else:\r\n return 0", "def prediction_prob(self, x, weights):\n _, probs = self.predict_probability(x, weights)\n preds = []\n for p in probs:\n if p>0.5: preds.append(1)\n else: preds.append(-1)\n return preds", "def predict(self, threshold=0.5):\n probabilities = self.probability_array()\n classes = np.zeros(self.N)\n classes[probabilities > threshold] = 1\n return classes", "def test_fitted_lof_predict():\n\n lof_detector = make_lof_detector(k=10)\n x_ref = np.random.randn(100, 2)\n lof_detector.infer_threshold(x_ref, 0.1)\n x = np.array([[0, 10], [0, 0.1]])\n\n y = lof_detector.predict(x)\n y = y['data']\n scores = lof_detector.score(x)\n assert np.all(y['instance_score'] == scores)\n assert y['instance_score'][0] > y['instance_score'][1]\n assert y['threshold_inferred']\n assert y['threshold'] is not None\n assert isinstance(y['threshold'], float)\n assert y['p_value'].all()\n assert (y['is_outlier'] == [True, 
False]).all()", "def predictRevenue(toPredict, candidateList,predictedRating):\n\n revenueRelevantCandidates = []\n\n #Remove candidates with revenue of 0 where there is not data on the revenue\n for candidate in candidateList:\n\n currentCandidate = candidate[1]\n\n #candidates rating should be in between predictedRating + 0.8 and predictedRating - 0.8\n if predictedRating - 0.8 <= currentCandidate['vote_avg'] <= predictedRating + 0.8:\n if int(currentCandidate['revenue_adj']) > 0 and int(currentCandidate['budget_adj']) > 0:\n revenueRelevantCandidates.append((float(currentCandidate['revenue_adj'])/float(currentCandidate['budget_adj']), candidate))\n else:\n revenueRelevantCandidates.append((0, candidate))\n\n #Calculate the mean and standard deviation of the candidates revenue\n revenueMean = np.mean([x[0] for x in revenueRelevantCandidates])\n revenueSD = np.std([x[0] for x in revenueRelevantCandidates])\n\n #print(revenueMean)\n #print(revenueSD)\n\n\n #Remove outliers from the candidates\n finalRevenues = [x for x in revenueRelevantCandidates if (x[0] != 0)]\n finalRevenues = [x for x in finalRevenues if (float(x[0]) < revenueMean + revenueSD)]\n finalRevenues = [x for x in finalRevenues if (float(x[0]) > revenueMean - revenueSD)]\n\n #TEST: SHOW ALL CANDIDATES WITH ROR\n #print(finalRevenues)\n\n\n #Calculate the weights for each of the remaining candidates\n finalRevenueCandidatesWithWeight = []\n\n for candidate in finalRevenues:\n directorPoints = compareDirectorPoints(toPredict['director'], candidate[1][1]['director'])\n actorPoints = compareActorPoints(toPredict['cast'], candidate[1][1]['cast'])\n matchPoints = candidate[1][0] / np.max([float(x[1][0]) for x in finalRevenues]) * 100\n candidateWeight = PREDICTION_MATCHPOINTS_WEIGHT * matchPoints \\\n + PREDICTION_ACTOR_WEIGHT * actorPoints \\\n + PREDICTION_DIRECTOR_WEIGHT * directorPoints\n finalRevenueCandidatesWithWeight.append((candidateWeight, candidate[0]))\n\n #Calculate the prediction\n sumRevenueCandidateWeights = np.sum([float(x[0]) for x in finalRevenueCandidatesWithWeight])\n sumRevenueTimesCandidateWeight = np.sum([float(x[0]) * float(x[1]) for x in finalRevenueCandidatesWithWeight])\n\n revenuePrediction = float(sumRevenueTimesCandidateWeight / sumRevenueCandidateWeights)\n\n return revenuePrediction", "def predict(parameters, X):\n\n\t# Computes probabilities using forward propagation, and classifies to 0/1 using 0.5 as the threshold.\n\tA2, cache = forward_propagation(X, parameters)\n\tpredictions = (A2 > 0.5) #[1 if i > 0.5 else 0 for i in A2]\n\n\treturn predictions", "def _ovr_decision_function(predictions, confidences, n_classes):\n n_samples = predictions.shape[0]\n votes = np.zeros((n_samples, n_classes))\n sum_of_confidences = np.zeros((n_samples, n_classes))\n\n k = 0\n for i in range(n_classes):\n for j in range(i + 1, n_classes):\n sum_of_confidences[:, i] -= confidences[:, k]\n sum_of_confidences[:, j] += confidences[:, k]\n votes[predictions[:, k] == 0, i] += 1\n votes[predictions[:, k] == 1, j] += 1\n k += 1\n\n # Monotonically transform the sum_of_confidences to (-1/3, 1/3)\n # and add it with votes. 
The monotonic transformation is\n # f: x -> x / (3 * (|x| + 1)), it uses 1/3 instead of 1/2\n # to ensure that we won't reach the limits and change vote order.\n # The motivation is to use confidence levels as a way to break ties in\n # the votes without switching any decision made based on a difference\n # of 1 vote.\n transformed_confidences = sum_of_confidences / (\n 3 * (np.abs(sum_of_confidences) + 1)\n )\n return votes + transformed_confidences", "def decision():\n return random.random() > 0.5", "def score_for_threshold(y, y_hat, score_func, threshold):\n y_rounded = np.where(y_hat >= threshold, 1, 0)\n return score_func(y, y_rounded)", "def cal_precision(preds, gts, tp_threshold=0.9, fp_threshold=0.1):\r\n TPs, tplabels = cal_TP(preds, gts)\r\n FPs, fplabels = cal_FP(preds, gts)\r\n \r\n correct_pred = set()\r\n correspond_pred = []\r\n # correspond_gt = []\r\n for i, (tp, label) in enumerate(zip(TPs, tplabels)):\r\n if ( fplabels[int(label)-1] == i+1\r\n and tp >= tp_threshold \r\n and label > 0 \r\n and FPs[int(label)-1] <= fp_threshold ):\r\n correct_pred.add(i+1)\r\n correspond_pred.append(label)\r\n else:\r\n correspond_pred.append(0.)\r\n # for _, (fp, label) in enumerate(zip(FPs, fplabels)):\r\n # if fp <= fp_threshold and label > 0 and TPs[int(label)-1] >= tp_threshold:\r\n # correct_pred.add(label)\r\n # correspond_gt.append(label)\r\n # else:\r\n # correspond_gt.append(0.)\r\n result = [[i, correspond_pred[i-1]] for i in correct_pred]\r\n\r\n return len(correct_pred) / len(TPs), result #list(correct_pred), list(correspond_pred)#, list(correspond_gt)\r", "def check_true(answer, label):\n if answer > 0.5 and label == 1:\n return 1\n elif answer < 0.5 and label == 0:\n return 1\n else:\n return 0", "def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))", "def threshold_probs(probs):\n classes = np.ones(len(probs),)\n classes[probs < 0.5] = 0\n return classes", "def round_pred_at_threshold(squares_dict, threshold=THRESHOLD):\n for sq in squares_dict:\n predict = sq.predict\n if predict < threshold:\n sq.replace(pred_int = 0)\n else:\n sq.replace(pred_int = 1)\n return squares_dict", "def stats(detections, faces):\n vp, fp, fn, vn = 0, 0, 0, 0\n max_label = np.max(faces[:, 0])\n for i in range(max_label + 1):\n detections_i = get_label_with_index(detections, i)\n faces_i = get_label_with_index(faces, i)\n local_vp = 0\n for face in faces_i:\n found = False\n for detection in detections_i:\n if intersection_ratio(face, detection) >= 0.5:\n found = True\n break\n if found:\n vp += 1\n local_vp += 1\n else:\n fn += 1\n fp += len(detections_i) - local_vp\n\n precision = vp / (vp + fp)\n rappel = vp / (vp + fn)\n f_score = 2 * ((precision * rappel) / (precision + rappel))\n\n return precision, rappel, f_score", "def binary_accuracy(y_true, y_pred, threshold=0.5):\n assert y_true.shape == y_pred.shape\n y_pred_thresholded = (y_pred >= threshold).astype(float)\n correct_predictions = y_pred_thresholded == y_true \n return correct_predictions.mean()", "def nn_threshold_predict(X, nn, theta):\n tmp = theta.reshape((1, np.size(theta))) - nn.predict(X)\n pred = np.sum(tmp < 0, axis=1).astype(np.int)\n return np.array(pred)", "def constant_feature_detect(data, threshold=0.98):\n\n data_copy = data.copy(deep=True)\n quasi_constant_feature = []\n for feature in data_copy.columns:\n predominant = (\n (data_copy[feature].value_counts() / 
np.float(len(data_copy)))\n .sort_values(ascending=False)\n .values[0]\n )\n if predominant >= threshold:\n quasi_constant_feature.append(feature)\n print(len(quasi_constant_feature), \" variables are found to be almost constant\")\n return quasi_constant_feature", "def prediction():\r\n\r\n\r\n\tpredictVal = []\r\n\taccuracy = 0.0\r\n\r\n\t# Calculate accuracy for each class in testData\r\n\tfor item in testData:\r\n\t\tclass0Prediction = posProb / 100\r\n\t\tclass1Prediction = negProb / 100\r\n\t\t\r\n\t\t# Multiply the prior probablities for negative and positive reviews by their feature likelihoods \r\n\t\tfor word in item[2]:\r\n\t\t\tclass0Prediction *= class0Dict[word]\r\n\t\t\tclass1Prediction *= class1Dict[word]\r\n\r\n\t\t# Give every item in testData a predicted value\r\n\t\tif(class0Prediction > class1Prediction):\r\n\t\t\tpredictVal.append('0')\r\n\t\telse:\r\n\t\t\tpredictVal.append('1')\r\n\r\n\tfor i in range(len(testData)):\r\n\t\tif(testData[i][1] == predictVal[i]):\r\n\t\t\taccuracy += 1\r\n\r\n\t\t\t\r\n\taccuracy = 100 * (accuracy / len(testData))\r\n\treturn(predictVal, accuracy)", "def predict_boosting_example(x, h_ens):\r\n\r\n arr = []\r\n sum_alpha = 0\r\n\r\n for y in h_ens:\r\n # splitting hypothesis, weight pairs\r\n alpha, tree = h_ens[y]\r\n tst_pred = predict_example(x, tree)\r\n # appending prediction\r\n arr.append(tst_pred*alpha)\r\n sum_alpha += alpha\r\n predict_egz = np.sum(arr) / sum_alpha\r\n # weak learner\r\n if predict_egz >= 0.5:\r\n return 1\r\n else:\r\n return 0", "def infer_threshold(self, x: np.ndarray, fpr: float) -> None:\n self.backend.infer_threshold(self.backend._to_backend_dtype(x), fpr)", "def important_features_(self):\n return self.scores_ > self.score_cutoff_", "def evaluation(model_path, threshold):\n classifier = joblib.load(model_path)\n\n positive = np.load(\"./processed_data/validation/positive.npy\")\n unlabeled = np.load(\"./processed_data/validation/unlabeled.npy\")\n\n p_result = np.array(classifier.predict_proba(positive[:, :-1])[:, 1])\n plt.hist(p_result, bins=300)\n plt.show()\n\n tp_rate = np.where(p_result >= threshold, 1, 0).sum() / p_result.shape[0]\n print(tp_rate)\n\n u_result = np.array(classifier.predict_proba(unlabeled[:, :-1])[:, 1])\n plt.hist(u_result, bins=300)\n plt.show()\n\n\n # the following steps aim to filter 'possible' negative instances in the evaluation-unlabeled set\n stageone_classifier = joblib.load(\"./solver_result/liblinear/0.01/logistic.pkl\")\n stgone_result = np.array(stageone_classifier.predict_proba(unlabeled[:,:-1])[:, 1])\n possibly_negative = unlabeled[np.where(stgone_result <= _negative_threshold)]\n print(positive.shape)\n print(unlabeled.shape)\n print(possibly_negative.shape)\n possi_ng_result = np.array(classifier.predict_proba(possibly_negative[:, :-1])[:, 1])\n fp_rate = np.where(possi_ng_result >= threshold, 1, 0).sum() / possi_ng_result.shape[0]\n plt.hist(possi_ng_result, bins=300)\n plt.show()\n\n print(fp_rate)\n print(\"TP: \" + str(tp_rate) + \" FP: \" + str(fp_rate) + \" GMean: \" + str(math.sqrt(tp_rate * (1 - fp_rate))))", "def predict(self, X):\r\n return 1 if self.predict_prob(X) > 0.5 else 0", "def selectThreshold(yval, pval):\n\tbestEpsilon = 0\n\tbestF1 = 0\n\tstepsize = (np.max(pval) - np.min(pval)) / 1000\n\n\tfor epsilon in np.arange(np.min(pval), np.max(pval), stepsize):\n\t\tpredictions = (pval < epsilon) + 0\n\t\ttp = np.sum((yval == 1) & (predictions == 1))\n\t\tfp = np.sum((yval == 0) & (predictions == 1))\n\t\tfn = np.sum((yval == 1) & (predictions == 
0))\n\t\tif tp + fp == 0:\n\t\t\tcontinue\n\t\tprec = float(tp) / (tp + fp) # tips: cast int to float, or you will get 0\n\t\trec = float(tp) / (tp + fn)\n\t\tF1 = 2.0 * prec * rec / (prec + rec)\n\t\tif F1 > bestF1:\n\t\t\tbestF1 = F1\n\t\t\tbestEpsilon = epsilon\n\treturn bestEpsilon, bestF1", "def get_preds_at_or_above_threshold(input_df,\n inferrer_list,\n threshold):\n if threshold == 0.:\n raise ValueError('The given threshold was 0. Please supply a '\n 'value between 0 (exclusive) and 1 (inclusive). A value '\n 'of zero will report every label for every protein.')\n predictions = np.mean([\n inferrer.get_activations(input_df.sequence.values.tolist())\n for inferrer in inferrer_list\n ],\n axis=0)\n cnn_label_vocab = inferrer_list[0].get_variable('label_vocab:0').astype(str)\n\n output_dict = {'sequence_name': [], 'predicted_label': [], 'confidence': []}\n\n for idx, protein_sparse in enumerate(predictions):\n protein = np.asarray(protein_sparse.todense())[0]\n proteins_above_threshold = protein >= threshold\n labels_predicted = cnn_label_vocab[proteins_above_threshold]\n for label, confidence in zip(labels_predicted,\n protein[proteins_above_threshold]):\n output_dict['sequence_name'].append(input_df.sequence_name.values[idx])\n output_dict['predicted_label'].append(label)\n output_dict['confidence'].append(confidence)\n\n return pd.DataFrame(output_dict)", "def weighted_majority_vote(c_pred,m_pred,f_pred,acc_c,acc_m,acc_f, dataset):\n c,m,f = np.argmax(c_pred),np.argmax(m_pred),np.argmax(f_pred)\n coarse = np.zeros(2)\n middle = np.zeros(4)\n fine = np.zeros(10)\n\n if dataset == 'cifar10':\n middle = np.zeros(5)\n coarse[c] = 1\n middle[m] = 1\n fine[f] = 1\n res = np.zeros(10)\n w1 = np.log(acc_c/(1.-acc_c))\n w2 = np.log(acc_m/(1.-acc_m))\n w3 = np.log(acc_f/(1.-acc_f))\n if dataset == 'cifar10':\n for i in range(10):\n if i <2:\n res[i] = w1*coarse[0] + w2*middle[0] + w3*fine[i]\n elif 2<=i <4:\n res[i] = w1*coarse[0] + w2*middle[1] + w3*fine[i]\n elif 4 <=i<6:\n res[i] = w1*coarse[1] + w2*middle[2] + w3*fine[i]\n elif 6<=i<8:\n res[i] = w1*coarse[1] + w2*middle[3] + w3*fine[i]\n else:\n res[i] = w1*coarse[1] + w2*middle[4] + w3*fine[i]\n else :\n for i in range(10):\n if i <3:\n res[i] = w1*coarse[0] + w2*middle[0] + w3*fine[i]\n elif 3<=i <5:\n res[i] = w1*coarse[0] + w2*middle[1] + w3*fine[i]\n elif 5 <=i<8:\n res[i] = w1*coarse[1] + w2*middle[2] + w3*fine[i]\n else:\n res[i] = w1*coarse[1] + w2*middle[3] + w3*fine[i]\n index = np.argmax(res)\n return(index)", "def majority_vote(c_pred,m_pred,f_pred,dataset):\n c,m,f = np.argmax(c_pred),np.argmax(m_pred),np.argmax(f_pred)\n coarse = np.zeros(2)\n middle = np.zeros(4)\n fine = np.zeros(10)\n if dataset == 'cifar10':\n middle = np.zeros(5)\n coarse[c] = 1\n middle[m] = 1\n fine[f] = 1\n res = np.zeros(10)\n if dataset == 'cifar10':\n for i in range(10):\n if i <2:\n res[i] = coarse[0] + middle[0] + fine[i]\n elif 2<=i <4:\n res[i] = coarse[0] + middle[1] + fine[i]\n elif 4 <=i<6:\n res[i] = coarse[1] + middle[2] + fine[i]\n elif 6<=i<8:\n res[i] = coarse[1] + middle[3] + fine[i]\n else:\n res[i] = coarse[1] + middle[4] + fine[i]\n else :\n for i in range(10):\n if i <3:\n res[i] = coarse[0] + middle[0] + fine[i]\n elif 3<=i <5:\n res[i] = coarse[0] + middle[1] + fine[i]\n elif 5 <=i<8:\n res[i] = coarse[1] + middle[2] + fine[i]\n else:\n res[i] = coarse[1] + middle[3] + fine[i]\n index = np.argmax(res)\n return(index)", "def evaluate(inputs, labels):\n _, probs = forward(inputs)\n preds = predict(probs)\n trues = 
np.argmax(labels, axis=0)\n return np.mean(preds == trues)", "def evaluateVoteCount(toCompare):\n\n #weight = 0\n\n if int(toCompare['vote_count']) >= 5000:\n weight = 100\n elif 3000 <= int(toCompare['vote_count']) < 5000:\n weight = 80\n elif 2000 <= int(toCompare['vote_count']) < 3000:\n weight = 60\n elif 1000 <= int(toCompare['vote_count']) < 2000:\n weight = 40\n elif 500 <= int(toCompare['vote_count']) < 1000:\n weight = 20\n else:\n weight = 0\n return weight", "def evaluate(labels, predictions):\n # create 4 variables to represent sensitivity,specificity,total_positive values & total_negative values.\n sensitivity = float(0)\n specificity = float(0)\n\n total_positive = float(0)\n total_negative = float(0)\n\n # run through a for loop to evaluate the sensitivity and specificity of a data set\n for label, prediction in zip(labels, predictions):\n\n if label == 1:\n total_positive += 1\n if prediction == label:\n sensitivity += 1\n\n if label == 0:\n total_negative += 1\n if prediction == label:\n specificity += 1\n\n # data normalization\n sensitivity /= total_positive\n specificity /= total_negative\n\n return sensitivity, specificity", "def gp_optimize_threshold(gp_model, X_val, y_val, X_scaler, y_scaler, optimize_for=\"profits\"): \n y_hat, conf = gp_model.predict(X_val)\n regressed_payment = y_scaler.inverse_transform(y_hat).reshape(-1)\n loan_amt = X_scaler.inverse_transform(X_val)[:,0]\n\n # This ratio is a guage of how likely a person will pay back.\n # It is compared with a threshold to determine whether or not to loan.\n payment_to_loan_ratio = regressed_payment / loan_amt\n\n # Sort in descending order\n sorted_ind = np.argsort(-payment_to_loan_ratio)\n sorted_payment_to_loan_ratio = payment_to_loan_ratio[sorted_ind]\n X_sorted, y_sorted = X_val[sorted_ind,:], y_val[sorted_ind]\n\n threshold, highest_opt_val = 0, 0\n for i, thresh in enumerate(sorted_payment_to_loan_ratio): \n X_loanee = X_sorted[:i+1,:]\n y_loanee = y_sorted[:i+1]\n \n loan_amt_loanee = np.sum(X_scaler.inverse_transform(X_loanee)[:,0])\n payments_loanee = np.sum(y_loanee)\n\n # Optimize for different values\n if optimize_for == \"profits\":\n opt_val = payments_loanee - loan_amt_loanee\n elif optimize_for == \"profit_percentage\":\n opt_val = (payments_loanee - loan_amt_loanee) / loan_amt_loanee\n else:\n raise Exception(\"Illegal optimize_for value: %s\" % optimize_for)\n\n # Keep track of highest value (that is being optimized for)\n if opt_val > highest_opt_val:\n threshold = thresh\n highest_opt_val = opt_val\n return threshold", "def evaluate(inputs, labels):\n # Your code here.\n _, probs = forward(inputs)\n preds = predict(probs)\n trues = np.argmax(labels, axis=1)\n return np.mean(preds == trues)", "def precision(qtd_true_positives, list_of_true_negative_documents, ref=0):\n\n\tfp = 0\n\tfor d in list_of_true_negative_documents:\n\t\tif d.predicted_polarity > ref:\n\t\t\tfp = fp + 1\n\tqtd_true_positives = decimal.Decimal(qtd_true_positives)\n\tfp = decimal.Decimal(fp)\n\treturn (qtd_true_positives / (qtd_true_positives + fp))", "def predict(self, reviews):\n self.vect_rev = self.vectorizer.transform(reviews)\n self.dmat = xgb.DMatrix(self.vect_rev)\n self.probs = self.bst.predict(self.dmat)\n\n # Get tough on reviews by requiring 0.6 probability threshold\n self.preds = 1 * (self.probs > 0.6)", "def accuracy(pred, labels):\n pred = torch.sigmoid(pred)\n predicted = (pred > 0.5).int()\n correct = (predicted == labels).sum().item()\n return correct / labels.shape[0]", "def 
OF1_CalculateThresholdValues(param_list, classNum):\n thresholdValues = [(-1., -1.) for _ in range(classNum-1)] # np.arange(classNum - 1)\n #numRow = sp.math.factorial(classNum-1)\n #numCol = classNum-1\n #thresholdValues = np.arange(numCol*numRow).reshape(numRow, numCol)\n indexOrder = np.argsort(param_list[classNum:classNum * 2])\n\n P = [param_list[indexOrder[i]] for i in range(classNum)]\n my = np.sort(param_list[classNum:classNum * 2])\n sigma = [param_list[classNum * 2 + indexOrder[i]] for i in range(classNum)]\n\n for i in range(classNum - 1):\n a = sigma[i] ** 2 - sigma[i + 1] ** 2\n b = 2 * ( my[i] * ( sigma[i + 1] ** 2 ) - my[i + 1] * ( sigma[i] ** 2 ) )\n c = ( sigma[i] * my[i + 1] ) ** 2 - ( sigma[i + 1] * my[i] ) ** 2 + 2 * ( ( sigma[i] * sigma[i + 1] ) ** 2 ) * math.log(( ( sigma[i + 1] * P[i] ) / ( sigma[i] * P[i + 1] ) ))\n\n p = np.poly1d([a, b, c], False, \"T\")\n p_roots = np.roots(p)\n\n if p_roots.size == 1:\n thresholdValues[i] = (np.real(p_roots[0]), -1)\n else:\n r1 = np.real(p_roots[0])\n r2 = np.real(p_roots[1])\n if (r1 == r2) or (r2 < 0.) or (r2 > 255.):\n thresholdValues[i] = (r1, -1)\n elif (r1 < 0) or (r1 > 255):\n thresholdValues[i] = (r2, -1)\n else:\n thresholdValues[i] = (r1, r2)\n #r1 = np.amin(p_roots)\n #r2 = np.amax(p_roots)\n #if i > 0:\n #if r1 >= thresholdValues[i-1]:\n #thresholdValues[i] = r1\n #else:\n #thresholdValues[i] = r2\n #else:\n #if (r1 >= my[i]) and (r1 < my[i+1]):\n #thresholdValues[i] = r1\n #else:\n #thresholdValues[i] = r2\n\n return thresholdValues", "def prediction(self, x, weights):\n scores, _ = self.predict_probability(x, weights)\n preds = []\n for s in scores:\n if s>0: preds.append(1)\n else: preds.append(-1)\n return preds", "def predict(self, testing_set):\r\n # Run prediction by multiply inputs with the weight and map it\r\n # Through the activation function\r\n final_prob = 0\r\n probability = self.activation(self.weighted_sum(testing_set))\r\n prediction = self.threshold(probability)\r\n if prediction == 1:\r\n final_prob = probability\r\n else:\r\n final_prob = 1 - probability\r\n return [prediction, final_prob]", "def eval_det_cls(pred, gt, ovthresh=0.25, use_07_metric=False):\n\n # construct gt objects\n class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}}\n npos = 0\n for img_id in gt.keys():\n bbox = np.array(gt[img_id])\n det = [False] * len(bbox)\n npos += len(bbox)\n class_recs[img_id] = {'bbox': bbox, 'det': det}\n # pad empty list to all other imgids\n for img_id in pred.keys():\n if img_id not in gt:\n class_recs[img_id] = {'bbox': np.array([]), 'det': []}\n\n # construct dets\n image_ids = []\n confidence = []\n BB = []\n for img_id in pred.keys():\n for box,score in pred[img_id]:\n image_ids.append(img_id)\n confidence.append(score)\n BB.append(box)\n confidence = np.array(confidence)\n BB = np.array(BB) # (nd,4 or 8,3)\n\n # sort by confidence\n sorted_ind = np.argsort(-confidence)\n sorted_scores = np.sort(-confidence)\n BB = BB[sorted_ind, ...]\n image_ids = [image_ids[x] for x in sorted_ind]\n\n # go down dets and mark TPs and FPs\n nd = len(image_ids)\n tp = np.zeros(nd)\n fp = np.zeros(nd)\n for d in range(nd):\n if d%100==0: \n print(d)\n R = class_recs[image_ids[d]]\n bb = BB[d,:].astype(float)\n ovmax = -np.inf\n BBGT = R['bbox'].astype(float)\n\n if BBGT.size > 0:\n # compute overlaps\n for j in range(BBGT.shape[0]):\n iou = get_iou(bb, BBGT[j,...]) \n if iou > ovmax:\n ovmax = iou\n jmax = j\n\n #print d, ovmax\n if ovmax > ovthresh:\n if not R['det'][jmax]:\n tp[d] = 1.\n 
R['det'][jmax] = 1\n else:\n fp[d] = 1.\n else:\n fp[d] = 1.\n\n # compute precision recall\n fp = np.cumsum(fp)\n tp = np.cumsum(tp)\n rec = tp / float(npos)\n print('NPOS: ', npos)\n print('ND:', nd)\n # avoid divide by zero in case the first detection matches a difficult\n # ground truth\n prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)\n ap = voc_ap(rec, prec, use_07_metric)\n\n return rec, prec, ap", "def cal_thresh(pred_prob,labels):\n mu_stds = []\n for i in range(19):\n pos_mu, pos_std = fit(pred_prob[labels==i, i])\n mu_stds.append([pos_mu, pos_std])\n return mu_stds", "def Vote(i, j, budget, count):\r\n if(count < budget):\r\n if(random.uniform(0, i+j) < i):\r\n return True\r\n else:\r\n return False\r\n else:\r\n if(random.uniform(0, 1) < 0.5):\r\n return True\r\n else:\r\n return False" ]
[ "0.6548085", "0.6381974", "0.6354342", "0.6264731", "0.6184546", "0.6126238", "0.61081564", "0.60839015", "0.6067138", "0.60335726", "0.5974422", "0.59717226", "0.5970143", "0.59656197", "0.5960877", "0.5954389", "0.59533507", "0.5947933", "0.5928161", "0.5925397", "0.59252214", "0.5909234", "0.59064037", "0.5898637", "0.58979887", "0.5890211", "0.5889917", "0.5866966", "0.58560133", "0.58470017", "0.5832793", "0.5828154", "0.58233494", "0.58211195", "0.5819072", "0.5800164", "0.5799013", "0.57949966", "0.5787278", "0.5765179", "0.57632023", "0.57396936", "0.5737328", "0.57372797", "0.57275957", "0.57163954", "0.5708641", "0.56948245", "0.56918925", "0.5682091", "0.56812435", "0.5655527", "0.5652588", "0.56309557", "0.56299776", "0.5627217", "0.56154907", "0.56146294", "0.5609561", "0.56050164", "0.5597008", "0.55808103", "0.5559725", "0.55584776", "0.555642", "0.5552554", "0.555227", "0.55482405", "0.5548062", "0.5531639", "0.55314696", "0.5526291", "0.55210793", "0.55074334", "0.54856735", "0.5471689", "0.5456503", "0.5454799", "0.5446557", "0.5440856", "0.54407257", "0.543288", "0.5425297", "0.5413037", "0.5407989", "0.54013497", "0.54008114", "0.5396936", "0.5396704", "0.5396455", "0.53951436", "0.5390976", "0.5386233", "0.53830194", "0.5379765", "0.5378986", "0.5377274", "0.537682", "0.5374473", "0.537361" ]
0.7472907
0
Takes dictionaries of predicted and ground truth and returns confusion matrix
def confusion_matrix(predicted, gt):
    tp = [k for k in predicted if predicted[k] and gt[k]]
    tn = [k for k in predicted if not predicted[k] and not gt[k]]
    fp = [k for k in predicted if predicted[k] and not gt[k]]
    fn = [k for k in predicted if not predicted[k] and gt[k]]
    return tp, tn, fp, fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_confusion_matrix_intersection_mats(groundtruth, predicted):\n\n confusion_matrix_arrs = {}\n\n groundtruth_inverse = np.logical_not(groundtruth)\n predicted_inverse = np.logical_not(predicted)\n\n confusion_matrix_arrs['tp'] = np.logical_and(groundtruth, predicted)\n confusion_matrix_arrs['tn'] = np.logical_and(groundtruth, predicted_inverse)\n confusion_matrix_arrs['fp'] = np.logical_and(groundtruth_inverse, predicted)\n confusion_matrix_arrs['fn'] = np.logical_and(groundtruth, predicted_inverse)\n\n return confusion_matrix_arrs", "def get_confusion_matrix(y_true, y_pred):\r\n\r\n ## 3 classes\r\n TP1, TP2, TP3, FP1, FP2, FP3, TN1, TN2, TN3, FN1, FN2, FN3 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 0 and y_pred[i] == 0:\r\n TN1 += 1\r\n elif y_true[i] == 0 and y_pred[i] != 0:\r\n FP1 += 1\r\n elif y_true[i] != 0 and y_pred[i] == 0:\r\n FN1 += 1\r\n elif y_true[i] != 0 and y_pred[i] != 0:\r\n TP1 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 1 and y_pred[i] == 1:\r\n TN2 += 1\r\n elif y_true[i] == 1 and y_pred[i] != 1:\r\n FP2 += 1\r\n elif y_true[i] != 1 and y_pred[i] == 1:\r\n FN2 += 1\r\n elif y_true[i] != 1 and y_pred[i] != 1:\r\n TP2 += 1\r\n\r\n for i in range(y_true.shape[0]):\r\n if y_true[i] == 2 and y_pred[i] == 2:\r\n TN3 += 1\r\n elif y_true[i] == 2 and y_pred[i] != 2:\r\n FP3 += 1\r\n elif y_true[i] != 2 and y_pred[i] == 2:\r\n FN3 += 1\r\n elif y_true[i] != 2 and y_pred[i] != 2:\r\n TP3 += 1\r\n\r\n conf_matrix1 = [\r\n [TP1, FP1],\r\n [FN1, TN1]\r\n ]\r\n conf_matrix2 = [\r\n [TP2, FP2],\r\n [FN2, TN2]\r\n ]\r\n conf_matrix3 = [\r\n [TP3, FP3],\r\n [FN3, TN3]\r\n ]\r\n\r\n return conf_matrix1, conf_matrix2, conf_matrix3", "def confusion_matrix(self,predictions,labels):\n TP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == True))\n FP = np.sum((np.round(predictions) == True) * (np.asarray(labels, dtype=bool) == False))\n FN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == True))\n TN = np.sum((np.round(predictions) == False) * (np.asarray(labels, dtype=bool) == False))\n\n return np.array([[TP,FP],[FN,TN]])", "def confusionMatrix(actual, predict, truePositiveClass=''):\n classes = list(set(actual + predict))\n if len(truePositiveClass) > 0:\n id0 = classes.index(truePositiveClass)\n classes[id0] = classes[0]\n classes[0] = truePositiveClass\n cMatrix = np.zeros( (len(classes), len(classes)) )\n\n for i in range(0,len(predict)):\n ida = classes.index(actual[i])\n idp = classes.index(predict[i])\n cMatrix[ida][idp] += 1\n return cMatrix", "def confusion_matrix(classifier_output, true_labels):\n\n # TODO: finish this.\n true_pos = 0.0\n true_neg = 0.0\n false_neg = 0.0\n false_pos = 0.0\n for elem1,elem2 in zip(classifier_output, true_labels):\n if(elem1==elem2) and (elem1==1):\n true_pos += 1\n elif(elem1==elem2) and (elem2!=1):\n true_neg += 1\n elif(elem1 != 1):\n false_neg +=1\n else:\n false_pos +=1\n conf_matrix = np.array([[true_pos, false_neg],[false_pos, true_neg]])\n return conf_matrix", "def get_confmatrix(self,y_pred,y_test):", "def confusion_matrix(actual: list, predicted: list) -> list:\n return confusion_matrix(actual, predicted)", "def Confusion_Matrix(predicted_labels: list, actual_labels: list):\n labels = set(actual_labels)\n\n predicted_labels = list(map(custom_round, predicted_labels))\n\n matrix = pd.DataFrame(index=labels, columns=labels)\n\n matrix = matrix.fillna(0)\n\n for i in range(len(actual_labels)):\n 
matrix[actual_labels[i]][predicted_labels[i]] += 1\n m = matrix.values\n\n plt.matshow(m, cmap=plt.cm.Blues)\n\n for i in range(2):\n for j in range(2):\n c = m[j, i]\n plt.text(i, j, str(c), va='center', ha='center')\n\n plt.show()", "def custom_confusion_matrix(predictions, targets):\n tp, fp, fn, tn = [], [], [], []\n\n for pred, targ in zip(predictions, targets):\n for shift_pred, shift_targ in zip(pred, targ):\n if shift_pred == 1 and shift_targ == 1: # True positive\n tp.append(1)\n elif shift_pred == 1 and shift_targ == 0: # False positive\n fp.append(1)\n elif shift_pred == 0 and shift_targ == 1: # False negative\n fn.append(1)\n elif shift_pred == 0 and shift_targ == 0: # True negative:\n tn.append(1)\n\n tp_count = len(tp)\n fp_count = len(fp)\n fn_count = len(fn)\n tn_count = len(tn)\n\n conf_matrix = np.array([\n [tp_count, fp_count],\n [fn_count, tn_count]\n ])\n\n return conf_matrix", "def confusion_matrix(links_true, links_pred, total=None):\n\n links_true = _get_multiindex(links_true)\n links_pred = _get_multiindex(links_pred)\n\n tp = true_positives(links_true, links_pred)\n fp = false_positives(links_true, links_pred)\n fn = false_negatives(links_true, links_pred)\n\n if total is None:\n tn = numpy.nan\n else:\n if isinstance(total, pandas.MultiIndex):\n total = len(total)\n tn = true_negatives(links_true, links_pred, total)\n\n return numpy.array([[tp, fn], [fp, tn]])", "def confusion_matrix(expected, predicted):\n\n retval = numpy.zeros((10,10), dtype=float)\n\n for k in range(10):\n pred_k = predicted[expected==k] # predictions that are supposed to be 'k'\n retval[:,k] = numpy.array([len(pred_k[pred_k==p]) for p in range(10)])\n retval[:,k] /= len(pred_k)\n\n return retval", "def confusion_matrix(y_true, y_pred, labels):\n\n #Define variables\n matrix = []\n #Creates matrix dimensions\n for i in range(len(labels)):\n matrix.append([])\n for j in range(len(labels)):\n matrix[i].append(0)\n\n for i in range(len(y_true)):\n trueIndex = -1\n predIndex = -1\n #Get indexes of true and predicted values\n for j, label in enumerate(labels):\n if(label == y_true[i]):\n trueIndex = j\n if(label == y_pred[i]):\n predIndex = j\n matrix[trueIndex][predIndex] = matrix[trueIndex][predIndex] + 1\n\n return matrix", "def getConfusionMatrix(pred, real):\n # print pd.crosstab(pred, real) \n \n total = float(real.shape[0])\n \n tp = 0 # true positive\n tn = 0 # true negitive\n fp = 0 # false positive\n fn = 0 # false negitive\n for predicted, actual in zip(pred, real):\n if predicted == actual:\n if predicted == 1:\n tp += 1\n else:\n tn += 1\n else:\n if predicted == 1:\n fp += 1\n else:\n fn += 1\n \n\n print \"(tp, tn, fp, fn):\" , tp, tn, fp, fn\n print \"accuracy is :\", (tp+tn)/total", "def Evaluate_Prediction(prediction_mask, true_mask, feature_dict, \n test_name = 'Test'):\n \n # true_mask has 3 layers but they are redundant\n true_mask = true_mask[:,:,0]\n \n # Convert from Prob to 0,1,2...\n prediction_mask = prediction_mask.argmax(axis = 2) + 1 \n\n # Compute confusion matrix -- subtract 1 so that first label is \"0\" \n conf = custom_confusion_matrix(prediction_mask.flatten(), true_mask.flatten(), feature_dict)\n \n # Convert mask to proper shape for loss function - shape should have 4 dimensions with one-hot encoding\n true_mask = Expand_Mask(mask = true_mask, num_class = len(feature_dict)) ## to 0,1\n true_mask = np.expand_dims(true_mask, axis=0)\n true_mask = true_mask.astype(np.float)\n\n # Convert prediction into proper shape for loss function\n prediction_mask = 
Expand_Mask(mask = prediction_mask, num_class = len(feature_dict)) #to 0,1\n prediction_mask = np.expand_dims(prediction_mask, axis=0) \n prediction_mask = prediction_mask.astype(np.float)\n \n score = {'Test':test_name, \n 'Dice':Dice_Coef_Multilabel(true_mask, prediction_mask).numpy(), \n 'Accuracy':np.mean(tf.metrics.categorical_accuracy(true_mask, prediction_mask)), \n 'CE':np.mean(tf.metrics.categorical_crossentropy(true_mask, prediction_mask))}\n \n return [score, conf]", "def custom_confusion_matrix(prediction_vector, true_vector, feature_dict ):\n \n values = list(feature_dict.keys())\n values.sort()\n nvals = len(values)\n confusion_matrix = np.zeros((nvals, nvals))\n for i in range(len(values)):\n for j in range(len(values)):\n mask = (true_vector==values[i]) & (prediction_vector==values[j]) \n confusion_matrix[i,j] = mask.sum()\n \n return confusion_matrix", "def confusion_matrix(\n true_labels,\n predicted_labels\n ) -> np.array:\n n_samples_true, n_samples_predicted = len(true_labels), len(predicted_labels)\n if n_samples_true != n_samples_predicted:\n raise ValueError()\n n_classes = len(set(true_labels))\n matrix = np.zeros((n_classes,n_classes))\n for i in range(len(true_labels)):\n true_label = true_labels[i]\n predicted_label = predicted_labels[i]\n matrix[predicted_label][true_label] += 1\n return matrix", "def confusion_matrix(predict, labels, num_classes):\n # Compute the count of correct and error samples in each snr.\n conf = np.zeros([num_classes, num_classes])\n for i in range(0, len(labels)):\n j = labels[i]\n k = np.argmax(predict[i])\n conf[j, k] = conf[j, k] + 1\n\n # Compute the count of correct and error ratio in each snr.\n # =====confusion matrix=====.\n conf_norm = np.zeros([num_classes, num_classes])\n for i in range(0, num_classes):\n conf_norm[i, :] = conf[i, :] / np.sum(conf[i, :])\n\n return conf_norm", "def confusion_matrix(self, y_true=None, y_pred=None, labels=None, normalize=None, **kwargs):\n y_true, y_pred, binary, representor, decimal = self.get_processed_data(y_true, y_pred, decimal=None)\n matrix, imap, imap_count = cu.calculate_confusion_matrix(y_true, y_pred, labels, normalize)\n return matrix, imap, imap_count", "def confusionMatrix(testDataPredictions, testDataOriginal):\n matrix = {\"predicted >50K correctly as >50K\": 0, \"predicted >50K incorrectly as <=50K\": 0,\n \"predicted <=50K correctly as <=50K\": 0, \"predicted <=50K incorrectly as >50K\": 0}\n\n for instance in range(len(testDataPredictions)):\n prediction = testDataPredictions[instance]\n original = testDataOriginal[14].iloc[instance]\n\n #calculating total number of TP,TN,FP and FN\n\n if prediction == 1.0 and original == 1.0:\n matrix[\"predicted >50K correctly as >50K\"] += 1.00\n elif prediction == 0.0 and original == 1.0:\n matrix[\"predicted >50K incorrectly as <=50K\"] += 1.00\n elif prediction == 0.0 and original == 0.0:\n matrix[\"predicted <=50K correctly as <=50K\"] += 1.00\n elif prediction == 1.0 and original == 0.0:\n matrix[\"predicted <=50K incorrectly as >50K\"] += 1.00\n\n #Making the confusion matrix look readable on console printing\n print('----------------')\n print('CONFUSION MATRIX')\n print( 'TP: ', matrix[\"predicted >50K correctly as >50K\"], '||', 'FP: ', matrix[\"predicted >50K incorrectly as <=50K\"])\n print('----------------')\n print('FN: ', matrix[\"predicted <=50K incorrectly as >50K\"], '||', 'TN: ', matrix[\"predicted <=50K correctly as <=50K\"])\n\n # definition of sensitivity, precision and specificity formulas\n sensitivity = 
matrix[\"predicted >50K correctly as >50K\"] / (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted <=50K incorrectly as >50K\"])\n\n precision = matrix[\"predicted >50K correctly as >50K\"]/ (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n specificity = matrix[\"predicted <=50K correctly as <=50K\"] / (\n matrix[\"predicted <=50K correctly as <=50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n print('Precision: ' + str(precision*100) + '%')\n print('Sensitivity: '+ str(sensitivity*100)+ '%')\n print('Specificity: '+ str(specificity*100) +'%')\n\n return matrix, precision, sensitivity, specificity", "def make_metrics(self, predictions):\n\n pred_idx = []\n pred_classes = []\n\n target_idx = []\n target_classes = []\n target_count = len(self._dataset.class_idx2text)\n\n for data_id, pred in predictions.items():\n target = self._dataset.get_ground_truth(data_id)\n\n pred_idx.append(pred[\"class_idx\"])\n pred_classes.append(self._dataset.class_idx2text[pred[\"class_idx\"]])\n\n target_idx.append(target[\"class_idx\"])\n target_classes.append(target[\"class_text\"])\n\n metrics = {\n \"accuracy\": simple_accuracy(pred_idx, target_idx),\n }\n\n if target_count == 2:\n # binary class\n f1_metric = f1(pred_idx, target_idx)\n metrics.update(f1_metric)\n\n matthews_corr_metric = matthews_corr(pred_idx, target_idx)\n metrics.update(matthews_corr_metric)\n return metrics", "def confusion_matrix_(y_true, y_pred, labels=None):\r\n tp = 0\r\n tn = 0\r\n fp = 0\r\n fn = 0\r\n if labels == None:\r\n values = list(set(y_true))\r\n else:\r\n values = labels\r\n if (len(values)) != 2:\r\n return None\r\n for i, elem in enumerate(y_true):\r\n if y_pred[i] == values[1] and y_true[i] == y_pred[i]:\r\n tp += 1\r\n elif y_pred[i] == values[1] and y_true[i] != y_pred[i]:\r\n fp += 1\r\n elif y_pred[i] == values[0] and y_true[i] == y_pred[i]:\r\n tn += 1\r\n elif y_pred[i] == values[0] and y_true[i] != y_pred[i]:\r\n fn += 1\r\n matrix = np.array([[tp, fp], [fn, tn]])\r\n return matrix", "def confusion_matrix(y_true, y_pred, labels):\r\n matrix = []\r\n\r\n for i, yt in enumerate(labels):\r\n matrix.append([])\r\n for _, yp in enumerate(labels):\r\n matrix[i].append(0)\r\n\r\n for t, p in zip(y_true, y_pred):\r\n t_num = labels.index(t)\r\n p_num = labels.index(p)\r\n matrix[t_num][p_num] += 1\r\n\r\n return matrix", "def confusion_matrix(y_true, y_pred, table_show=True):\n\tFIRST_CLASS = 1\n\tSECOND_CLASS = 0\n\n\tzipped = np.array(list(zip(y_true, y_pred)))\n\ttp, fn, fp, tn = 0, 0, 0, 0\n\n\tfor y_true, y_pred in zipped:\n\t\tif y_true == y_pred and y_true == FIRST_CLASS:\n\t\t\ttp += 1\n\t\telif y_true == y_pred and y_true == SECOND_CLASS:\n\t\t\ttn += 1\n\t\telif y_true != y_pred and y_true == SECOND_CLASS:\n\t\t\tfp += 1\n\t\telse:\n\t\t\tfn += 1\n\n\tif table_show:\n\t\treturn np.array([tp, fn, fp, tn]).reshape([2,2])\n\n\treturn tp, fn, fp, tn", "def confusion_matrix(df):\n rows, true_counts = np.unique(df[\"label\"].values, return_counts=True)\n cols, predicted_counts = np.unique(df[\"label\"].values, return_counts=True)\n\n matrix = np.ndarray(shape=(len(rows), len(cols)), dtype=float)\n for ri, row in enumerate(rows):\n for ci, col in enumerate(cols):\n matrix[ri][ci] = len(df[(df.label == row) & (df.classification == col)])\n\n return matrix, rows, cols", "def confusion_matrix(gt, pred) -> np.ndarray:\n \n # Number of classes inferred from gt. 
Assuming classes are enumerated 0 ..\n n_classes = gt.max() + 1\n cm = np.zeros((n_classes, n_classes), dtype=np.uint32)\n \n # Fill matrix\n for gt_class in range(n_classes):\n for pred_class in range(n_classes):\n cm[pred_class, gt_class] = ((pred == pred_class) & (gt == gt_class)).sum()\n \n return cm", "def confusion_matrix_pd(Y_true, Y_pred):\n Y_true = pd.Series([ACTIVITIES[y] for y in np.argmax(Y_true, axis=1)])\n Y_pred = pd.Series([ACTIVITIES[y] for y in np.argmax(Y_pred, axis=1)])\n return pd.crosstab(Y_true, Y_pred, rownames=['True'], colnames=['Pred'])", "def confusion_matrix(y_true, y_pred):\n skplt.plot_confusion_matrix(y_true, y_pred, normalize=True)\n plt.show()", "def _prep_confusion_matrix(self, y_test, y_pred, labels):\n\n # Calculate confusion matrix and flatten it to a simple array\n if len(y_test.shape) == 1:\n confusion_array = metrics.confusion_matrix(y_test, y_pred).ravel()\n\n # Structure into a DataFrame suitable for Qlik\n result = []\n i = 0\n for t in labels:\n for p in labels:\n result.append([str(t), str(p), confusion_array[i]])\n i = i + 1\n self.model.confusion_matrix = pd.DataFrame(result, columns=[\"true_label\", \"pred_label\", \"count\"])\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)\n # Handle confusion matrix format for multi-label classification\n else:\n confusion_array = metrics.multilabel_confusion_matrix(y_test, y_pred)\n result = pd.DataFrame(confusion_array.reshape(-1, 4), columns=[\"true_negative\", \"false_positive\", \"false_negative\", \"true_positive\"])\n self.model.confusion_matrix = pd.DataFrame(np.arange(len(confusion_array)), columns=[\"step\"])\n self.model.confusion_matrix = pd.concat([self.model.confusion_matrix, result], axis=1)\n self.model.confusion_matrix.insert(0, \"model_name\", self.model.name)", "def compute_confuse_matrix(y_targetlabel_list_single, y_logit_array_single, label_dict, name='default'):\n #1.get target label and predict label\n # y_target_labels=get_target_label_short(y_targetlabel_list_single) #e.g. y_targetlabel_list[0]=[2,12,88]\n y_target_labels = y_targetlabel_list_single\n\n # y_predict_labels=[i for i in range(len(y_logit_array_single)) if y_logit_array_single[i]>=0.50] #TODO 0.5PW e.g.[2,12,13,10]\n # y_predict_labels= y_logit_array_single.index(min(y_logit_array_single))\n\n flag = max(y_logit_array_single)\n y_predict_labels = []\n for i in range(len(y_logit_array_single)):\n if abs(y_logit_array_single[i] - flag) < 0.1:\n y_predict_labels.append(i)\n\n a = list(set(y_target_labels))\n b = list(set(y_predict_labels))\n acc = operator.eq(a,b)\n\n #if len(y_predict_labels)<1: y_predict_labels=[np.argmax(y_logit_array_single)] #TODO ADD 2018.05.29\n if random.choice([x for x in range(random_number)]) ==1:\n print(name+\".y_target_labels:\",y_target_labels,\";y_predict_labels:\",y_predict_labels) #debug purpose\n\n #2.count number of TP,FP,FN for each class\n y_labels_unique=[]\n y_labels_unique.extend(y_target_labels)\n y_labels_unique.extend(y_predict_labels)\n y_labels_unique=list(set(y_labels_unique))\n for i,label in enumerate(y_labels_unique): #e.g. 
label=2\n TP, FP, FN = label_dict[label]\n if label in y_predict_labels and label in y_target_labels:#predict=1,truth=1 (TP)\n TP=TP+1\n elif label in y_predict_labels and label not in y_target_labels:#predict=1,truth=0(FP)\n FP=FP+1\n elif label not in y_predict_labels and label in y_target_labels:#predict=0,truth=1(FN)\n FN=FN+1\n label_dict[label] = (TP, FP, FN)\n return label_dict, acc", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def plot_confusion_matrix(\n Y_pred,\n Y_true,\n config,\n target_names=None,\n species_dict=None,\n title=\"Confusion matrix\",\n cmap=None,\n normalize=False,\n scores=False,\n score_size=12,\n save=None,\n):\n # Remove predictions if true labels are not in test set.\n missings = list(\n chain.from_iterable(\n (Y_true[i] + d for d in range(1, diff))\n for i, diff in enumerate(map(sub, Y_true[1:], Y_true))\n if diff > 1\n )\n )\n target_names = list(target_names)\n # print(\"Y_pred:\", Y_pred)\n # print(\"Y_true:\", Y_true)\n # print(\"len(target_names):\", len(target_names))\n v_missings = []\n for k, v in species_dict.items():\n if v in missings:\n print(\"Species {}: {} has no test images anymore.\".format(v, k))\n if v not in Y_pred:\n v_missings.append(v)\n # print(\"v_missings:\", v_missings)\n # print(\"len(target_names):\", len(target_names))\n # i, = np.where(Y_pred in missings)\n if title is True:\n title = config.exp_name\n cm = confusion_matrix(y_pred=Y_pred, y_true=Y_true)\n for m in v_missings:\n cm = np.insert(cm, m, 0, axis=0)\n cm = np.insert(cm, m, 0, axis=1)\n # print(cm.shape)\n # print(cm)\n cm[np.isnan(cm)] = 1\n accuracy = np.trace(cm) / float(np.sum(cm))\n misclass = 1 - accuracy\n if cmap is None:\n cmap = plt.get_cmap(\"Blues\")\n if normalize:\n cm = cm.astype(\"float32\") / cm.sum(axis=1)[:, np.newaxis]\n cm = np.round(cm, 2)\n thresh = cm.max() / 1.5 if normalize else cm.max() / 2\n fig = plt.figure(figsize=(35, 35))\n if title is not None:\n plt.title(title, fontsize=75)\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n plt.grid(False)\n cb = plt.colorbar(fraction=0.046, pad=0.04)\n cb.ax.tick_params(labelsize=45)\n if target_names is not None:\n tick_marks = np.arange(len(target_names))\n plt.xticks(\n tick_marks,\n target_names,\n rotation=45,\n horizontalalignment=\"right\",\n fontsize=9,\n )\n plt.yticks(tick_marks, target_names, fontsize=9)\n if scores:\n for i, j in 
itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if normalize:\n if cm[i, j] > 0:\n plt.text(\n j,\n i,\n \"{:0.2f}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n fontsize=score_size,\n )\n else:\n if cm[i, j] > 0:\n plt.text(\n j,\n i,\n \"{:,}\".format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n fontsize=score_size,\n )\n plt.tight_layout()\n plt.ylabel(\"True label\", fontsize=45)\n plt.xlabel(\n \"Predicted label\\naccuracy={:0.4f}, misclass={:0.4f}\".format(\n accuracy, misclass\n ),\n fontsize=45,\n )\n if save is not None:\n plt.savefig(save)\n logging.info(\"The confusion matrix has been saved as {}\".format(save))\n plt.show(fig)\n plt.close()", "def create_confusion_matrix(prediction: list, true_y: list, save_location: str):\n # Create confusion matrix in pandas\n results = pd.DataFrame({\"prediction\": prediction, \"expected\": true_y})\n results = results.assign(combined=results.loc[:, \"prediction\"] + \"_\" + results.loc[:, \"expected\"])\n confusion_matrix = results.pivot_table(\n values=\"combined\",\n index=\"prediction\",\n columns=\"expected\",\n aggfunc=lambda x: len(x),\n ).fillna(0)\n\n # Create plot\n fig = plt.figure(figsize=(18, 16), dpi=80, edgecolor=\"k\")\n sns.heatmap(confusion_matrix, annot=True, fmt=\"g\")\n\n os.makedirs(save_location, exist_ok=True)\n plt.savefig(os.path.join(save_location, \"confusion_matrix.png\"))\n\n # save pandas confusion_matrix\n confusion_matrix.to_csv(os.path.join(save_location, \"confusion_matrix.csv\"), index=False)", "def confusion_df(y_true, y_pred):\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n \n # Put into pandas dataframe\n confusion = pd.DataFrame({'Predicted Negative': [tn, fn], 'Predicted Positive': [fp, tp]}, \n index=['Actual Negative', 'Actual Positive']) \n \n return confusion", "def evaluate(ground_truth, prediction):\n\n def prfs_to_dict(prfs):\n \"\"\"Returns a precision_recall_fscore_support() result as a dict.\"\"\"\n return {\"precision\": prfs[0], \"recall\": prfs[1], \"fscore\": prfs[2]}\n\n results = {}\n items_count = len(ground_truth)\n\n # accuracy\n accuracy = accuracy_score(ground_truth, prediction)\n results[\"accuracy\"] = accuracy\n\n # confusion matrix\n categories = set(ground_truth) | set(prediction)\n confusions = {\n gold: {pred: 0 for pred in categories} for gold in categories\n }\n for g, p in zip(ground_truth, prediction):\n confusions[g][p] += 1\n results[\"confusions\"] = confusions\n\n # class wise precision, recall & f1\n classwise = precision_recall_fscore_support(\n ground_truth, prediction, average=None, warn_for=()\n )\n results[\"true_cat_dist\"] = list(classwise[-1])\n results[\"classwise\"] = {\n str(cl): prfs_to_dict(\n [classwise[0][cl], classwise[1][cl], classwise[2][cl]]\n )\n for cl in categories\n }\n\n # average precision, recall & f1\n results[\"macro_avg\"] = prfs_to_dict(\n precision_recall_fscore_support(\n ground_truth,\n prediction,\n average=\"macro\",\n pos_label=None,\n warn_for=(),\n )\n )\n results[\"micro_avg\"] = prfs_to_dict(\n precision_recall_fscore_support(\n ground_truth,\n prediction,\n average=\"micro\",\n pos_label=None,\n warn_for=(),\n )\n )\n results[\"weigh_avg\"] = prfs_to_dict(\n precision_recall_fscore_support(\n ground_truth,\n prediction,\n average=\"weighted\",\n pos_label=None,\n warn_for=(),\n )\n )\n\n # marginals\n gold_category_distribution = {\n g: sum([confusions[g][p] for p in categories]) 
for g in categories\n }\n pred_category_distribution = {\n p: sum([confusions[g][p] for g in categories]) for p in categories\n }\n\n # kappa\n expected_agreement_fleiss = sum(\n [\n (\n (gold_category_distribution[c] + pred_category_distribution[c])\n / (2.0 * items_count)\n )\n ** 2\n for c in categories\n ]\n )\n expected_agreement_cohen = sum(\n [\n (float(gold_category_distribution[c]) / items_count)\n * (float(pred_category_distribution[c]) / items_count)\n for c in categories\n ]\n )\n kappa_fleiss = (\n 1.0\n * (accuracy - expected_agreement_fleiss)\n / (1 - expected_agreement_fleiss)\n )\n kappa_cohen = (\n 1.0\n * (accuracy - expected_agreement_cohen)\n / (1 - expected_agreement_cohen)\n )\n results[\"k_fleiss\"] = {\n \"k\": kappa_fleiss,\n \"AE\": expected_agreement_fleiss,\n \"AO\": accuracy,\n }\n results[\"k_cohen\"] = {\n \"k\": kappa_cohen,\n \"AE\": expected_agreement_cohen,\n \"AO\": accuracy,\n }\n\n return results", "def confusion(prediction, truth):\n\n confusion_vector = prediction / truth\n # Element-wise division of the 2 tensors returns a new tensor which holds a\n # unique value for each case:\n # 1 where prediction and truth are 1 (True Positive)\n # inf where prediction is 1 and truth is 0 (False Positive)\n # nan where prediction and truth are 0 (True Negative)\n # 0 where prediction is 0 and truth is 1 (False Negative)\n\n true_positives = torch.sum(confusion_vector == 1).item()\n false_positives = torch.sum(confusion_vector == float('inf')).item()\n true_negatives = torch.sum(torch.isnan(confusion_vector)).item()\n false_negatives = torch.sum(confusion_vector == 0).item()\n\n return true_positives, false_positives, true_negatives, false_negatives", "def confusion_matrix_heatmap(y_test, preds, classification_labels):\n labels = list(set(y_test))\n long_labels = [ll + \" (\" + str(l) + \")\" for ll, l\n in zip(classification_labels, labels)]\n cm = confusion_matrix(y_test, preds, labels=labels)\n fig = plt.figure(figsize=(20, 20))\n ax = fig.add_subplot(111)\n cax = ax.matshow(cm)\n plt.title('Confusion matrix of the classifier')\n fig.colorbar(cax)\n ax.set_xticks(np.arange(len(labels)))\n ax.set_yticks(np.arange(len(labels)))\n ax.set_xticklabels(labels, rotation=45)\n ax.set_yticklabels(long_labels)\n\n for i in range(len(cm)):\n for j in range(len(cm)):\n text = ax.text(j, i, cm[i, j],\n ha=\"center\", va=\"center\", color=\"w\")\n\n plt.xlabel('Predicted')\n plt.ylabel('True')\n # fig.tight_layout()\n plt.show()", "def classification_metrics(self, target_data, predicted):\n from sklearn import preprocessing\n from sklearn import metrics \n\n y_true_copy, predictions = pd.DataFrame(self.target_data), predicted\n #y_true_copy.unique()\n np.unique(y_true_copy)\n encode = {}\n for i in range(len(np.unique(y_true_copy))):\n encode[np.unique(y_true_copy)[i]] = i\n \n predicted_copy = [encode[i] for i in predictions]\n \n y_true_copy.replace(encode, inplace=True)\n \n if len(y_true_copy) != 0:\n #Accuracy\n accuracy = round(metrics.accuracy_score(y_true_copy, predicted_copy),2) \n #Precision\n precision = round(metrics.precision_score(y_true_copy, predicted_copy, zero_division=1),2) \n #Recall\n recall = round(metrics.recall_score(y_true_copy, predicted_copy, zero_division=1),2)\n tn, fp, fn, tp = metrics.confusion_matrix(y_true_copy, predicted_copy).ravel()\n #False Positive Rate (FPR)\n fpr = round((fp/(fp+tn)),2)\n #Flase Negative Rate (FNR)\n fnr = round((fn/(tp+fn) if (tp+fn) else 0),2) \n results = {'accuracy':accuracy, 'precision':precision, 
'recall':recall, 'fpr': fpr, 'fnr':fnr}\n return results\n else:\n raise Exception(\"Metrics calculation failed\")", "def get_confusion_matrix(labels_true: np.ndarray, labels_pred: np.ndarray) -> sparse.csr_matrix:\n check_vector_format(labels_true, labels_pred)\n mask = (labels_true >= 0) & (labels_pred >= 0)\n if np.sum(mask):\n n_labels = max(max(labels_true), max(labels_pred)) + 1\n row = labels_true[mask]\n col = labels_pred[mask]\n data = np.ones(np.sum(mask), dtype=int)\n return sparse.csr_matrix((data, (row, col)), shape=(n_labels, n_labels))\n else:\n raise ValueError('No sample with both true non-negative label and predicted non-negative label.')", "def confusion_matrix(true_values, predictions, threshold, title, **kwargs):\n matrix = metrics.confusion_matrix(true_values, predictions >= threshold)\n\n title = title + ' (threshold {:.5f})'.format(threshold)\n print(title)\n return format_confusion_matrix(matrix, title=title, classes=['non-BGC', 'BGC'], **kwargs)", "def get_confusion_matrix(self):\n return confusion_matrix(self.test_y, self.predict())", "def confusion(self,xs,ys):\n n = self.param['numClasses']\n assert n > 1, \"Confusion matrices can only be obtained for classification data.\" \n preds = self.predicts(xs)\n conf = [[0] * n for i in range(n)]\n for (y,p) in zip(ys,preds):\n conf[y][p] += 1\n return conf", "def show_confusion_matrix(correct_y, predict_y, category_list, results_path,\n mapping_fn=None, data_x=None):\n import matplotlib.pyplot as plt\n confused_examples = join(results_path, 'confused')\n if data_x is not None:\n if exists(confused_examples):\n ut.remove_dirs(confused_examples, quiet=True)\n ut.ensuredir(confused_examples)\n size = len(category_list)\n\n if mapping_fn is None:\n # Identity\n category_mapping = {key: index for index,\n key in enumerate(category_list)}\n category_list_ = category_list\n else:\n category_mapping = mapping_fn(category_list)\n assert all([category in category_mapping.keys()\n for category in category_list]), 'Not all categories are mapped'\n values = list(category_mapping.values())\n assert len(list(set(values))) == len(\n values), 'Mapped categories have a duplicate assignment'\n assert 0 in values, 'Mapped categories must have a 0 index'\n temp = list(category_mapping.iteritems())\n temp = sorted(temp, key=itemgetter(1))\n category_list_ = [t[0] for t in temp]\n\n confidences = np.zeros((size, size))\n counters = {}\n for index, (correct, predict) in enumerate(zip(correct_y, predict_y)):\n # Ensure type\n correct = int(correct)\n predict = int(predict)\n # Get the \"text\" label\n example_correct_label = category_list[correct]\n example_predict_label = category_list[predict]\n # Perform any mapping that needs to be done\n correct_ = category_mapping[example_correct_label]\n predict_ = category_mapping[example_predict_label]\n # Add to the confidence matrix\n confidences[correct_][predict_] += 1\n if data_x is not None and correct_ != predict_:\n example = data_x[index]\n example_name = '%s^SEEN_INCORRECTLY_AS^%s' % (\n example_correct_label, example_predict_label, )\n if example_name not in counters.keys():\n counters[example_name] = 0\n counter = counters[example_name]\n counters[example_name] += 1\n example_name = '%s^%d.png' % (example_name, counter)\n example_path = join(confused_examples, example_name)\n # TODO: make write confused examples function\n cv2.imwrite(example_path, example)\n\n row_sums = np.sum(confidences, axis=1)\n norm_conf = (confidences.T / row_sums).T\n\n fig = plt.figure(1)\n plt.clf()\n ax = 
fig.add_subplot(111)\n ax.set_aspect(1)\n res = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet,\n interpolation='nearest')\n\n for x in range(size):\n for y in range(size):\n ax.annotate(str(int(confidences[x][y])), xy=(y, x),\n horizontalalignment='center',\n verticalalignment='center')\n\n cb = fig.colorbar(res) # NOQA\n plt.xticks(np.arange(size), category_list_[0:size], rotation=90)\n plt.yticks(np.arange(size), category_list_[0:size])\n margin_small = 0.1\n margin_large = 0.9\n plt.subplots_adjust(left=margin_small, right=margin_large,\n bottom=margin_small, top=margin_large)\n plt.xlabel('Predicted')\n plt.ylabel('Correct')\n return fig", "def get_confusion_matrix(true_label, predictions, num_index):\n class_matrix = np.zeros(shape=(num_index, num_index))\n false_group = [[] for _ in range(num_index)]\n for idx, true, pred in zip(range(len(predictions)),true_label, predictions):\n class_matrix[true][pred] += 1\n if true != pred:\n false_group[true].append(idx)\n return class_matrix, false_group", "def get_confusion_matrix(self, y_true, y_pred):\n import tensorflow as tf\n import pandas as pd\n \n assert( self.nerTags)\n \n # get ner tags, but not the <s>, </s.>, <unk> ones\n tags = self.nerTags.ids_to_words( range( 3, len(self.nerTags.wordset)))\n \n cm = tf.confusion_matrix( y_true, y_pred)\n sess = tf.Session()\n with sess.as_default():\n cm = sess.run(cm)\n\n # drop the tags we don't use (<s>, etc.)\n cm = cm[3:,3:]\n\n return pd.DataFrame( cm, index=tags, columns=tags)", "def metrics(self, predictions, gts, label_list):\n prediction_labels = np.concatenate([predictions.flatten()])\n gt_labels = np.concatenate([gts.flatten()])\n\n cm = metrics.confusion_matrix(\n gt_labels,\n prediction_labels,\n range(len(label_list)))\n\n # print(\"Confusion matrix :\")\n # print(cm)\n # print(\"---\")\n # Compute global accuracy\n accuracy = sum([cm[x][x] for x in range(len(cm))])\n total = sum(sum(cm))\n oa = accuracy * 100 / float(total)\n # print(\"{} pixels processed\".format(total))\n # print(\"Total accuracy : {}%\".format(accuracy * 100 / float(total)))\n # print(\"---\")\n # Compute kappa coefficient\n total = np.sum(cm)\n pa = np.trace(cm) / float(total)\n pe = np.sum(np.sum(cm, axis=0) * np.sum(cm, axis=1)) / float(total * total)\n kappa = (pa - pe) / (1 - pe)\n # print(\"Kappa: \" + str(kappa))\n return kappa, oa", "def binary_classification_metrics(prediction, ground_truth):\n precision = 0\n recall = 0\n accuracy = 0\n f1 = 0\n\n correct = sum(a == b for a, b in zip(prediction, ground_truth))\n\n true_pos = 0\n false_pos = 0\n false_neg = 0\n\n for i in range(len(prediction)):\n if prediction[i]:\n true_pos += (prediction[i] == ground_truth[i])\n false_pos += (prediction[i] != ground_truth[i])\n else:\n false_neg += (prediction[i] != ground_truth[i])\n\n accuracy = correct / len(ground_truth)\n\n if true_pos + false_pos == 0:\n precision = 0\n else:\n precision = true_pos / (true_pos + false_pos)\n if true_pos + false_neg == 0:\n recall = 0\n else:\n recall = true_pos / (true_pos + false_neg)\n if precision + recall == 0:\n f1 = 0\n else:\n f1 = 2 * (precision * recall) / (precision + recall)\n\n return precision, recall, f1, accuracy", "def grouped_confusion_matrix(y_true, y_pred, y_pred_max):\n # Sanity checks\n assert set(np.unique(y_true)).issubset(set([0, 1, 2]))\n assert set(np.unique(y_pred)).issubset(set([0, 1]))\n assert len(y_pred) == len(y_true)\n assert y_true.ndim == 1\n assert y_pred.ndim == 1\n\n conf_mx = np.zeros((3, 2), dtype=int)\n\n for gt_label in range(3):\n 
mask = y_true == gt_label # find ground truth\n\n # for pred_label in range(2):\n # conf_mx[gt_label, pred_label] = np.sum(y_pred_max[mask] == pred_label)\n if gt_label < 2:\n for pred_label in range(2):\n conf_mx[gt_label, pred_label] = np.sum(y_pred_max[mask] == pred_label)\n else:\n for pred_label in range(2):\n conf_mx[gt_label, pred_label] = np.sum(y_pred[mask] == pred_label)\n\n\n return conf_mx", "def compute_confusion_matrix(model, lb, all_images, true_labels):\n\n # # load the model and label binarizer\n # print(\"[INFO] loading network and label binarizer...\")\n # model = load_model(model)\n # lb = pickle.loads(open(label_bin, \"rb\").read())\n\n lab2i = {label: j for j, label in enumerate(lb.classes_)}\n print(f\"Lab2i {lab2i}\")\n\n # make a prediction on the image\n preds = model.predict(all_images)\n print(f\"Shape preds {preds.shape}\")\n # print(f'Preds {preds}')\n\n all_best_i = preds.argmax(axis=1)\n print(f\"Shape all_best_i {all_best_i.shape}\")\n\n confusion = np.zeros((len(lb.classes_), len(lb.classes_)), dtype=np.uint16)\n\n for j, pro in enumerate(preds):\n # i = pro.argmax(axis=1)\n i = pro.argmax(axis=0)\n predicted_label = lb.classes_[i]\n correct = \"TRUE\" if true_labels[j] == predicted_label else \"FALSE\"\n print(\n f\"True: {true_labels[j]}\\tPredicted {predicted_label} with {pro[i]*100:.4f}%\\t{correct}\"\n )\n\n confusion[lab2i[predicted_label], lab2i[true_labels[j]]] += 1\n\n # print(f'Confusion matrix\\n{confusion}')\n return confusion, lb.classes_", "def createConfusionMatrix(all_predicts, thresholds):\n for data, threshold in zip(all_predicts, thresholds):\n y_test = [elem[0] for elem in data]\n y_pred = [elem[1] for elem in data]\n\n class_names = sorted(set(y_test))\n\n # Compute confusion matrix\n cnf_matrix = confusion_matrix(y_test, y_pred)\n\n # Convert values to float\n cnf_matrix = cnf_matrix.astype(float)\n\n # Convert values to percentage of row\n for i, vec in enumerate(cnf_matrix):\n cnf_matrix[i] = vec / sum(vec)\n\n # Turn it into a dataframe\n df_cm = pd.DataFrame(cnf_matrix, index= class_names)\n df_cm.columns = class_names\n plt.figure(figsize=(10, 7))\n sb.heatmap(df_cm, annot=True, fmt='.4f', linewidth=1, cbar=False, cmap='Blues')\n plt.title('Inception v3 \\nAccuracy: {0:.3f}\\n'.format(accuracy_score(y_test, y_pred)))\n plt.ylabel('True label')\n plt.xlabel('Predicted label');\n plt.savefig(\"conf_matrix_{0}.png\".format(threshold), dpi=100, format=\"png\")", "def confusionMatrix(self):\n \n cn_matrix = confusion_matrix(self.y_Actual, self.y_Predicted, labels=[\"Positive\", \"Neutral\", \"Negative\"])\n print(cn_matrix)\n print(\"Accuracy Score:\", accuracy_score(self.y_Actual, self.y_Predicted))\n print(classification_report(self.y_Actual, self.y_Predicted))", "def evaluate(y_test, pred_labels):\n \n # Converts one-hot code to a label (the index of 1)\n y_test_labels = np.argmax(y_test, axis=1)\n \n # Compare test labels to predicted labels\n score = accuracy_score(y_test_labels, pred_labels)\n \n return y_test_labels, score", "def get_confusion_matrix(gt_label, pred_label, class_num):\r\n index = (gt_label * class_num + pred_label).astype('int32')\r\n label_count = np.bincount(index)\r\n confusion_matrix = np.zeros((class_num, class_num))\r\n\r\n for i_label in range(class_num):\r\n for i_pred_label in range(class_num):\r\n cur_index = i_label * class_num + i_pred_label\r\n if cur_index < len(label_count):\r\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\r\n\r\n return confusion_matrix", "def 
make_predictions(self, output_dict):\n\n data_indices = output_dict[\"data_idx\"]\n pred_logits = output_dict[\"logits\"]\n pred_class_idxs = torch.argmax(pred_logits, dim=-1)\n\n predictions = {\n self._dataset.get_id(data_idx.item()): {\"class_idx\": pred_class_idx.item()}\n for data_idx, pred_class_idx in zip(list(data_indices.data), list(pred_class_idxs.data))\n }\n\n return predictions", "def confusion_matrix(labels,\n predictions,\n weights,\n y,\n num_classes,\n dtype,\n kernel_name=\"cce_confusion_matrix\",\n need_build=True,\n need_print=False):\n\n shape = labels.get(\"shape\")\n util.check_tensor_shape_size(shape)\n shape_predictions = predictions.get(\"shape\")\n util.check_tensor_shape_size(shape_predictions)\n if weights is not None:\n shape_weights = weights.get(\"shape\")\n util.check_tensor_shape_size(shape_weights)\n weights_dtype = weights.get(\"dtype\").lower()\n else:\n shape_weights = None\n weights_dtype = None\n labels_dtype = labels.get(\"dtype\").lower()\n predictions_dtype = predictions.get(\"dtype\").lower()\n\n params_check(shape, shape_predictions, dtype, labels_dtype,\n predictions_dtype, shape_weights, weights_dtype)\n\n util.check_kernel_name(kernel_name)\n labels = tvm.placeholder(shape, dtype=labels_dtype, name=\"labels\")\n prediction = tvm.placeholder(\n shape, dtype=predictions_dtype, name=\"prediction\")\n out_shape = (num_classes, num_classes)\n if weights is not None:\n weight = tvm.placeholder(shape, dtype=weights_dtype, name=\"weight\")\n res = tvm.extern([out_shape], [labels, prediction, weight], \\\n lambda ins, outs: confusion_matrix_ir(ins[0], ins[1], ins[2],\n output=outs[0]),\n dtype=dtype, name=kernel_name)\n sch = tvm.create_schedule([res.op])\n if need_build:\n with build_config:\n mod = tvm.build(\n sch, [labels, prediction, weight, res],\n \"cce\",\n name=kernel_name)\n if need_print:\n with build_config:\n print(\n tvm.lower(\n sch, [labels, prediction, weight, res],\n simple_mode=True))\n else:\n res = tvm.extern([out_shape], [labels, prediction], \\\n lambda ins, outs: confusion_matrix_ir_weight_none(ins[0], ins[1],\n output=outs[0]),\n dtype=dtype, name=kernel_name)\n sch = tvm.create_schedule([res.op])\n if need_build:\n with build_config:\n mod = tvm.build(\n sch, [labels, prediction, res], \"cce\", name=kernel_name)\n if need_print:\n with build_config:\n print(\n tvm.lower(sch, [labels, prediction, res], simple_mode=True))", "def confusion(prediction, truth):\n\n confusion_vector = prediction / truth\n \n # Element-wise division of the 2 tensors returns a new tensor which holds a\n # unique value for each case:\n # 1 where prediction and truth are 1 (True Positive)\n # inf where prediction is 1 and truth is 0 (False Positive)\n # nan where prediction and truth are 0 (True Negative)\n # 0 where prediction is 0 and truth is 1 (False Negative)\n\n true_positives = torch.sum(confusion_vector == 1).item()\n false_positives = torch.sum(confusion_vector == float('inf')).item()\n true_negatives = torch.sum(torch.isnan(confusion_vector)).item()\n false_negatives = torch.sum(confusion_vector == 0).item()\n\n return [true_positives, false_positives, true_negatives, false_negatives]", "def get_confusion_matrix(gt_label, pred_label, class_num):\n index = (gt_label * class_num + pred_label).astype('int32')\n label_count = np.bincount(index)\n confusion_matrix = np.zeros((class_num, class_num))\n\n for i_label in range(class_num):\n for i_pred_label in range(class_num):\n cur_index = i_label * class_num + i_pred_label\n if cur_index < 
len(label_count):\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\n\n return confusion_matrix", "def confusion_matrix(\n y_true: np.ndarray, y_pred: np.ndarray, normalized=True\n) -> np.ndarray:\n cm = metrics.confusion_matrix(y_true, y_pred)\n if normalized is True:\n cm = cm / cm.sum()\n cm = np.around(cm, 2)\n cm[np.isnan(cm)] = 0.0\n return cm", "def perform(y,y_predicted,unique_classes):\n y=np.asarray(y,dtype=int)\n y_predicted=np.asarray(y_predicted,dtype=int)\n \n numcl=len(unique_classes)\n confusion_matrix=np.zeros((numcl,numcl),dtype=float)\n for i in range(len(y)):\n confusion_matrix[y[i],y_predicted[i]]=confusion_matrix[y[i],y_predicted[i]]+1\n perf=np.zeros((2*numcl+3,)) # sensitivity_0,sensitivity_1,...,sensitivity_{C-1}, precision_0,precision_1,...,precision_{C-1}, accuracy, balanced sensitivity, balanced precision \n perf[0:numcl]=confusion_matrix.diagonal()/confusion_matrix.sum(axis=1) # sensitivity and specifity for two classes, (class-wise rates for multi-classes)\n perf[numcl:2*numcl]=confusion_matrix.diagonal()/confusion_matrix.sum(axis=0) # PPV and NPV for two classes, (class-wise predictive rates for multi-classes)\n perf[2*numcl]=confusion_matrix.diagonal().sum()/confusion_matrix.sum(axis=1).sum() # accuracy\n perf[2*numcl+1]=np.mean(perf[0:numcl]) # balanced accuracy for two-classes, average class-wise rate for multi-class \n perf[2*numcl+2]=np.mean(perf[numcl:2*numcl]) # avarage class-wise predictive rate\n return perf,confusion_matrix", "def get_confusion_matrix(scores, labels):\n C = scores.size(-1)\n y_pred = scores.detach().cpu().numpy().reshape(-1, C) # (N, C)\n y_pred = np.argmax(y_pred, axis=1) # (N,)\n\n y_true = labels.detach().cpu().numpy().reshape(-1,)\n\n y = np.bincount(C * y_true + y_pred, minlength=C * C)\n\n if len(y) < C * C:\n y = np.concatenate([y, np.zeros((C * C - len(y)), dtype=np.long)])\n else:\n if len(y) > C * C:\n warnings.warn(\n \"Prediction has fewer classes than ground truth. 
This may affect accuracy.\"\n )\n y = y[-(C * C):] # last c*c elements.\n\n y = y.reshape(C, C)\n\n return y", "def test(self, test_data_path):\n test_data = read_datafile(test_data_path)\n test_data = self.preprocessor.preprocess(test_data)\n\n data = '__label__' + test_data['claim'].astype(str) + test_data['check_worthiness'].astype(str) + ' ' + \\\n test_data['tweet_text']\n\n output = self.run(data)\n\n df = pd.DataFrame()\n df[\"predicted\"] = output.split()\n df[\"labeled\"] = [d.split()[0] for d in data]\n\n cm = confusion_matrix(df[\"labeled\"], df[\"predicted\"], labels=['__label__11','__label__10','__label__00'])\n\n ax= plt.subplot()\n sns.heatmap(cm, annot=True, ax = ax); #annot=True to annotate cells\n\n ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels'); \n ax.set_title('Confusion Matrix'); \n ax.xaxis.set_ticklabels(['__label__11','__label__10','__label__00']); ax.yaxis.set_ticklabels(['__label__11','__label__10','__label__00']);\n\n plt.show()\n\n return np.sum(cm.diagonal()) / np.sum(cm)", "def confusion_matrix_(y_true, y_hat, labels=None, df_option=False):\n matrix = []\n if labels == None:\n labels = np.unique(y_hat)\n for cat1 in labels:\n tmp = []\n for cat2 in labels:\n tmp.append(check(y_true, y_hat, cat1, cat2))\n matrix.append(tmp)\n if not df_option:\n return np.array(matrix)\n return pd.DataFrame(matrix, columns=labels, index=labels)", "def confusion_matrix(results):\n EPS = 1e-10\n cm_raw = base_confusion_matrix(results.true_labels,\n results.inferred_labels, results.label_list)\n\n # For off axis, normalize harmonic mean of row / col inverse errors.\n # The idea here is that this average will go to 1 => BAD, as\n # either the row error or column error approaches 1. That is, if this\n # off diagonal element dominates eitehr the predicted values for this \n # label OR the actual values for this label. 
A standard mean will only\n # go to zero if it dominates both, but these can become decoupled with \n # unbalanced classes.\n row_totals = cm_raw.sum(axis=1, keepdims=True)\n col_totals = cm_raw.sum(axis=0, keepdims=True)\n inv_row_fracs = 1 - cm_raw / (row_totals + EPS)\n inv_col_fracs = 1 - cm_raw / (col_totals + EPS)\n cm_normalized = 1 - harmonic_mean(inv_col_fracs, inv_row_fracs)\n # For on axis, use the F1-score (also a harmonic mean!)\n for i in range(len(cm_raw)):\n recall = cm_raw[i, i] / (row_totals[i, 0] + EPS)\n precision = cm_raw[i, i] / (col_totals[0, i] + EPS)\n if row_totals[i, 0] == col_totals[0, i] == 0:\n cm_normalized[i, i] = -1 # Not values to compute from\n else:\n cm_normalized[i, i] = harmonic_mean(recall, precision)\n\n return ConfusionMatrix(cm_raw, cm_normalized)", "def build_conf_matrix(self):\n pred_vals = []\n for prediction in self.predicted_values:\n pred_vals.append(np.argmax(prediction))\n\n print confusion_matrix(self.true_values, pred_vals, labels=[1, 2, 3, 4])\n\n self.logger.info(\"Confusion Matrix : {}\".format(confusion_matrix(self.true_values, pred_vals,\n labels=[1, 2, 3, 4])))", "def predict(model,test_data, show_confusion_matrix=False):\r\n\r\n pick_model = open(model, \"rb\") # 'model.data'\r\n model = pickle.load(pick_model)\r\n pick_model.close()\r\n\r\n pick_in = open(test_data, \"rb\") # 'data_test.data' , be careful not to leak training data here\r\n data = pickle.load(pick_in)\r\n pick_in.close()\r\n\r\n features = []\r\n labels = []\r\n\r\n for feature, label in data:\r\n features.append(feature)\r\n labels.append(label)\r\n\r\n xtest = features\r\n ytest = labels\r\n \r\n accuracy = model.score(xtest, ytest)\r\n print(\"accuracy: \", accuracy)\r\n \r\n if show_confusion_matrix:\r\n prediction = model.predict(xtest)\r\n cm_array = confusion_matrix(ytest, prediction)\r\n sns.heatmap(cm_array, annot=True, cmap='Blues')\r\n plt.show()", "def plot_confusion_matrix(y_pred, y_true, classes_list):\n fig = plt.figure(figsize=(8, 8))\n cm = confusion_matrix(y_pred, y_true)\n plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n plt.title('Confusion matrix')\n plt.colorbar()\n tick_marks = np.arange(len(classes_list))\n plt.xticks(tick_marks, classes_list, rotation=45)\n plt.yticks(tick_marks, classes_list)\n cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)\n\n thresh = cm.max() / 2.0\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n return fig", "def plot_confusion_matrix1(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def test_confusion_matrix(self):\n perf = self.get_file(\"classification_metrics.csv\")\n schema = [(\"value\", int), (\"predicted\", int)]\n # [true_positive, false_negative, false_positive, true_negative]\n actual_result = [64, 15, 23, 96]\n\n frame = self.context.frame.import_csv(perf, schema=schema)\n\n cm = frame.binary_classification_metrics('value', 'predicted', 1, 1)\n\n conf_matrix = cm.confusion_matrix.values\n cumulative_matrix_list = [conf_matrix[0][0],\n conf_matrix[0][1],\n conf_matrix[1][0],\n conf_matrix[1][1]]\n self.assertEqual(actual_result, cumulative_matrix_list)", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap= cm.Blues,\n save:bool = False):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cmat = confusion_matrix(y_true, y_pred,labels = classes)\n # Only use the labels that appear in the data\n if normalize:\n cmat = cmat.astype('float') / cmat.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cmat)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cmat, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cmat.shape[1]),\n yticks=np.arange(cmat.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cmat.max() / 2.\n for i in range(cmat.shape[0]):\n for j in range(cmat.shape[1]):\n ax.text(j, i, format(cmat[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cmat[i, j] > thresh else \"black\")\n fig.tight_layout()\n plt.show()\n if save:\n cwd=os.getcwd()\n fig.savefig(os.path.join(cwd, 'Keras\\\\Model_images', title +'_CM.png'))\n return ax", "def conf_matrix(model, X_train, y_train, X_test, y_test, train=True):\n from sklearn.metrics import confusion_matrix\n import itertools\n if train==True: \n ypredTrain = model.predict(X_train)\n cm = confusion_matrix(y_train, ypredTrain)\n def plot_conf_matrix(cm, classes, title='Confusion Matrix', cmap=plt.cm.Reds):\n plt.figure(figsize = (5, 5))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, size = 14)\n plt.colorbar(aspect=4)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0, size = 10)\n plt.yticks(tick_marks, classes, size = 10)\n fmt = 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), fontsize = 14,\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.grid(b=None)\n plt.tight_layout()\n plt.ylabel('True label', size = 12)\n plt.xlabel('Predicted label', size = 12)\n plot_conf_matrix(cm, classes = ['Covid-', 'Covid+'], \n title = 'Confusion Matrix\\n\\n(Train)\\n')\n elif train==False:\n ypredTest = model.predict(X_test)\n cm = confusion_matrix(y_test, ypredTest)\n def plot_conf_matrix(cm, classes, title='Confusion Matrix', cmap=plt.cm.Blues):\n plt.figure(figsize = (5, 5))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, size = 14)\n plt.colorbar(aspect=4)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0, size = 10)\n plt.yticks(tick_marks, classes, size = 10)\n fmt = 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt), fontsize = 14,\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.grid(b=None)\n plt.tight_layout()\n plt.ylabel('True label', size = 12)\n plt.xlabel('Predicted label', size = 12)\n plot_conf_matrix(cm, classes = ['Covid-', 'Covid+'], \n title = 'Confusion Matrix\\n\\n(Test)\\n')", "def test_model(model, trainset, testset):\n model.eval()\n \n predictions = []\n actuals = []\n \n for data in testset:\n # data will have batch of features and labels\n X = data[0:4]\n y = data[4:]\n \n pred = np.round(model(X).detach().numpy())\n actual = y.detach().numpy()\n # print(f'pred: {pred}')\n # print(f'actual: {actual}')\n predictions.append(pred)\n actuals.append(actual)\n \n print(accuracy_score(y_true=actuals, y_pred=predictions))\n \n \n # Confusion Matrix\n \n confusion_matrix = np.zeros((3, 3))\n for i,j in zip(predictions, actuals):\n confusion_matrix[i, j] += 1\n print(\"Confusion matrix:\\n\", confusion_matrix)", "def confusion_matrix(pred, target, num_classes=21):\n mat = np.zeros((num_classes, num_classes))\n for c in 
range(num_classes):\n mask = target == c\n if mask.any():\n vec, _ = np.histogram(pred[mask],\n bins=np.arange(num_classes+1))\n mat[c, :] += vec\n\n return mat", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n labels = [int(x) for x in unique_labels(y_true, y_pred)]\n classes = classes[labels]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax, cm", "def evaluate_preds_classification(y_true, y_preds):\n accuracy = accuracy_score(y_true, y_preds)\n precision = precision_score(y_true, y_preds)\n recall = recall_score(y_true, y_preds)\n f1 = f1_score(y_true, y_preds)\n metric_dict = {\"accuracy\": round(accuracy, 2),\n \"precision\": round(precision, 2),\n \"recall\": round(recall, 2),\n \"f1\": round(f1, 2)}\n print(f\"Accuracy: {accuracy * 100:.2f}%\")\n print(f\"Precision: {precision}\")\n print(f\"Recall: {recall}\")\n print(f\"F1 Score: {f1} \\n\")\n return metric_dict", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n ul = unique_labels(y_true, y_pred)\n if np.sum(ul)==0:\n \tul = [0, 1]\n classes = classes[ul]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def analyze_confusion_matrix(confusion_matrix: List[int], num_games: int) -> Dict[str, float]:\n true_positives = confusion_matrix[0]\n false_negatives = confusion_matrix[1]\n false_positives = confusion_matrix[2]\n true_negatives = confusion_matrix[3]\n accuracy = (true_positives + true_negatives) / num_games\n try:\n precision = true_positives / (true_positives + false_positives)\n except ZeroDivisionError:\n precision = 0.0\n error_rate = (false_negatives + false_positives) / num_games\n return {\"True Positives\": true_positives, \"False Negatives\": false_negatives, \"False Positives\": false_positives,\n \"True Negatives\": true_negatives, \"Accuracy\": accuracy, \"Precision\": precision, \"Error Rate\": error_rate}", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig, ax", "def compute(self, dataset: List[str], true_labels: List[int], predicted_labels: List[int]) -> \\\n Tuple[float, np.ndarray, List[Tuple[str, str, str]]]:\n category_count = len(self._labels)\n confusion_matrix = np.zeros((category_count, category_count))\n classification = []\n\n # Build an inverse lookup dictionary to retrieve label descriptions from indices\n descriptions = {v: k for k, v in self._labels.items()}\n\n # Format classification results and compute the confusion matrix\n for image, true, predicted in zip(dataset, true_labels, predicted_labels):\n classification.append((os.path.basename(image), descriptions[true], descriptions[predicted]))\n confusion_matrix[true, predicted] += 1\n\n accuracy = np.trace(confusion_matrix) / np.sum(confusion_matrix)\n\n return accuracy, confusion_matrix, classification", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=[0, 1], yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig, cm", "def confusion_matrix(preds, labels, conf_matrix):\n for p, t in zip(preds, labels):\n conf_matrix[t, p] += 1\n return conf_matrix", "def plot_confusion_matrix(y_true, predictions, classes,\r\n normalize=False,\r\n title=None,\r\n cmap=plt.cm.Blues):\r\n if not title:\r\n if normalize:\r\n title = 'Normalized confusion matrix'\r\n else:\r\n title = 'Confusion matrix, without normalization'\r\n\r\n # Compute confusion matrix\r\n cm = sklearn.metrics.confusion_matrix(y_true, predictions)\r\n # Only use the labels that appear in the data\r\n classes = classes[unique_labels(y_true, predictions)]\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n fig, ax = plt.subplots()\r\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\r\n ax.figure.colorbar(im, ax=ax)\r\n # We want to show all ticks...\r\n ax.set(xticks=np.arange(cm.shape[1]),\r\n yticks=np.arange(cm.shape[0]),\r\n # ... and label them with the respective list entries\r\n xticklabels=classes, yticklabels=classes,\r\n title=title,\r\n ylabel='True label',\r\n xlabel='Predicted label')\r\n\r\n # Rotate the tick labels and set their alignment.\r\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\r\n rotation_mode=\"anchor\")\r\n\r\n # Loop over data dimensions and create text annotations.\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i in range(cm.shape[0]):\r\n for j in range(cm.shape[1]):\r\n ax.text(j, i, format(cm[i, j], fmt),\r\n ha=\"center\", va=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n fig.tight_layout()\r\n return ax", "def accuracy_per_class(y_pred, y_test, columns_list):\n accuracy_dict = {}\n correct_index = 0\n incorrect_index = 1\n for row in range(y_pred.shape[0]):\n curr_correct_index = np.argmax(y_test[row])\n curr_col_name = columns_list[curr_correct_index]\n if np.argmax(y_pred[row]) == curr_correct_index:\n if curr_col_name in accuracy_dict:\n accuracy_dict[curr_col_name][correct_index] += 1\n else:\n accuracy_dict[curr_col_name] = [1, 0]\n elif np.argmax(y_pred[row]) != curr_correct_index:\n if curr_col_name in accuracy_dict:\n accuracy_dict[curr_col_name][incorrect_index] += 1\n else:\n accuracy_dict[curr_col_name] = [0, 1]\n for class_name in accuracy_dict:\n lst = accuracy_dict[class_name]\n percentage = lst[correct_index] / (lst[incorrect_index]+lst[correct_index])\n accuracy_dict[class_name].insert(0, percentage)\n return accuracy_dict", "def get_confusion_matrix(gt_label, pred_label, class_num, ignore_label): #seg_gt, seg_pred, args.num_classes\n\n pred_label = pred_label.flatten()\n if torch.is_tensor(gt_label) == True:\n gt_label = gt_label.cpu().detach().numpy()\n\n gt_label = gt_label.flatten()\n\n valid_flag = gt_label != ignore_label\n valid_inds 
= np.where(valid_flag)[0]\n\n pred_label = pred_label[valid_flag]\n gt_label = gt_label[valid_flag]\n\n index = (gt_label * class_num + pred_label).astype('int32') #gt_label(array([0, 1]), array([316446, 12684])) pred_label (array([0, 1], dtype=uint8), array([ 77728, 251402]))\n\n label_count = np.bincount(index)\n\n confusion_matrix = np.zeros((class_num, class_num))\n\n for i_label in range(class_num):\n for i_pred_label in range(class_num):\n cur_index = i_label * class_num + i_pred_label\n if cur_index < len(label_count):\n confusion_matrix[i_label, i_pred_label] = label_count[cur_index]\n\n return confusion_matrix", "def _confusion_matrix(self, actual, classify, p_sum, reject):\r\n if self.preds is None:\r\n self._predict(classify)\r\n x_actu = pd.Series(actual, name='Actual')\r\n if reject:\r\n y_pred = pd.Series(self.preds[:, 0, 0], name='Predicted')\r\n else:\r\n y_pred = pd.Series(self.preds[:, 0], name='Predicted')\r\n if len(pd.Series(pd.unique(y_pred)).dropna()) == len(np.unique(actual)): # Check if the number of different\r\n # target in y pred is the same than is actual\r\n return pd.crosstab(x_actu, y_pred, margins=p_sum, dropna=False)\r\n else:\r\n df = pd.crosstab(x_actu, y_pred, margins=p_sum, dropna=False)\r\n mask = np.in1d(np.unique(actual), np.unique(y_pred)) # Add the missing targets to y_pred\r\n if p_sum:\r\n column_z = [0] * (len(np.unique(actual)) + 1)\r\n else:\r\n column_z = [0] * len(np.unique(actual))\r\n for idx in np.where(~mask)[0]:\r\n df.insert(loc=int(idx), column=self.targets[idx], value=column_z) # Add a zero column in the matrix\r\n return df", "def plot_confusion_matrix(y_true, y_pred, classes=None,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n if classes is not None:\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n else:\n classes = unique_labels(y_true, y_pred)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig", "def eval_metrics_for_multiclass(self, predicted_answers):\n total_correct_in_all = 0\n total_pred_in_all = len(predicted_answers)\n # initial a dict for total correct in topK counting.\n total_correct_in_topK = dict([(i, 0) for i in self.topK_list])\n total_pred_in_topK = dict([(i, 0) for i in self.topK_list])\n max_topK = max(self.topK_list)\n label_pred = []\n label_true = []\n label_weights = []\n digits = 3\n metrics = {}\n\n for e_id, sample in predicted_answers.iteritems():\n # get all correct ids\n correct_label_indices = sample['correct_labels']\n # current case, we only have a majority lable for the correct label\n label_true.append(correct_label_indices[0])\n # counting all correct for each sample\n total_correct_in_all += len(correct_label_indices)\n # select topK\n sorted_probs_max_topK = sorted(sample['pred_probs'], reverse=True, key=lambda x: x['prob'])[:max_topK]\n top1_pred = sorted_probs_max_topK[0]\n label_pred.append(top1_pred['label_index'])\n\n # for all topK predictions\n for i in range(len(sorted_probs_max_topK)):\n pred = sorted_probs_max_topK[i]\n for topK in self.topK_list:\n if i >= topK:\n continue\n else:\n total_pred_in_topK[topK] += 1\n if pred['label_index'] in correct_label_indices:\n total_correct_in_topK[topK] += 1\n\n if total_correct_in_all != 0:\n # recall@K\n recall_at_K = dict([(k, total_correct_in_topK[k] / (total_correct_in_all * 1.0)) for k in self.topK_list])\n # assign recall@K into metrics\n for k, v in recall_at_K.items():\n # Jie\n # 1 means the greater the better.\n # -1 means the smaller the better.\n metrics['R@{}'.format(k)] = (1, v)\n\n self.logger.info('total_correct_in_all = {}, correct_in_topK = {}, recall@K = {}'.format(total_correct_in_all, sorted(total_correct_in_topK.items()), sorted(recall_at_K.items())))\n # here return all the p,r,f for each label, then we compute the micro average later.\n p, r, f1, s = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average=None)\n total_s = np.sum(s)\n p_micro, r_micro, f1_micro, _ = precision_recall_fscore_support(label_true, label_pred, beta=1.0, labels=range(self.num_classes), average='micro')\n last_lines_heading = ['macro / total', 'weighted_mac / total', 'micro / total']\n target_names = self.classes\n name_width = max(len(cn) for cn in target_names)\n width = max(name_width, max([len(x) for x in last_lines_heading]), digits)\n\n headers = [\"precision\", \"recall\", \"f1-score\", \"support\"]\n head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)\n report = head_fmt.format(u'', *headers, width=width)\n report += u'\\n\\n'\n row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\\n'\n rows = zip(target_names, p, r, f1, s)\n for row in rows:\n label_weights.append(row[4])\n report += row_fmt.format(*row, width=width, digits=digits)\n metrics['P_{}'.format(row[0])] = (1, row[1])\n 
metrics['R_{}'.format(row[0])] = (1, row[2])\n metrics['F1_{}'.format(row[0])] = (1, row[3])\n report += u'\\n'\n\n # compute macro averages\n p_macro = np.average(p, weights = None)\n r_macro = np.average(r, weights = None)\n f1_macro = np.average(f1, weights = None)\n metrics['P_{}'.format(\"macro\")] = (1, p_macro)\n metrics['R_{}'.format(\"macro\")] = (1, r_macro)\n metrics['F1_{}'.format(\"macro\")] = (1, f1_macro)\n report += row_fmt.format(last_lines_heading[0],\n p_macro,\n r_macro,\n f1_macro,\n total_s,\n width=width, digits=digits)\n\n # compute weighted macro average\n label_weights = map(lambda x : x/(total_s * 1.0), label_weights)\n p_weighted_average = np.average(p, weights = label_weights)\n r_weighted_average = np.average(r, weights = label_weights)\n f1_weighted_average = np.average(f1, weights = label_weights)\n metrics['P_{}'.format(\"weighted_macro\")] = (1, p_weighted_average)\n metrics['R_{}'.format(\"weighted_macro\")] = (1, r_weighted_average)\n metrics['F1_{}'.format(\"weighted_macro\")] = (1, f1_weighted_average)\n report += row_fmt.format(last_lines_heading[1],\n p_weighted_average,\n r_weighted_average,\n f1_weighted_average,\n total_s,\n width=width, digits=digits)\n # micro average\n metrics['P_{}'.format(\"micro\")] = (1, p_micro)\n metrics['R_{}'.format(\"micro\")] = (1, r_micro)\n metrics['F1_{}'.format(\"micro\")] = (1, f1_micro)\n report += row_fmt.format(last_lines_heading[2],\n p_micro,\n r_micro,\n f1_micro,\n total_s,\n width=width, digits=digits)\n\n self.logger.info(\"P,R,F1 report as follows:\\n {}\".format(report))\n # only plot it at dev and test time, not during training.\n if self.gen_confusing_matrix:\n\n self.logger.info(\"Generate confusing matrix photo.\")\n # Compute confusion matrix\n conf_matrix = confusion_matrix(label_true, label_pred)\n np.set_printoptions(precision=2)\n\n # Plot non-normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d',\n title='Confusion matrix, without normalization')\n wo_norm_fig_path = os.path.join(self.result_dir, '{}_wo_norm.png'.format(self.result_prefix))\n plt.savefig(wo_norm_fig_path)\n\n # Plot normalized confusion matrix\n plt.figure()\n self.plot_confusion_matrix(conf_matrix, classes=self.brief_classes, ori_fmt='d', normalize=True,\n title='Normalized confusion matrix')\n\n norm_fig_path = os.path.join(self.result_dir, '{}_w_norm.png'.format(self.result_prefix))\n plt.savefig(norm_fig_path)\n\n else:\n self.logger.warn('invalid total_correct_in_all')\n\n return metrics", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = metrics.confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = [\"No\", \"Yes\"] # classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n plt.show()\n return ax", "def score_all(results):\n Y = np.concatenate([results['%dtest'%n] for n in range(10)])\n print score(np.concatenate([results['%dtrain'%n] for n in range(10)]))\n print score(np.concatenate([results['%dtest'%n] for n in range(10)]))\n class_counts = np.asarray([(Y[:,0]==n).sum() for n in range(10)])\n return confusion_matrix(Y[:,0],Y[:,1]), class_counts", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n fig, ax = plt.subplots( figsize=(12,8), dpi=120)\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return fig", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n # classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # print(\"Normalized confusion matrix\")\n # else:\n # print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.4f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n return ax", "def calculate_accuracy(ground_truth_object_list, pred_object_list):\n\n ground_truth_list = np.zeros((1,10))\n pred_list = np.zeros((1,10))\n \n for gt_index in range(len(ground_truth_object_list)):\n \n intClass_ground_truth = classMappingDict[ground_truth_object_list[gt_index]['name']]\n ground_truth_list[0][intClass_ground_truth] = ground_truth_list[0][intClass_ground_truth] + 1\n\n for pred_index in range(len(pred_object_list)):\n\n intClass_pred = classMappingDict[pred_object_list[pred_index]['name']]\n pred_list[0][intClass_pred] = pred_list[0][intClass_pred] + 1\n return ground_truth_list, pred_list", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n #classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=90, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n fig.set_size_inches(18.5, 10.5)\n return ax", "def confusionMetric( self, classTest, classPred):\n # accuracy of the model - in one number\n accuracy = average_precision_score( classTest, classPred )\n # confusion matrix 2x2 matric\n matConf = confusion_matrix(classTest, classPred)\n # cohen Kappa is applicable for unbalanced data\n matCohenKappa = cohen_kappa_score(classTest, classPred)\n # classification report\n strClassificationReport = classification_report(classTest, classPred)\n \n return accuracy, matConf, matCohenKappa, strClassificationReport" ]
[ "0.7215619", "0.71285826", "0.7120823", "0.70471257", "0.70212066", "0.6992223", "0.696946", "0.6948753", "0.6921117", "0.69178545", "0.6913209", "0.69081", "0.68527514", "0.6760738", "0.675917", "0.6750563", "0.6734669", "0.6700748", "0.6682267", "0.6672295", "0.66603005", "0.6654966", "0.6634522", "0.66302985", "0.66160774", "0.6586201", "0.6572117", "0.6544181", "0.65006495", "0.64981025", "0.6495841", "0.6484609", "0.64763254", "0.6475467", "0.6460735", "0.64428824", "0.6442835", "0.64290875", "0.64138556", "0.6404164", "0.63930726", "0.6390009", "0.63825744", "0.63764036", "0.6372682", "0.6326432", "0.6326039", "0.631658", "0.6305017", "0.6301179", "0.6276749", "0.62736183", "0.62695634", "0.6257546", "0.6255568", "0.62546253", "0.62440956", "0.62267345", "0.6221186", "0.61893773", "0.6179938", "0.6174613", "0.61653787", "0.6144645", "0.6144016", "0.613301", "0.612662", "0.6126056", "0.6125691", "0.6124694", "0.6123033", "0.61228126", "0.6111096", "0.610654", "0.60935485", "0.60904473", "0.60854584", "0.6084504", "0.60821337", "0.60788226", "0.60756946", "0.60722613", "0.607038", "0.60701936", "0.60699433", "0.60544044", "0.6052312", "0.6052312", "0.6052312", "0.6052312", "0.6052312", "0.6052312", "0.60506433", "0.6050642", "0.60492337", "0.6047339", "0.6046023", "0.6023467", "0.60211116", "0.60087997" ]
0.72815716
0
Returns argmax, max of dictionary
def argmax(d): return max(d.iteritems(), key=operator.itemgetter(1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def argMax(self):\n if len(self.keys()) == 0: return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def argMax(self):\n if len(list(self.keys())) == 0:\n return None\n all = list(self.items())\n values = [x[1] for x in all]\n maxIndex = values.index(max(values))\n return all[maxIndex][0]", "def argmax(table):\n return max((v,k) for k,v in table.iteritems())[1]", "def get_max_key(dico):\n our_max = 0\n argmax = None\n for key, val in dico.items():\n if val > our_max:\n argmax = key\n our_max = val\n return argmax", "def keywithmaxval(d):\n\treturn max(d, key=lambda k: d[k])", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def keywithmaxval(d): \n v=list(d.values())\n k=list(d.keys())\n return k[v.index(max(v))]", "def max_key (dict):\n output = -1\n for key, value in dict.items():\n output = max(output, key)\n return output", "def keywithmaxval(dictionary): # from https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary/12343826#12343826 \n\tv=list(dictionary.values())\n\tk=list(dictionary.keys())\n\treturn k[v.index(max(v))]", "def keywithmaxval(kwmv_dict):\n values = list(kwmv_dict.values())\n keys = list(kwmv_dict.keys())\n return keys[values.index(max(values))]", "def keywithmaxval(d): \r\n v=list(d.values())\r\n k=list(d.keys())\r\n return k[v.index(max(v))]", "def find_max_key_val_in_dict(in_dict):\n\tmax_key = None\n\tmax_val = -np.inf\n\tfor key,val in in_dict.iteritems():\n\t\tif val >= max_val:\n\t\t\tmax_val = val\n\t\t\tmax_key = key\n\treturn (max_key,max_val)", "def argmax(self, values: pdarray) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"argmax\")\n return k, cast(pdarray, v)", "def argmax(fn,over):\n return max([(arg,fn(arg)) for arg in over],key=lambda v: v[1])[0]", "def max(*args, **kwargs):\n key = kwargs.get(\"key\", lambda x: x)\n args = args[0] if len(args) == 1 else args[:]\n max_value = \"\"\n for arg in args:\n if max_value == \"\":\n max_value = arg\n max_value = arg if key(arg) > key(max_value) else max_value\n return max_value", "def keymaxval (dictionary):\n values = list (dictionary.values())\n return list(dictionary.keys())[values.index(max(values))]", "def __argmax(l: list, key):\n max = float('-inf')\n max_i = -1\n for i in range(len(l)):\n if key(l[i]) > max:\n max = key(l[i])\n max_i = i\n return max_i", "def most_occured(dict):\n\n max = dict['e']\n max_alpha = 'e'\n\n for i, j in zip(dict.values(), dict.keys()):\n\n if max < i:\n max = i\n max_alpha = j\n \n return max_alpha", "def findMax(img):\n\td = minMaxLoc(img)\n\treturn {\"maxVal\":d[\"maxVal\"], \"maxLoc\":d[\"maxLoc\"]}", "def extract_max_value(h: np.ndarray):\n return np.argmax(h, axis=1)", "def data_dict_max(data_dict, feature):\n name = max(filter(lambda k: isinstance(data_dict[k][feature],\n (int, float)), data_dict), key=lambda k: data_dict[k][feature])\n\n return name, data_dict[name][feature]", "def argmax(self, evidence={}):\n if len(evidence)==0:\n return self.v.ind2sub(self.t.argmax())\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n return self.v.ind2sub( self.t[ax].argmax() )", "def key_of_max(d):\n keys = list(d.keys())\n keys.sort()\n return max(keys, key=lambda x: d[x])", "def max_map(freq_map):\n\n max_val = max(freq_map.values())\n return max_val", "def argmax(self, 
values):\n return self.aggregate(values, \"argmax\")", "def dict_max(dic):\n cnt = 0\n for i in dic:\n if dic[i] > cnt:\n cnt = dic[i]\n return cnt", "def test_perf_max():\n dict_time = timeit.timeit(\n \"max(keys_dict.keys())\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[-1]\",\n setup=\"keys_dict = {key: key for key in range(1000, -1000, -1)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.max()\",\n setup=\"from amp_trees import OrderedTreeDict;\"\n \"keys_tree = OrderedTreeDict((key, key) for key in range(1000, -1000, -1))\",\n number=1000\n )\n assert dict_time > tree_time, \"Max method is slow.\"\n assert dict_sort_time > tree_time, \"Max method is slow.\"", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def argmax(x):\n def op(a, b):\n comp = (a[1] > b[1])\n return comp.if_else(a[0], b[0]), comp.if_else(a[1], b[1])\n return tree_reduce(op, enumerate(x))[0]", "def max_move(d):\n v = list(d.values())\n k = list(d.keys())\n return k[v.index(max(v))]", "def best_score(a_dictionary):\n for key in a_dictionary:\n if key is None:\n return None\n else:\n max_val = max(a_dictionary)\n return max_val", "def maxLike(self):\n return max(self.d.values())", "def giveMaxDict(self, dicts):\n if len(dicts) == 0:\n return {}\n\n elif len(dicts) == 1:\n return dicts[0]\n\n result = deepcopy(dicts[0])\n for i in range(1, len(dicts)):\n for k, v in dicts[i].items():\n if k in result:\n result[k] = max(result[k], dicts[i][k])\n else:\n result[k] = v\n return result", "def get_key_with_max_value(dictionary):\n values = list(dictionary.values())\n keys = list(dictionary.keys())\n return keys[values.index(max(values))]", "def pmax(self):\n pmaxresult = {}\n for (commandnumber,proposal) in self.pvalues.keys():\n pmaxresult[commandnumber] = proposal\n return pmaxresult", "def test_perf_max(self):\n dict_time = timeit.timeit(\n \"max(keys_dict.keys())\",\n setup=\"from random import sample;\"\n \"keys_dict = {key: key for key in sample(range(-1000, 1000), 2000)}\",\n number=1000\n )\n dict_sort_time = timeit.timeit(\n \"sorted(keys_dict.keys())[-1]\",\n setup=\"from random import sample;\"\n \"keys_dict = {key: key for key in sample(range(-1000, 1000), 2000)}\",\n number=1000\n )\n tree_time = timeit.timeit(\n \"keys_tree.max()\",\n setup=\"from amp_trees import SplayDict;\"\n \"from random import sample;\"\n \"keys_tree = SplayDict((key, key) for key in sample(range(-1000, 1000), 2000))\",\n number=1000\n )\n self.assertGreater(dict_time, tree_time, \"Max method is slow.\")\n self.assertGreater(dict_sort_time, tree_time, \"Max method is slow.\")", "def arglexmax(keys, multi=False):\n # Handle keys in reverse order to be consistent with np.lexsort\n reverse_keys = keys[::-1]\n arr = reverse_keys[0]\n breakers = reverse_keys[1:]\n # Look for the maximum value in the first array, and continue using new\n # arrays until a unique maximum index is found.\n _cand_idxs = np.where(arr == arr.max())[0]\n if len(_cand_idxs) > 1:\n for breaker in breakers:\n vals = breaker[_cand_idxs]\n _cand_idxs = _cand_idxs[vals == vals.max()]\n if len(_cand_idxs) == 1:\n break\n # If multiple maximum values are found then either\n # return them all or return an arbitrary one.\n return _cand_idxs if multi else _cand_idxs[0]", "def argmax2(self, cvars=None, ctuple=None):\n if (cvars is None):\n return 
self.v.ind2sub(self.t.argmax())\n ax = tuple(map(lambda x:ctuple[cvars.index(x)] if x in cvars else slice(None) ,self.v))\n return self.v.ind2sub(self.t[ax].argmax())", "def argmax(sequence):\r\n\r\n import operator\r\n index, value = max(enumerate(sequence), key=operator.itemgetter(1))\r\n\r\n return index", "def max_(*args, **kwargs):\n ...", "def find_max_key_val(value):\n most_sold = max(value, key=value.get)\n keys_val = value.get(most_sold)\n print(f\"We can see the highest count is for {most_sold},\")\n print(f\"with a total of {keys_val} sale(s).\")\n return keys_val", "def argmax(values):\n\tvalues = np.array(values)\n\tmx = np.max(values)\n\tval = np.where(values==mx)[0]\n\treturn np.random.choice(val)", "def max_in_dict(dict_of_ints):\n list_of_vals =[]\n list_of_max_keys = []\n for i in dict_of_ints:\n list_of_vals.append(dict_of_ints[i])\n max_val= max(list_of_vals)\n for i in dict_of_ints:\n if dict_of_ints[i] == max_val:\n list_of_max_keys.append(i)\n if len(list_of_max_keys) == 1:\n return list_of_max_keys[0]\n else:\n return list_of_max_keys", "def mle(self):\n\n\t\tmax_key, max_value = None, 0\n\t\tfor key, value in self.items():\n\t\t\tif value > max_value:\n\t\t\t\tmax_key, max_value = key, value\n\n\t\treturn max_key", "def find_max_value(self, dictionary):\n max_value = max(dictionary.items(), key=lambda x: x[1])\n list_of_max_values = []\n for k, v in dictionary.items():\n if v == max_value[1]:\n list_of_max_values.append(k)\n return list_of_max_values", "def d_max(x, y):\n axis = np.argmax(x.shape)\n return np.max(np.array([x, y]), axis=axis)", "def max_employment(countries, employment):\n i = np.argmax(employment)\n max_value = employment[i]\n max_country = countries[i]\n\n return max_country, max_value", "def get_max(x, y, z):\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Number of zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n maxes.append(max(hist_arr))", "def test_get_maximum():\n assert get_maximum({\"numbers\": [4, 3, 2, 1]}) == {\"maximum\": 4}", "def Max(data):\n return data.max()", "def max_decode(M):\r\n return scipy.array([ f.val.argmax() for f in M])", "def max_word_value(words):\n return max(words, key=calc_word_value)", "def zmax(self):\n # Extract parameters\n pzs = self.params[0]\n return max([pz.zmax for pz in pzs])", "def zmax(self):\n # Extract parameters\n pzs = self.params[0]\n return max([pz.zmax for pz in pzs])", "def _call_max(vecObj):\n res = vecObj.max()\n return res", "def argmax(a, *args, **kwargs):\n warn('The function argmax is deprecated from JAMS. 
Use module pyjams.',\n category=DeprecationWarning)\n if isinstance(a, np.ma.MaskedArray):\n return np.ma.argmax(a, *args, **kwargs)\n elif isinstance(a, np.ndarray):\n return np.argmax(a, *args, **kwargs)\n else:\n return _argmax(a)", "def max(self, include_zero=False):\n for key, value in reversed(self.items()):\n if value > 0 or include_zero:\n return key", "def argmax(module, x, axes=None):\n return module.argmax(x, axes)", "def argmax(X):\n\tN,K,_ = X.shape\n\tg0 = X[0,0]\n\tg = X[1:]\n\n\tB = ones((N,K), dtype=int32) * -1\n\t# compute max-marginals and backtrace matrix\n\tV = g0\n\tfor t in xrange(1,N):\n\t\tU = empty(K)\n\t\tfor y in xrange(K):\n\t\t\tw = V + g[t-1,:,y]\n\t\t\tB[t,y] = b = w.argmax()\n\t\t\tU[y] = w[b]\n\t\tV = U\n\t# extract the best path by brack-tracking\n\ty = V.argmax()\n\ttrace = []\n\tfor t in reversed(xrange(N)):\n\t\ttrace.append(y)\n\t\ty = B[t, y]\n\ttrace.reverse()\n\treturn trace", "def max(self, key=lambda _: _):\n return max(self, key=key)", "def structured_maximum(x, y):\r\n # see decorator for function body\r", "def max_discrete(func: Callable[[Tuple], np.ndarray], over: Iterable[Tuple],\\\n state: Tuple[Union[int, float]]) -> Tuple[float, Tuple[Union[int, float]], None]:\n vals = [func(np.asarray((*state, *action)).reshape(1, -1))[0, 0] for action in over]\n maximum = max(vals)\n return (maximum, over[vals.index(maximum)], None)", "def potential_max(self):\n\n return self._args.max", "def argmax(func, seq):\n def compare(a1, b1):\n if a1[0] > b1[0]:\n return a1\n return b1\n # using a generator expression here should save memory\n objs = ((func(val), val) for val in seq)\n return reduce(compare, objs)[1]", "def _get_maximum_from_heatmap(self, heatmap):\n assert heatmap.size(0) == 1 and heatmap.size(1) == 1\n max_map = torch.eq(heatmap, self.pool(heatmap)).float()\n heatmap = heatmap * max_map\n score = heatmap.view(-1)\n score, pos_idx = score.topk(self.max_num_people)\n mask = score > self.keypoint_threshold\n score = score[mask]\n pos_idx = pos_idx[mask]\n return pos_idx, score", "def max(x):\n pass", "def max():\n return KeeperOfMinOrMax(int.__lt__)", "def argmax(self, axis=None):\n return np.argmax(self.data, axis=axis)", "def find_matrix_max(matrix):\n\n max_val = 0.0\n max_i = 0\n max_j = 0\n\n for i in matrix.keys():\n try:\n kvp = max(matrix[i].iteritems(), key=itemgetter(1))\n except ValueError:\n continue\n \n # Maybe I should store the max value with the array, and then always \n # store the previous largest, and when i insert or delete...\n \n if kvp[1] > max_val:\n max_val = kvp[1]\n max_i = i\n max_j = kvp[0]\n\n return (max_i, max_j, max_val)", "def argmax(vec):\n _, idx = torch.max(vec, -1)\n return to_scalar(idx)", "def max_y_arg(self):\n return max((self(0).y,0), (self(1).y,1))[1]", "def argmax(tensor):\n raise NotImplementedError", "def _single_value_max(self, maps, threshold):\r\n max_vec = np.max(maps, axis=1)\r\n cmin = np.min(max_vec)\r\n cmax = np.max(max_vec)\r\n limit = cmax - (cmax - cmin) * threshold\r\n max_mask = max_vec > limit\r\n argmax = np.argmax(maps, axis=1)\r\n return (argmax + 1) * max_mask", "def max(self):\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits-1)) - 1)\n iinfo._max_vals[self.key] = val\n return val", "def max(self):\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits-1)) - 1)\n 
iinfo._max_vals[self.key] = val\n return val", "def max_apply(x): \n if len(x) == 1:\n return x[0]\n else:\n return x[1]", "def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")", "def max(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max\")", "def _get_maxh(self, maxh=None, **kwargs):\n try:\n key = 'maxh_' + self.name\n maxh = kwargs[key]\n except KeyError:\n if maxh == None:\n raise ValueError(\n \"Please provide a valid value for 'maxh' (or maxh_... for each of the components of the mesh template).\")\n return maxh", "def get_parameters_max(self):\n maxValues = numpy.zeros(self.get_num_parameters())\n i = 0\n for p in self.parameters:\n maxValues[i] = p.get_max_value()\n i += 1\n return maxValues", "def get_max_index(a):\n return a.argmax()", "def get_max_index(a):\n return a.argmax()", "def get_max_index(a):\n return a.argmax()", "def detChar(char_dic):\n\t#get the key\n\tmax_char_key = max(char_dic, key=char_dic.get)\n\t#get the value\n\tmax_char_val = max(char_dic.values())\n\n\treturn (max_char_key, max_char_val)", "def max_value(policy_lookup, state, player):\n\taction_values = list(get_policy_actions(policy_lookup, state, player).values())\n\tif action_values:\n\t\treturn np.max(action_values)\n\treturn 0", "def longest_value_key(incoming_dict: dict):\n longest_value = max(incoming_dict.values(), key=len)\n\n for key, value in incoming_dict.items():\n if longest_value == value:\n longest_key = key\n print(longest_key)", "def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max", "def my_max(*args):\n def sorter(sequence):\n \"\"\"\n This function find max in given sequence of simple numbers\n \"\"\"\n def bubble_sort(a):\n \"\"\"\n This function sort the list\n \"\"\"\n for i in reversed(range(len(a))):\n for j in range(1, i + 1):\n if a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n return a\n\n listed_seq = list(sequence)\n for number in listed_seq:\n if not isinstance(number, int):\n raise ValueError(\"Can't find max, wrong data format\")\n return bubble_sort(listed_seq)[-1]\n\n if not args:\n raise ValueError(\"Can't find max, no data given\")\n if len(args) == 1:\n thing = args[0]\n if isinstance(thing, (list, tuple)):\n return sorter(thing)\n if isinstance(thing, int):\n return thing\n raise ValueError(\"Can't find max, wrong data format\")\n return sorter(args)", "def get_y_max(self):\n if len(self._statDict) == 0:\n return -1E10\n\n line_id_list = self._statDict.keys()\n max_y = self._statDict[line_id_list[0]][3]\n for i_plot in range(1, len(line_id_list)):\n if self._statDict[line_id_list[i_plot]][3] > max_y:\n max_y = self._statDict[line_id_list[i_plot]][3]\n\n return max_y", "def cmax(self):\n return self[\"cmax\"]", "def max_value(self, state, max_alpha, max_beta, max_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if max_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"-inf\")\r\n for a in state.actions():\r\n v = max(v, self.min_value(state.result(a), max_alpha, max_beta, max_depth - 1))\r\n if v >= max_beta:\r\n return v\r\n max_alpha = max(max_alpha, v)\r\n return v", "def max(scores):\n return __builtin__.max(scores) if len(scores) else 0", "def longest_value_key(incoming_dict):\n #return_value = max(incoming_dict, key=len)\n #return return_value\n if not incoming_dict:\n return None\n\n all_keys = incoming_dict.keys()\n if not all_keys:\n return None\n\n Key_with_longest_value = 
None\n for key in all_keys:\n if not Key_with_longest_value:\n Key_with_longest_value = key\n\n if len(incoming_dict[key]) > len(incoming_dict[Key_with_longest_value]):\n Key_with_longest_value = key\n return Key_with_longest_value", "def _argmax(a, positions, shape, dtype):\n\n result = numpy.empty((1,), dtype=dtype)\n\n pos_nd = numpy.unravel_index(positions[numpy.argmax(a)], shape)\n for i, pos_nd_i in enumerate(pos_nd):\n result[\"pos\"][0, i] = pos_nd_i\n\n return result[0]", "def max(self):\n return self._reduce_for_stat_function(F.max, only_numeric=False)", "def max_by(d: D, *, keyfunc: Callable[..., Hashable], **kwds: Any) -> NumDict:\n\n value = by(d, max, keyfunc, **kwds)\n _kwds = {\"keyfunc\": keyfunc}\n _kwds.update(kwds)\n record_call(max_by, value, (d,), _kwds)\n\n return value", "def get_max(bij, exploration, bij_bool):\n\n#\tbij[bij_bool] = -sys.maxint - 1\n\n\tm = bij.argmax()\n\tc = np.unravel_index(m, bij.shape)\n\t#c = np.unravel_index(bij.argmax(), bij.shape)\n\n############################## A MODIFIER EVENTUELLEMENT #################\n#\tb = bij[bij_bool]\n#\tm = b.argmax()\n#\tind = np.unravel_index(m, b.shape)\n#\tc = np.where(bij == b[ind])\n#\tc = (c[0][0], c[1][0])\n#\tprint('mMAXx', bij[c])\n\treturn (c)", "def get_tmax(data):\n return data[np.argmax(data[:, 1])][0]", "def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]" ]
[ "0.7893945", "0.7893012", "0.78599966", "0.7807235", "0.7397014", "0.7194823", "0.7194823", "0.7194823", "0.7170892", "0.7158785", "0.7147766", "0.71402246", "0.71295154", "0.71241313", "0.7121471", "0.7005879", "0.6961847", "0.6961493", "0.69519615", "0.68631655", "0.6839727", "0.6824366", "0.68026197", "0.67939425", "0.673112", "0.6730373", "0.6664163", "0.66009325", "0.657136", "0.657136", "0.6570348", "0.65219504", "0.64896065", "0.64676636", "0.6467285", "0.64418733", "0.64210933", "0.6413631", "0.63593143", "0.6343249", "0.62897843", "0.6267", "0.62241066", "0.62151676", "0.62150043", "0.6171135", "0.6146254", "0.61049604", "0.61046195", "0.61034775", "0.6092827", "0.60904247", "0.6077774", "0.6064668", "0.6064668", "0.6058539", "0.60520047", "0.60461104", "0.60425377", "0.60369354", "0.60311323", "0.6013205", "0.60119313", "0.5999801", "0.59890425", "0.59764254", "0.5967459", "0.59650093", "0.5962457", "0.5957236", "0.59476304", "0.59468335", "0.5937248", "0.59150577", "0.59054244", "0.59054244", "0.590243", "0.58985835", "0.58985835", "0.5894787", "0.5891277", "0.58903617", "0.58903617", "0.58903617", "0.58801717", "0.58768123", "0.58731437", "0.58617157", "0.5859799", "0.5858095", "0.5851843", "0.58366853", "0.58212876", "0.58124334", "0.5798603", "0.5797455", "0.57933086", "0.57915545", "0.57770604", "0.5772678" ]
0.83839536
0
Produce nboot bootstrap samples from applying func to data
def bootstrap(data,func,nboot):
    n = len(data)
    resamples = np.array([[random.choice(data) for i in range(n)] for j in range(nboot)])
    return np.apply_along_axis(func, 1, resamples)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bootstrap(data):\r\n size = int(len(data))\r\n train = resample(data, n_samples=size, replace=True)\r\n test = data.drop(train.index) \r\n return train[encoded_features], train[target], test[encoded_features], test[target]", "def bootstrap_replicate_1d(data, func):\r\n bs_sample = np.random.choice(data, len(data))\r\n return func(bs_sample)", "def bootstrap_replicate_1d(data, func):\n bs_sample = np.random.choice(data, len(data))\n\n return func(bs_sample)", "def bootstrap_replicate_1d(data, func):\n bs_sample = np.random.choice(data, len(data))\n return func(bs_sample)", "def bootstrap_statistic(data, stats_fn, num_samples):\n return [stats_fn(bootstrap_sample(data)) for _ in range(num_samples)]", "def bootstrap_sample(data):\n return [random.choice(data) for _ in data]", "def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))", "def standard_bootstrap(dataset):\n randseed=np.random.randint(0,10000)\n np.random.seed(randseed)\n \n n = dataset.shape[0]\n b = np.random.randint(0, high=n-1, size=n)\n return dataset[b]", "def dataset_augmentation(data_start, bootstrapping = 1, epurate = 1, shuffle = True):\n data = data_start\n for ii in range(bootstrapping):\n data = data.append(data_start.apply(bootstrap_sample, axis=1), ignore_index=True)\n\n#Bugged version that weirdly works well....\n# for ii in range(bootstrapping):\n # data = data.append(bootstrap_sample(data_start), ignore_index=True)\n\n for ii in range(epurate):\n data = data.append(data_start.apply(epurate_sample, axis=1), ignore_index=True)\n\n # Shuffling (Important)\n if shuffle == True:\n data = data.sample(frac=1)\n return data", "def bootstrap(X):\n return X[np.random.choice(list(range(X.shape[0])), size=X.shape[0]), :]", "def bootstrap(x, iter=int(1E6), return_samples=False):\n\n \n means = np.empty(iter) \n dfs = []\n for i in tqdm(range(iter), desc='Performing bootstrap sampling'):\n resamp = np.random.choice(x, size=len(x), replace=True)\n means[i] = resamp.mean()\n\n if return_samples:\n _df = pd.DataFrame([])\n _df['value'] = resamp\n _df['iter'] = i + 1\n dfs.append(_df)\n\n # Compute confidence intervals of the means.\n mean_val = means.mean()\n bounds_ci = {'99%': (0.5, 99.5), '95%': (2.5, 97.5), '90%': (5, 95),\n '75%': (12.5, 87.5), '50%': (25, 75), '25%': (37.5, 62.5),\n '10%': (45, 55), '5%': (47.5, 52.5), '1%': (49.5, 50.5)} \n cis = {} \n for k, v in bounds_ci.items():\n bounds = np.percentile(means, v)\n cis[k] = bounds\n\n statistics['original_data'] = x\n statistics['resampled_means'] = means\n statistics['mean_value'] = mean_val\n statistics['confidence_intervals'] = cis\n\n if return_samples:\n _df = pd.concat(dfs, sort=False)\n return [statistics, _df]\n else:\n return statistics", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def bootstrap(series, func=statistics.mean, confidence=0.9):\n n = len(series)\n n_bootstrap = 250\n digests = []\n for j in range(n_bootstrap):\n bootstrap_sample = [\n random.choice(series)\n for _ in range(n)\n ]\n digest = func(bootstrap_sample)\n digests.append(digest)\n digests.sort()\n low, mid, high = (1.0-confidence)/2.0, 0.5, (1.0+confidence)/2.0\n low, mid, high = int(low*n_bootstrap), int(mid*n_bootstrap), int(high*n_bootstrap)\n return digests[low], digests[mid], digests[high]", "def bootstrap_resample(X, n=None):\r\n if n == None:\r\n n = 
len(X)\r\n \r\n resample_i = np.floor(np.random.rand(n)*len(X)).astype(int)\r\n X_resample = X[resample_i]\r\n return X_resample", "def bootstrap(data, num_samples, statistic, alpha):\n n = len(data)\n idx = npr.randint(0, n, (num_samples, n))\n samples = x[idx]\n stat = np.sort(statistic(samples, 1))\n return (stat[int((alpha/2.0)*num_samples)],\n stat[int((1-alpha/2.0)*num_samples)])", "def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)", "def genBootstrapData(fullData, dirName=\"bootstrap_data/\", ti=None, tf=None, n=1, blockLen=7):\n # If initial (final) time not given, apply block bootstrap to whole data set\n if ti == None:\n ti = 0\n if tf == None:\n tf = fullData.shape[0]\n\n # Reset seed\n np.random.seed()\n\n bsSets = []\n\n for i in range(0, n):\n bsSet = fullData.copy()\n\n # Loop over the sensors\n for sensor in range(fullData.shape[1]):\n # Loop over the blocks\n for tStart in range(ti, tf, blockLen):\n # Resample only the non-nan datapoints\n # TODO: is this a valid way of doing this???\n oldBlockNonNans = bsSet[tStart:tStart+blockLen, sensor].copy()\n oldBlockNonNans = oldBlockNonNans[np.isfinite(oldBlockNonNans)]\n\n for t in range(tStart, min(tStart + blockLen, fullData.shape[0])):\n if not np.isnan(bsSet[t, sensor]):\n bsSet[t, sensor] = np.random.choice(oldBlockNonNans, 1, replace=False)\n\n bsSets.append(bsSet)\n\n # Save the dataset\n np.savetxt(dirName + \"/blockLen=%i_%i.csv\"%(blockLen, i), bsSet, delimiter=\" \", fmt=\"%f\")\n\n return bsSets", "def bs_replicate(data, func=np.mean):\n bs_sample = np.random.choice(data, replace=True, size=len(data))\n return func(bs_sample)", "def compute_bootstrapped_sample(X_table, y_table):\n n = len(X_table)\n X_sample = []\n y_sample = []\n for _ in range(n):\n rand_index = random.randrange(0, n)\n X_sample.append(X_table[rand_index])\n y_sample.append(y_table[rand_index])\n return X_sample, y_sample", "def empirical_bootstrap(self, pop_data: np.ndarray, n = None, B = 1000, func=None):\n # store the estimates for each bootstrapped sample\n n = pop_data.shape[0] if n is None else n\n boot_est = [None] * B\n index = 0\n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n est = func(pop_data[idx], axis=0)\n boot_est[index] = est\n index += 1\n \n result = {}\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est)\n result['est_err'] = np.std(boot_est, ddof=1)\n \n return result", "def bootstrap(data, alpha=0.05, n_bootstrap = 2000, func=None, **func_args):\n\t\n\tassert data.ndim == 3, 'Data is not 3-dimensional. Function only works for 3-D data.' 
\n\t\n\t# Trials form the second dimension\n\tn_trials = data.shape[1]\n\t\n\t# generate randomised bootstrap resamples as random indices\n\tbootstrap_index = np.random.randint(0, n_trials, \n\t\t\t\t\t\t\t\t\t\t(n_trials, n_bootstrap) )\n\t\n\t# For each bin in the histogram, randomly samples from the results\n\t# of each trial and repeats, effectively, n_bootstrap times \n\ttrials_bootstrap = data[:, bootstrap_index, :]\n\t\n\t# dimension one is the trials, zero is the conditions; this averaging \n\t# goes across the trials creating a PSTH for each condition, and,\n\t# importantly, for each bootstrap resample\n\tavg_bootstrap = trials_bootstrap.mean(axis=1)\n\t\n\tif func:\n\t\tavg_bootstrap = func(avg_bootstrap, **func_args)\n\t\t\n\t# find percentile values for each bin along the bootstrap resamples,\n\t# which are on axis 1 \n\tCI_pos = np.percentile(avg_bootstrap, 100*(1 - (alpha/2.)), \n\t\t\t\t\t\t\t\taxis=1)\n\tCI_neg = np.percentile(avg_bootstrap, 100*(alpha/2.), \n\t\t\t\t\t\t\t\taxis=1)\n\n\n\treturn CI_pos, CI_neg", "def main():\n df = pd.read_csv('data/Boston.csv')\n n_obs = len(df)\n np.random.seed(111)\n\n # Part a\n medv_mean = np.mean(df['medv'])\n print('medv mean = {:.3f}'.format(medv_mean))\n\n # Part b\n medv_stan_err = statistics.stdev(df['medv']) / np.sqrt(n_obs)\n print('medv standard error = {:.5f}'.format(medv_stan_err))\n\n # Part c\n n_boot_iters = 10000\n medv_mean_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_mean_array[ii] = np.mean(df.loc[ind, 'medv'])\n\n medv_stan_err_boot = statistics.stdev(medv_mean_array)\n print('medv standard error (bootstrap) = {:.5f}'.format(medv_stan_err_boot))\n\n # Part d\n ci_95 = [medv_mean - 2 * medv_stan_err,\n medv_mean + 2 * medv_stan_err]\n ci_95_boot = [medv_mean - 2 * medv_stan_err_boot,\n medv_mean + 2 * medv_stan_err_boot]\n print('95% CI = [{:.3f}, {:.3f}]'.format(ci_95[0], ci_95[1]))\n print('95% CI (bootstrap) = [{:.3f}, {:.3f}]'.format(ci_95_boot[0], ci_95_boot[1]))\n\n # Part e\n medv_med = np.median(df['medv'])\n print('medv med = {:.3f}'.format(medv_med))\n\n # Part f\n medv_med_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_med_array[ii] = np.median(df.loc[ind, 'medv'])\n\n medv_med_stan_err_boot = statistics.stdev(medv_med_array)\n print('medv median standard error (bootstrap) = {:.5f}'.format(medv_med_stan_err_boot))\n\n # Part g\n medv_10 = np.percentile(df['medv'], 10)\n print('medv 10th percentile = {:.3f}'.format(medv_10))\n\n # Part f\n medv_10_array = np.zeros(n_boot_iters)\n for ii in range(n_boot_iters):\n ind = np.random.choice(n_obs, n_obs, replace=True)\n medv_10_array[ii] = np.percentile(df.loc[ind, 'medv'], 10)\n\n medv_10_stan_err_boot = statistics.stdev(medv_10_array)\n print('medv 10th percenile standard error (bootstrap) = {:.5f}'.format(medv_10_stan_err_boot))", "def get_bootstraps(self):\n col_range = range(self.response.shape[1])\n random_state = np.random.RandomState(seed=self.random_seed)\n return random_state.choice(col_range, size=(self.num_bootstraps, self.response.shape[1])).tolist()", "def bootstrapping_variance_estimation(data, iterations=100):\n bootstrapped_variance = []\n for i in tqdm(range(1)):\n data_at_index_i = [elem for elem in data]\n\n variance_estimation = []\n for _ in range(iterations):\n bootstrapped_data = []\n for _ in range(len(data_at_index_i)):\n bootstrapped_data.append(np.random.choice(data_at_index_i))\n 
variance_estimation.append(np.var(bootstrapped_data))\n\n bootstrapped_variance.append(np.mean(variance_estimation, axis=0))\n\n return bootstrapped_variance", "def bootstrap_resample(X, n=None):\r\n if n == None:\r\n n = len(X)\r\n\r\n resample_i = N.floor(N.random.rand(n)*len(X)).astype(int)\r\n X_resample = X[resample_i]\r\n return X_resample", "def bootstrap(df, nclass, if_new=False):\n ori_size = Counter(df.label)\n logger.info(f'class info before resampling: {ori_size.values()}')\n ori_size_list = list(ori_size.values())\n\n if if_new:\n df_new = pd.DataFrame(data=None, columns=df.columns)\n target_size = min(ori_size_list)\n else:\n target_size = max(ori_size_list)\n df_new = df.copy()\n\n for i in range(nclass):\n name = list(ori_size.keys())[i]\n name_index = np.array(df[df.label == name].index)\n if target_size < ori_size_list[i]:\n sample_size = target_size\n elif target_size > ori_size_list[i]:\n sample_size = target_size - ori_size_list[i]\n else:\n if if_new:\n sample_size = target_size\n else:\n sample_size = 0\n\n np.random.seed(i)\n boostrap_sample = np.random.randint(0, ori_size_list[i], sample_size)\n df_new = df_new.append(df.iloc[name_index[boostrap_sample]], ignore_index=True)\n logger.info(f'class info after resampling: {Counter(df_new.label).values()}')\n return df_new", "def bootstrap(data, iterations=10000):\n\n boot_mean = []\n\n for n in range(0, iterations):\n\n boot = resample(data, replace=True, n_samples=None,\n random_state=None)\n\n boot_mean.append(np.mean(boot))\n\n final_mean = np.mean(boot_mean)\n\n final_std = np.std(boot_mean, dtype=np.float64)\n\n return final_mean, final_std", "def calc_bootstrap(fcs,obs,ref,func, bootstrap_range, L, B):\n \n from sklearn.utils import resample\n \n idxs = np.arange(len(fcs))\n results = []\n \n random_state = 0\n for smp in range(B):\n block_sample = np.array([]).astype(int)\n while(len(block_sample) < len(fcs)):\n random_state += 1\n rolls = resample(idxs, n_samples=1, random_state=random_state)[0]\n block = np.roll(idxs, rolls)[0:L]\n block_sample = np.append(block_sample, block)\n\n block_sample = block_sample[0:len(idxs)]\n results.append(func(fcs[block_sample],obs[block_sample],ref[block_sample]))\n \n try:\n out = [ np.percentile(results, bootstrap_range[0]), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, bootstrap_range[1])]\n except:\n out = [ np.percentile(results, 2.5), \n func(fcs,obs,ref), #np.mean(results), \n np.percentile(results, 97.5)]\n\n # For indicating the statistical significance \n # of the lower boundary:\n if(out[0]>0): \n out.append('*')\n else:\n out.append('')\n \n return out", "def bootstrap_resample(labels):\n idxs = np.arange(len(labels))\n num_labels = max(labels) + 1\n bootstrap_idxs = np.zeros_like(idxs)\n ptr = 0\n for i in range(num_labels):\n strat = idxs[labels == i]\n bootstrap_idxs[ptr:ptr + len(strat)] = np.random.choice(strat, len(strat), replace=True)\n ptr += len(strat)\n return bootstrap_idxs", "def bootstrap(init_file, nbootstraps):\n check_presence_init(init_file)\n dict_ = read(init_file)\n\n # Process the information specified in the initialization file\n nbins, logit, bandwidth, gridsize, a, b = process_user_input(dict_)\n trim, rbandwidth, reestimate_p = process_default_input(dict_)\n\n # Suppress output\n show_output = False\n\n # Prepare empty array to store output values\n mte_boot = np.zeros([gridsize, nbootstraps])\n\n # Load the baseline data\n data = read_data(dict_[\"ESTIMATION\"][\"file\"])\n\n counter = 0\n while counter < 
nbootstraps:\n boot_data = resample(data, replace=True, n_samples=len(data), random_state=None)\n\n # Process the inputs for the decision equation\n indicator, D, Z = process_choice_data(dict_, boot_data)\n\n # Estimate propensity score P(z)\n ps = estimate_treatment_propensity(D, Z, logit, show_output)\n\n if isinstance(ps, np.ndarray): # & (np.min(ps) <= 0.3) & (np.max(ps) >= 0.7):\n # Define common support and trim the data, if trim=True\n boot_data, ps = trim_support(\n dict_,\n boot_data,\n logit,\n ps,\n indicator,\n nbins,\n trim,\n reestimate_p,\n show_output,\n )\n\n # Estimate the observed and unobserved component of the MTE\n X, b1_b0, b0, mte_u = mte_components(\n dict_, boot_data, ps, rbandwidth, bandwidth, gridsize, a, b, show_output\n )\n\n # Calculate the MTE component that depends on X\n mte_x = np.dot(X, b1_b0).mean(axis=0)\n\n # Put the MTE together\n mte = mte_x + mte_u\n mte_boot[:, counter] = mte\n\n counter += 1\n\n else:\n continue\n\n return mte_boot", "def bootstrap_array(f_values, values, bootstraps=1000):\n x = np.array(values)\n x_bs = x[np.random.randint(x.size, size=(bootstraps, x.size))]\n return np.array(map(f_values, x_bs))", "def bootstrap_mean(x, n=100):\n out = []\n\n for i in range(n):\n idx = pd.Series(np.arange(len(x))).sample(frac=1.0, replace=True).values\n out.append(x[idx].mean(0))\n outm = np.stack(out)\n return outm.mean(0), outm.std(0)", "def bootstrap_sample_from_data(data, weights=None, seed=0):\n # Set up the random number generator\n RNG = np.random.default_rng(seed)\n N = data.shape[0]\n\n # Set up weights\n if weights is not None:\n cutoffs = np.cumsum(weights)\n else:\n cutoffs = np.linspace(0, 1, N)\n\n # Draw random indices\n indices = np.searchsorted(cutoffs, RNG.uniform(size=N))\n\n # Create a bootstrapped sample\n new_data = deepcopy(data[indices,])\n return new_data", "def eg_bootmu():\n\n a = []\n b = []\n\n for _ in range(100):\n a.append(utils.gaussian(10, 1))\n\n print(\"\", \"mu\", \"sd\", \"cliffs\", \"boot\", \"both\", sep=\"\\t\")\n print(\"\", \"--\", \"--\", \"------\", \"----\", \"----\", sep=\"\\t\")\n\n for mu in range(100, 111):\n b = []\n\n for _ in range(100):\n b.append(utils.gaussian(mu / 10, 1))\n\n cl = utils.cliffsDelta(a, b)\n bs = stats.bootstrap(a, b)\n\n print(\"\", mu / 10, 1, cl, bs, cl and bs, sep=\"\\t\")", "def bootstrap(init_file, nbootstraps, show_output=False):\n check_presence_init(init_file)\n dict_ = read(init_file)\n\n nbins = dict_[\"ESTIMATION\"][\"nbins\"]\n trim = dict_[\"ESTIMATION\"][\"trim_support\"]\n rbandwidth = dict_[\"ESTIMATION\"][\"rbandwidth\"]\n bandwidth = dict_[\"ESTIMATION\"][\"bandwidth\"]\n gridsize = dict_[\"ESTIMATION\"][\"gridsize\"]\n a = dict_[\"ESTIMATION\"][\"ps_range\"][0]\n b = dict_[\"ESTIMATION\"][\"ps_range\"][1]\n\n logit = dict_[\"ESTIMATION\"][\"logit\"]\n\n # Distribute initialization information.\n data = read_data(dict_[\"ESTIMATION\"][\"file\"])\n\n # Prepare empty arrays to store output values\n mte_boot = np.zeros([gridsize, nbootstraps])\n\n counter = 0\n while counter < nbootstraps:\n boot = resample(data, replace=True, n_samples=len(data), random_state=None)\n\n # Process data for the semiparametric estimation.\n indicator = dict_[\"ESTIMATION\"][\"indicator\"]\n D = boot[indicator].values\n Z = boot[dict_[\"CHOICE\"][\"order\"]]\n\n # The Local Instrumental Variables (LIV) approach\n\n # 1. 
Estimate propensity score P(z)\n ps = estimate_treatment_propensity(D, Z, logit, show_output)\n\n if isinstance(ps, np.ndarray): # & (np.min(ps) <= 0.3) & (np.max(ps) >= 0.7):\n\n # 2a. Find common support\n treated, untreated, common_support = define_common_support(\n ps, indicator, boot, nbins, show_output\n )\n\n # 2b. Trim the data\n if trim is True:\n boot, ps = trim_data(ps, common_support, boot)\n\n # 3. Double Residual Regression\n # Sort data by ps\n boot = boot.sort_values(by=\"ps\", ascending=True)\n ps = np.sort(ps)\n\n X = boot[dict_[\"TREATED\"][\"order\"]]\n Xp = construct_Xp(X, ps)\n Y = boot[[dict_[\"ESTIMATION\"][\"dependent\"]]]\n\n b0, b1_b0 = double_residual_reg(ps, X, Xp, Y, rbandwidth, show_output)\n\n # Turn the X, Xp, and Y DataFrames into np.ndarrays\n X_arr = np.array(X)\n Xp_arr = np.array(Xp)\n Y_arr = np.array(Y).ravel()\n\n # 4. Compute the unobserved part of Y\n Y_tilde = Y_arr - np.dot(X_arr, b0) - np.dot(Xp_arr, b1_b0)\n\n # 5. Estimate mte_u, the unobserved component of the MTE,\n # through a locally quadratic regression\n quantiles, mte_u = locpoly(ps, Y_tilde, 1, 2, bandwidth, gridsize, a, b)\n\n # 6. construct MTE\n # Calculate the MTE component that depends on X\n mte_x = np.dot(X, b1_b0).mean(axis=0)\n\n # Put the MTE together\n mte = mte_x + mte_u\n\n mte_boot[:, counter] = mte\n\n counter += 1\n\n else:\n continue\n\n return mte_boot", "def compute_disturbances_bootstrap_rawdata(n_bits, data_ref, data_test, num_bootstrap_samples=20,\n max_weight=4, solver=\"SCS\", verbosity=1, seed=0,\n return_resampled_data=False, add_one_to_data=True):\n #p_ml = _np.array(data_ref) / _np.sum(data_ref)\n #q_ml = _np.array(data_test) / _np.sum(data_test)\n\n if verbosity > 0:\n print(\"Computing base disturbances\")\n dist_by_weight_ml = compute_disturbances_with_confidence(\n n_bits, data_ref, data_test, None, max_weight, solver=solver, verbosity=verbosity - 1)\n\n dist_by_weight = _np.zeros((max_weight, num_bootstrap_samples), 'd')\n resampled_data = []\n\n bootstrap_data_ref = data_ref + _np.ones(len(data_ref), dtype='int')\n bootstrap_data_test = data_test + _np.ones(len(data_test), dtype='int')\n\n for i in range(num_bootstrap_samples):\n if verbosity > 0:\n print(\"Analyzing bootstrap sample %d of %d...\" % (i + 1, num_bootstrap_samples), end='')\n _sys.stdout.flush(); tStart = _time.time()\n redata_ref = resample_data(bootstrap_data_ref, seed=seed + i)\n redata_test = resample_data(bootstrap_data_test, seed=seed + num_bootstrap_samples + i)\n if return_resampled_data:\n resampled_data.append((redata_ref, redata_test))\n\n try:\n disturbances = compute_disturbances_with_confidence(\n n_bits, redata_ref, redata_test, None, max_weight, solver=solver, verbosity=verbosity - 2)\n except Exception:\n try:\n if verbosity > 0: print(\"\\nFalling back on ECOS\")\n disturbances = compute_disturbances_with_confidence(\n n_bits, redata_ref, redata_test, None, max_weight, solver=\"ECOS\", verbosity=verbosity - 2)\n except Exception:\n if verbosity > 0: print(\"\\nFailed using %s and ECOS - reporting nans\" % solver)\n for w in range(max_weight):\n dist_by_weight[w, i] = _np.nan\n\n for w in range(max_weight):\n dist_by_weight[w, i] = disturbances[w][0]\n\n if verbosity > 0:\n print(\" (%.1fs)\" % (_time.time() - tStart))\n\n dist_ml = _np.array([dist_by_weight_ml[w][0] for w in range(max_weight)], 'd')\n\n if return_resampled_data:\n return dist_ml, dist_by_weight, resampled_data\n else:\n return dist_ml, dist_by_weight", "def _get_bootstrap_sample(x, y, num_reps):\r\n 
combined = array(list(x) + list(y))\r\n total_obs = len(combined)\r\n num_x = len(x)\r\n for i in range(num_reps):\r\n # sampling with replacement\r\n indices = randint(0, total_obs, total_obs)\r\n sampled = combined.take(indices)\r\n # split into the two populations\r\n sampled_x = sampled[:num_x]\r\n sampled_y = sampled[num_x:]\r\n yield sampled_x, sampled_y", "def bootstrap_group(nsubj, ngroups):\n groupsize = nsubj\n samples = [(groupsize * np.random.rand(groupsize)).astype(np.int_)\n for i in range(ngroups)]\n return samples", "def bootstrap_sample_binomial(k, n, B):\n\n vec = np.zeros(n) # Original sample created as a vector\n vec[0:k] = 1 # 1 == success\n y = np.zeros(B) # Bootstrap statistics of # success\n \n for k in range(B):\n y[k] = np.sum(np.random.choice(vec,n)) # default is with replacement\n\n return y", "def bootstrapping(datasample):\r\n \r\n datasample=df_to_array(datasample)\r\n \r\n boots_indexs=np.random.randint(len(datasample),size=(1,len(datasample)))\r\n \r\n whole_indexs=list(range(len(datasample)))\r\n \r\n missing_indexs=np.array(list(set(whole_indexs).difference(set(list(boots_indexs[0])))))\r\n \r\n \r\n return boots_indexs,missing_indexs", "def bootstrap(fit_func, xdata, ydata, iterations=100):\n opt_parameters, _ = curve_fit(fit_func, xdata, ydata)\n # NOTE: Do not use [[]]*len(...) since this creates three references to the same []\n samples = [[] for i in range(len(opt_parameters))]\n\n new_xdata = [0 for i in range(len(xdata))]\n new_ydata = [0 for i in range(len(ydata))]\n\n for _ in range(iterations):\n # Resample\n for i in range(len(ydata)):\n index_choice = random.randrange(0, len(xdata))\n new_xdata[i] = xdata[index_choice]\n new_ydata[i] = ydata[index_choice]\n\n # Curve fit and store the samples\n sampled_pars, _spcov = curve_fit(fit_func, new_xdata, new_ydata)\n\n for i, sp in enumerate(sampled_pars):\n samples[i].append(sp)\n\n return {np.average(sample): np.std(sample) for sample in samples}", "def bootstrapData(filename, nrounds, nsamples=None, genecols=2):\n if nsamples == None:\n nsamples = countSamples(filename)\n outfiles = []\n\n for i in range(nrounds):\n columns = samplingWithReplacement(nsamples)\n (name, ext) = os.path.splitext(filename)\n outfilename = name + \"-{}\".format(i) + ext\n outfiles.append(outfilename)\n\n sys.stderr.write(\"Writing {}\\n\".format(outfilename))\n with open(outfilename, \"w\") as o:\n with open(filename, \"r\") as f:\n for line in f:\n parsed = line.rstrip(\"\\n\").split(\"\\t\")\n new = parsed[0:genecols]\n for c in columns:\n new.append(parsed[c+genecols])\n o.write(\"\\t\".join(new) + \"\\n\")\n \n return outfiles", "def bootstrap(a, iterations, func=identity, func_axis=None, dtype=None):\n # Calculate the number of measurements\n n = __number_measurements(a, func_axis)\n # Evaluate the function on the bootstrap means\n bootstrap_values = [func(*(__array_mean_indices(a, numpy.random.randint(0, high=n, size=n), func_axis=func_axis, dtype=dtype))) for i in range(iterations)]\n\n # Return the average value and the error of this averaged value\n return numpy.mean(bootstrap_values), math.sqrt(float(iterations)/float(iterations - 1))*numpy.std(bootstrap_values)", "def generate_samples(self, n_samples=100):\n \t\t\n\t\t#how many times should ancestral sampling be run\n\t\t#n_samples\n prior_samples=[]\n for i in range(0,n_samples):\n prior_sample = self.prior.get_samples(\n n_latent_nodes=self.n_latent_nodes,\n n_gibbs_sampling_steps=100, \n sampling_mode=\"gibbs_ancestral\")\n prior_sample = 
torch.cat(prior_sample)\n prior_samples.append(prior_sample)\n prior_samples=torch.stack(prior_samples)\n # prior_samples = tf.slice(prior_samples, [0, 0], [num_samples, -1])\n output_activations = self.decoder.decode(prior_samples)\n output_activations = output_activations+self._train_bias\n output_distribution = Bernoulli(logit=output_activations)\n output=torch.sigmoid(output_distribution.logits)\n # output_activations[0] = output_activations[0] + self.train_bias\n # output_dist = FactorialBernoulliUtil(output_activations)\n # output_samples = tf.nn.sigmoid(output_dist.logit_mu)\n # print(\"--- \",\"end VAE::generate_samples()\")\n return output", "def bootstrap_resample(self, X, n=None):\n if n == None:\n n = len(X)\n\n resample_i = np.floor(np.random.rand(n)*len(X)).astype(int)\n X_resample = X.iloc[resample_i, :]\n return X_resample", "def bootstrap(items, choices, repeats):\n for i in range(repeats):\n yield sample(items, choices, replace=True)", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def samples(self):\n pass", "def sample(df: pd.DataFrame, cat: str, n_samples: int) -> np.array:\n # get optimal block size\n b_star = optimal_block_length(df[cat].values)\n b_star = math.ceil(b_star[0].b_star_cb)\n\n # (n_samples, n_observations)\n samples = tapered_block_bootstrap(df[cat].values,\n block_length=b_star,\n replications=n_samples)\n\n return samples", "def generate_dataset():\n num_list = 10\n return [generate_list() for _ in range(num_list)]", "def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n self._set_meta_features()\n for _ in tqdm(range(nsamples)):\n self._update_meta_features(seed_iter)\n self._sample()\n yield self._extract_features()", "def sampling(X_train, y_train, X_test, y_test, sampling_instances, model_instances, func):\n\n metrics = []\n # go through all sampling methods\n for sampling_instance in sampling_instances:\n if sampling_instance is not None:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = sampling_instance.fit_resample(X=X_train, y=y_train)\n else:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = X_train, y_train\n\n # Go through all models\n for model_instance in model_instances:\n print('fitting model ' + str(model_instances.index(model_instance) + 1) + ' on ' +\n str(len(model_instances)), \" : \", type(model_instance).__name__)\n model_instance.fit(X_train1, y_train1)\n metrics.append(func(y_test, model_instance.predict(X_test)))\n\n models = [type(model).__name__ for model in model_instances]\n methods = [type(sampling).__name__ for sampling in sampling_instances]\n index = [model + '_' + method for model in models for method in methods]\n\n #Dry run of compute metrics with return_index=True to get indexes\n columns = func(y_test, y_test, average='weighted', return_index=True)\n metrics = pd.DataFrame(metrics, columns=columns, index=index)\n\n return metrics", "def bootstrap(h0, h1, f, B=10000):\n t_star = np.zeros(B)\n m = len(h0)\n n = len(h1)\n all_h = np.concatenate([h0, h1])\n for b_id in range(B):\n b_data = 
np.random.choice(all_h, size=m + n, replace=True)\n t_star[b_id] = f(b_data[:m], b_data[m:])\n return t_star", "def eg_pre():\n\n print(\"\\teg3\")\n\n d = 1\n\n for _ in range(10):\n t1 = []\n t2 = []\n\n for _ in range(32):\n t1.append(utils.gaussian(10, 1))\n t2.append(utils.gaussian(d * 10, 1))\n\n print(\"\", \"\", d, d < 1.1, stats.bootstrap(\n t1, t2), stats.bootstrap(t1, t1), sep=\"\\t\")\n\n d = round(d + .05, 2)", "def bootstrap(self, X_list, n_sampling):\n if len(X_list) < 2:\n raise ValueError('X_list must be a list containing at least two items')\n\n n_features = check_array(X_list[0]).shape[1]\n X_list_ = []\n for X in X_list:\n X_ = check_array(X)\n if X_.shape[1] != n_features:\n raise ValueError('X_list must be a list with the same number of features')\n X_list_.append(X_)\n X_list = np.array(X_list_)\n\n if isinstance(n_sampling, (numbers.Integral, np.integer)):\n if not 0 < n_sampling:\n raise ValueError('n_sampling must be an integer greater than 0.')\n else:\n raise ValueError('n_sampling must be an integer greater than 0.')\n\n # Bootstrapping\n adjacency_matrices_list = [[] for _ in range(X_list.shape[0])]\n for _ in range(n_sampling):\n resampled_X_list = [resample(X) for X in X_list]\n model = self.fit(resampled_X_list)\n for i, am in enumerate(model.adjacency_matrices_):\n adjacency_matrices_list[i].append(am)\n\n result_list = []\n for adjacency_matrices in adjacency_matrices_list:\n result_list.append(BootstrapResult(adjacency_matrices))\n\n return result_list", "def boot_matrix(z, B):\n\n n = len(z) # sample size\n idz = np.random.randint(0, n, size=(B, n)) # indices to pick for all boostrap samples\n return z[idz]", "def bootstrap_two_sample_diff_in_proportions(p_group_1_count, p_group_1_sample_size, p_group_2_count, p_group_2_sample_size, p_size=1, p_alpha=.05):\n\n\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n original_group_1_proportion = p_group_1_count / p_group_1_sample_size \n original_group_2_proportion = p_group_2_count / p_group_2_sample_size \n \n original_proportion_diff = original_group_1_proportion - original_group_2_proportion\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n # Create an array of zeros and ones with the same proportions as group_1\n\n arr_1 = np.array([0] * (p_group_1_sample_size - p_group_1_count) + [1] * p_group_1_count)\n\n np.random.shuffle(arr_1)\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n # Create an array of zeros and ones with the same proportions as group_2\n\n arr_2 = np.array([0] * (p_group_2_sample_size - p_group_2_count) + [1] * p_group_2_count)\n\n np.random.shuffle(arr_2)\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Initialize array of replicates: \n bs_replicates = np.empty(p_size)\n\n # Generate replicates\n for i in range(p_size):\n\n # Generate bootstrap sample for arr_1 and arr_2 \n bs_sample_arr_1 = np.random.choice(arr_1, len(arr_1))\n bs_sample_arr_2 = np.random.choice(arr_2, len(arr_2))\n \n # Get the proportions on the bootstrap samples \n bs_sample_arr_1_proportion = np.sum(bs_sample_arr_1) / len(bs_sample_arr_1)\n bs_sample_arr_2_proportion = np.sum(bs_sample_arr_2) / len(bs_sample_arr_2)\n\n # Add the diff of those proportions to the bs_replicates array:\n 
bs_replicates[i] = bs_sample_arr_1_proportion - bs_sample_arr_2_proportion\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Get the bootstrap mean and standard error\n bs_mean = np.mean(bs_replicates)\n bs_std = np.std(bs_replicates)\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Get the confidence interval\n lower_critical_value, upper_critical_value = get_two_tailed_critical_values(p_alpha = p_alpha)\n\n ci_lower, ci_upper = np.percentile(bs_replicates, [lower_critical_value*100, upper_critical_value*100])\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n Result = namedtuple(\n 'Result', [\n 'original_group_1_proportion', \n 'original_group_2_proportion', \n 'original_proportion_diff', \n 'bs_mean', \n 'bs_std', \n 'ci_lower',\n 'ci_upper',\n 'bs_replicates'\n ]\n )\n\n result = Result(\n original_group_1_proportion,\n original_group_2_proportion,\n original_proportion_diff,\n bs_mean,\n bs_std,\n ci_lower,\n ci_upper,\n bs_replicates\n )\n\n return result\n # -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~", "def residual_bootstrap(self, X: np.ndarray, y: np.ndarray, n=None, B=1000, model=None):\n # fit the model if it hasn't been run\n if model.run is False:\n model.fit(X, y);\n resid = model.residuals\n pred = model.predictions\n boot_est = [None] * B\n result = {} # to store the mean, std_err\n index = 0 \n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n boot_yi = pred + resid[idx]\n model.fit(X, boot_yi)\n boot_est[index] = tuple(model.theta)\n index += 1\n \n #self.boot_est['std_err'] = np.std(statistic, ddof=1, axis=0)\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est, axis=0)\n result['est_err'] = np.std(boot_est, ddof=1, axis=0)\n return result", "def bootstrap(m, axis=0, n=None, replace=True, return_holdout=False):\n if n is None:\n n = m.shape[axis]\n idx = np.random.choice(m.shape[axis], n, replace=replace)\n bootstrap = np.take(m, idx, axis=axis)\n if return_holdout:\n ran = np.arange(m.shape[axis])\n holdout_idx = ran[np.array([x not in idx for x in ran])]\n holdout = np.take(m, holdout_idx, axis=axis)\n return bootstrap, holdout\n else:\n return bootstrap", "def draw_bootstrap_pairs(x, y, func, size=1):\n\n # Set up array of indices to sample from: inds\n inds = np.arange(len(x))\n\n # Initialize replicates: bs_replicates\n bs_replicates = np.empty(size)\n\n # Generate replicates\n for i in range(size):\n bs_inds = np.random.choice(inds, len(inds))\n bs_x, bs_y = x[bs_inds], y[bs_inds]\n bs_replicates[i] = func(bs_x, bs_y)\n\n return bs_replicates", "def run_f(df, sample_number):\n samples = normal_custom(df.get(Model.MEAN_KEY), df.get(Model.STD_KEY), n_sample=sample_number) # Normal_custom imported from helper_func\n return samples", "def bootstrap_ci(data: np.ndarray,\n stat_fcn,\n num_reps: int,\n alpha: float,\n ci_sides: int,\n bias_correction: bool = False,\n studentized: bool = False,\n seed: int = None):\n assert isinstance(data, np.ndarray)\n assert isinstance(num_reps, int) and num_reps > 0\n assert ci_sides == 1 or ci_sides == 2\n\n # Compute the statistic of interest based on the empirical distribution (input data)\n stat_emp = stat_fcn(data)\n\n # Get the bootstrap replications\n if data.ndim == 1:\n 
# Size of the samples drawn by the bootstrap method have to be equal input sample, since the variance of the\n # statistic to be computed depends on sample size\n size_sample = data.shape[0]\n # Set the seed if provided\n if seed is not None:\n np.random.seed(seed)\n # Draw samples of data with replacement (uniform weights)\n data_bs = np.random.choice(data, (size_sample, num_reps), replace=True)\n else:\n # Only use this function for 1D data sets\n raise NotImplementedError\n\n # Compute the statistic of interest based on the resampled distribution\n # Do it along each row (axis=0) -->> bootstrap replications\n stat_bs = np.apply_along_axis(stat_fcn, 0, data_bs) # dim = 1 x num_reps\n\n # Correct for the bias introduced by bootstrapping\n # Note: other estimates of the bias-correction factor than stat_emt possible, see [4]\n if bias_correction:\n # bias-corrected statistic (see (2) in [2], or (11.10) in [3])\n stat_bs_bc = 2*stat_emp - np.mean(stat_bs) # same as bias = mean_repl - stat_emp; repl_bc = stat_emp - bias\n stat_ret = stat_bs_bc # use the correction also for the bs replications? -->> No (so far)\n # Note: bias-correction can be dangerous in practice. Even though T_bc(D) is less biased than T(D),\n # the bias-corrected estimator may have substantially larger variance. This is due to a possibly higher\n # variability in the estimate of the bias, particularly when computed from small data sets.\n else:\n # Return the estimator based on the original sample a.k.a. empirical distribution\n stat_ret = stat_emp\n\n # Compute the deviation to the value of the statistic based on the empirical distribution (see [7])\n # This is analogous to the deviation of the empirical value around the true population value\n # i.e. delta = stat_emp - stat_pop\n # Note: it makes no difference if one uses the percentile operator before or after this difference\n delta_bs = stat_bs - stat_emp # dim = 1 x num_reps\n\n # Confidence interval with asymptotic refinement (a.k.a. percentile-t method)\n if studentized:\n # Compute the standard deviation of the original sample\n se_emp = np.std(data, ddof=0)/np.sqrt(data.shape[0]) # for dividing by (n-1) set ddof=1\n if se_emp < 1e-9:\n warn('Standard deviation in the empirical data (se_emp) is below 1e-9.', UserWarning)\n\n # Compute the standard error of the replications for the bootstrapped t-statistic\n se_bs = np.std(stat_bs, ddof=0)/np.sqrt(data_bs.shape[0]) # dim = num_reps x 1\n if se_bs < 1e-9: # use any for version 2 above\n warn('Standard deviation in the bootstrapped data (se_bs) is below 1e-9. '\n 'Setting confidence interval bounds to infinity.', UserWarning)\n return stat_ret, [-np.infty, np.infty]\n\n # Compute the t-statistic of the replications\n t_bs = delta_bs/se_bs # is consistent with [3, p. 360]\n\n if ci_sides == 2: # Two-sided confidence interval\n t_bs.sort()\n t_lo, t_up = np.percentile(t_bs, [100*alpha/2., 100 - 100*alpha/2.])\n ci_lo = stat_emp - t_up*se_emp # see [3, (11.6) p. 364]\n ci_up = stat_emp - t_lo*se_emp # see [3, (11.6) p. 364]\n\n elif ci_sides == 1: # One-sided confidence interval (upper bound)\n t_bs.sort()\n t_lo = np.percentile(t_bs, 100*alpha)\n ci_lo = -np.inf\n ci_up = stat_emp - t_lo*se_emp # see [3, (11.6) p. 364]\n\n else:\n raise pyrado.ValueErr(given=ci_sides, eq_constraint=\"1 or 2\")\n\n # Confidence interval without asymptotic refinement (a.k.a. 
basic method)\n else:\n if ci_sides == 2: # Two-sided confidence interval\n delta_bs.sort()\n delta_lo, delta_up = np.percentile(delta_bs, [100*alpha/2., 100 - 100*alpha/2.])\n ci_lo = stat_emp - delta_up\n ci_up = stat_emp - delta_lo\n\n elif ci_sides == 1: # One-sided confidence interval (upper bound)\n delta_bs.sort()\n delta_lo = np.percentile(delta_bs, 100*alpha)\n ci_lo = -np.inf\n ci_up = stat_emp - delta_lo\n\n else:\n raise pyrado.ValueErr(given=ci_sides, eq_constraint=\"1 or 2\")\n\n return stat_ret, [ci_lo, ci_up]", "def apply():\r\n result = dataSampling(str, \"hhhhhhahhhhhahhahahahahhahahha\", 5)\r\n final_res = dataScreening(result, \"ha\")\r\n print(final_res)", "def generate_data(sample_size, noise_variance):\n \n # generate true beta\n A = np.array([[1]*15, [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]]).T\n B = np.array([[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0], [1]*15]).T\n x_shape = A.shape[0]\n y_shape = B.shape[0]\n \n X_train = np.random.randn(sample_size, x_shape, y_shape) \n X_train_vec = np.reshape(X_train, (sample_size, x_shape*y_shape))\n \n cross_beta = A @ B.T\n vec_cross_beta = np.reshape(cross_beta, (x_shape*y_shape, 1))\n cross_norm = np.linalg.norm(cross_beta, 'fro')\n cross_beta = cross_beta / cross_norm\n Y_soft = np.zeros((sample_size, 1))\n \n for i in range(sample_size):\n epsilon = noise_variance * np.random.randn(1, 1)\n x_i = X_train_vec[i, :]\n y_i = (x_i @ vec_cross_beta) + epsilon\n Y_soft[i, :] = y_i\n \n Y_hard = np.sign(Y_soft)\n \n return cross_beta, X_train, Y_hard, Y_soft", "def bootstrap_mean(x, B=10000, alpha=0.05, plot=False):\n\n # Deterministic things\n n = len(x) # sample size\n orig = x.mean() # sample mean\n se_mean = x.std()/np.sqrt(n) # standard error of the mean\n qt = stats.t.ppf(q=1 - alpha/2, df=n - 1) # Student quantile\n\n # Generate boostrap distribution of sample mean\n xboot = boot_matrix(x, B=B)\n sampling_distribution = xboot.mean(axis=1)\n\n # Standard error and sample quantiles\n se_mean_boot = sampling_distribution.std()\n quantile_boot = np.percentile(sampling_distribution, q=(100*alpha/2, 100*(1-alpha/2)))\n\n # # RESULTS\n # print(\"Estimated mean:\", orig)\n # print(\"Classic standard error:\", se_mean)\n # print(\"Classic student c.i.:\", orig + np.array([-qt, qt])*se_mean)\n # print(\"\\nBootstrap results:\")\n # print(\"Standard error:\", se_mean_boot)\n # print(\"t-type c.i.:\", orig + np.array([-qt, qt])*se_mean_boot)\n # print(\"Percentile c.i.:\", quantile_boot)\n # print(\"Basic c.i.:\", 2*orig - quantile_boot[::-1])\n\n if plot:\n plt.hist(sampling_distribution, bins=\"fd\")\n # return sampling_distribution\n return np.round(orig, decimals=2), np.round(quantile_boot, decimals=2)", "def generate_samples(self, no=10):\n observations = []\n state_sequence = []\n initial_state = np.random.choice(\n self.latent_variable_markov_chain.states,\n p=self.prior_probabilities)\n state_sequence.append(initial_state)\n observations.append(self.observation_from_state(initial_state))\n current_state = initial_state\n for i in range(2, no):\n next_state = self.latent_variable_markov_chain.next_state(current_state)\n state_sequence.append(next_state)\n observations.append(self.observation_from_state(next_state))\n current_state = next_state\n return observations, state_sequence", "def boot_matrix(z, B):\n z = np.array(z).flatten()\n n = len(z) # sample size\n idz = np.random.randint(0, n, size=(B, n)) # indices to pick for all boostrap samples\n return z[idz]", "def _make_boot_index(elements, niter):\n return 
numpy.random.randint(low=0, high=elements, size=(niter, elements))", "def generate_data(data, samples, targeted=True, start=0, inception=False):\n inputs = []\n targets_1hot = []\n i = 0\n samples_sofar = 0\n while samples_sofar < samples:\n i += 1\n if torch.argmax(model(torch.tensor(data.test_data[start+i:start+i+1]+0.5, device=\"cuda\", dtype=torch.float32).permute(0, 3, 1, 2))) != np.argmax(data.test_labels_1hot[start+i]):\n continue\n\n if targeted:\n if inception:\n seq = random.sample(range(1, 1001), 10)\n else:\n seq = range(data.test_labels_1hot.shape[1])\n\n # print ('image label:', torch.argmax(data.test_labels[start+i]))\n for j in seq:\n # skip the original image label\n if (j == torch.argmax(data.test_labels_1hot[start+i])) and (inception == False):\n continue\n inputs.append(data.test_data[start+i])\n targets_1hot.append(\n torch.eye(data.test_labels_1hot.shape[1])[j])\n else:\n inputs.append(data.test_data[start+i])\n targets_1hot.append(data.test_labels_1hot[start+i])\n\n samples_sofar += 1\n\n inputs = torch.tensor(inputs).permute(0, 3, 1, 2)\n targets_1hot = torch.tensor(targets_1hot)\n\n return inputs, targets_1hot", "def fit_preprocessing_fn_numpy(batch: np.ndarray):\n x = preprocessing_fn_numpy(batch)\n x = np.stack([x_i[np.random.randint(x_i.shape[0])] for x_i in x])\n return x", "def test_ks_boot(self):\n D, Pval = ks_boot(self.x1[:10], self.x2[:10], num_reps=10)", "def test_ks_boot(self):\n D, Pval = ks_boot(self.x1[:10], self.x2[:10], num_reps=10)", "def generate_data(data, samples, targeted=True, start=0, inception=True):\n \n assert (targeted==True and start==0 and inception==True)\n \n \n inputs = []\n targets = []\n \n '''\n for i in range(samples):\n if targeted:\n if inception:\n seq = random.sample(range(1,1001), 10)\n else:\n seq = range(data.test_labels.shape[1])\n\n for j in seq:\n if (j == np.argmax(data.test_labels[start+i])) and (inception == False):\n continue\n inputs.append(data.test_data[start+i])\n targets.append(np.eye(data.test_labels.shape[1])[j])\n else:\n inputs.append(data.test_data[start+i])\n targets.append(data.test_labels[start+i])\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n '''\n\n return inputs, targets", "def draw_bs_reps(data, func, reps=1):\n bs_replicates = np.empty(reps)\n for i in range(reps):\n bs = np.random.choice(data, replace=True, size=len(data))\n bs_replicates[i] = np.mean(bs)\n return func(bs_replicates)", "def data_feeder_2():\n return random.sample(range(100), 10)", "def generate_data(self,seed):\n X, y = make_classification( n_samples = 250, random_state = seed )\n # Add bias term\n X = np.concatenate( ( np.ones( ( 250, 1 ) ), X ), axis = 1 )\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( \n X, y, test_size = 50, random_state = seed )", "def regression_bootstrap(self, X: np.ndarray, y: np.ndarray, n=None, B=1000, model=None):\n boot_est = [None] * B\n result = {}\n if model.run is False:\n model.fit(X, y);\n thetas = model.theta\n index = 0\n for _ in range(B):\n idx = np.random.randint(low=0, high=n, size=n)\n model.fit(X[idx], y[idx]);\n boot_est[index] = tuple(model.theta)\n index += 1\n\n result = {}\n result['estimates'] = boot_est\n result['est_mean'] = np.mean(boot_est, axis=0)\n result['est_err'] = np.std(boot_est, ddof=1, axis=0)", "def gen_data(low, high, n_samples, scale=4, test_size=0.2, random_state=3):\n np.random.seed(15)\n X = np.random.uniform(low, high, size=n_samples)\n\n # generate the response from the ground truth function and add\n # some random 
noise to it, scale controls the variance of the noise.\n y = ground_truth(X) + np.random.normal(scale=scale, size=n_samples)\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=test_size, random_state=random_state)\n\n return X_train, X_test, y_train, y_test", "def bootstrap_sample_generator_1D(samples: Union[NumpyFloatArray, NumpyIntArray]):\n n_samples = samples.shape[0]\n\n while True:\n _indices = np.random.randint(0, high=n_samples, size=n_samples)\n\n yield samples[_indices]", "def bs_run(bs_start, bs_end, bs_dir):\n bs_start = int(bs_start)\n bs_end = int(bs_end)\n\n fmri_smooth = os.path.join(bs_dir, 'fmri_smooth') \n fmri_little = os.path.join(bs_dir, 'fmri_little') \n bs_ncuts = os.path.join(bs_dir, 'ncuts')\n\n session_smooth = './session1_smooth'\n session_little = './session1_little'\n \n host_name = socket.gethostname()\n\n if not os.path.exists(bs_dir):\n os.makedirs(bs_dir)\n\n if not os.path.exists( fmri_smooth ):\n os.makedirs( fmri_smooth )\n\n if not os.path.exists(fmri_little):\n os.makedirs(fmri_little)\n\n if not os.path.exists(bs_ncuts):\n os.makedirs(bs_ncuts)\n\n for bsid in range(bs_start, bs_end + 1):\n print('----->>>> Bootstrap {0} begin: <<<-----------'.format(bsid) )\n \n # resampling and normalizaion.\n resample(session_smooth, fmri_smooth, bsid)\n resample(session_little, fmri_little, bsid)\n\n one_run(fmri_smooth, fmri_little, bsid, bs_dir)\n\n return", "def bootstrap_interval(data, percentiles=(2.5, 97.5), n_boots=100):\n # Create empty array to fill the results\n bootstrap_means = np.zeros([n_boots, data.shape[-1]])\n for ii in range(n_boots):\n # Generate random indices for data *with* replacement, then take the sample mean\n random_sample = resample(data)\n bootstrap_means[ii] = random_sample.mean(axis=0)\n\n # Compute the percentiles of choice for the bootstrapped means\n percentiles = np.percentile(bootstrap_means, percentiles, axis=0)\n return percentiles", "def stationary_bootstrap(data,B,w):\n (t,k) = data.shape\n assert k == 1, 'DATA must be a column vector'\n assert t>=2, 'DATA must have at least 2 observations.'\n assert w >= 0, 'Average block length W must be a positive scalar.'\n assert (isinstance(B,int)) and(B >= 0), 'Number of bootstrap samples B must be a \\\n positive scalar integer'\n \n # Probability of new block\n p=1/w\n \n #Set up the bsdata and indices\n indices = np.zeros((t,B))\n indices[0,:] = np.ceil(t*np.random.rand(1,B))\n \n # Set up random numbers\n select=np.random.rand(t,B) < p\n indices[select] = np.ceil(np.random.rand(1,np.sum(select))*t).flatten()\n \n \n for i in range(1,t):\n # Determine whether we stay (rand>p) or move to a new starting value\n indices[i,~select[i,:]] = indices[i-1,~select[i,:]]\n # Make sure indices don't go out of bound\n indices[indices>t-1] = indices[indices>t-1]-t-1\n # Indices need to be integers\n indices = indices.astype(int)\n # Sample data\n bsdata=data[indices]\n # Get rid of extra dimension that comes from sampling\n bsdata = bsdata.reshape((t,B))\n \n return bsdata, indices", "def random_resample(*args, samples,\n function=None, function_kwargs=None, bundle_args=True,\n replace=True):\n samples_spec = samples.copy() # copy because use pop below\n args_sub = [obj.copy() for obj in args]\n dim_block_1 = [d for d, s in samples_spec.items() if s[1] == 1]\n\n # Do all dimensions with block_size = 1 together\n samples_block_1 = { dim: samples_spec.pop(dim) for dim in dim_block_1 }\n random_samples = {dim: \n np.random.choice(\n len(args_sub[0][dim]),\n size=n,\n 
replace=replace)\n for dim, (n, _) in samples_block_1.items()}\n args_sub = [obj.isel(\n {dim: random_samples[dim] \n for dim in (set(random_samples.keys()) & set(obj.dims))}) for obj in args_sub]\n\n # Do any remaining dimensions\n for dim, (n, block_size) in samples_spec.items():\n n_blocks = int(n / block_size)\n random_samples = [slice(x,x+block_size) \n for x in np.random.choice(\n len(args_sub[0][dim])-block_size+1, \n size=n_blocks,\n replace=replace)]\n args_sub = [xr.concat([obj.isel({dim: random_sample}) \n for random_sample in random_samples],\n dim=dim) \n if dim in obj.dims else obj \n for obj in args_sub]\n\n if function:\n if bundle_args:\n if function_kwargs is not None:\n res = function(*args_sub, **function_kwargs)\n else:\n res = function(*args_sub)\n else:\n if function_kwargs is not None:\n res = tuple([function(obj, **function_kwargs) for obj in args_sub])\n else:\n res = tuple([function(obj) for obj in args_sub])\n else:\n res = tuple(args_sub,)\n\n if isinstance(res, tuple):\n if len(res) == 1:\n return res[0]\n else:\n return res", "def get_cd_samples(self):\n \n if \"PCD\" in self.algorithm:\n \n input_vars = []\n \n given_vars = []\n \n else:\n \n input_vars = [self.minibatch_set]\n \n given_vars = {self.x_gibbs: self.train_inputs[self.minibatch_set,:]} \n \n get_samples = theano.function(inputs = input_vars,\n outputs = [self.p_xi_given_x_[-1], \n self.gibbs_samples[-1]\n ], \n givens = given_vars,\n #start the chain at the data distribution\n updates = self.gibbs_updates)\n \n return get_samples", "def generate_data(data, samples, targeted=True, start=0, inception=False):\n inputs = []\n targets = []\n labels = []\n true_ids = []\n for i in range(samples):\n if targeted:\n if inception:\n # for inception, randomly choose 10 target classes\n seq = np.random.choice(range(1, 1001), 1)\n # seq = [580] # grand piano\n else:\n # for CIFAR and MNIST, generate all target classes\n seq = range(data.test_labels.shape[1])\n\n # print ('image label:', np.argmax(data.test_labels[start+i]))\n for j in seq:\n # skip the original image label\n if (j == np.argmax(data.test_labels[start + i])) and (inception == False):\n continue\n inputs.append(data.test_data[start + i])\n targets.append(np.eye(data.test_labels.shape[1])[j])\n labels.append(data.test_labels[start + i])\n true_ids.append(start + i)\n else:\n inputs.append(data.test_data[start + i])\n targets.append(data.test_labels[start + i])\n labels.append(data.test_labels[start + i])\n true_ids.append(start + i)\n\n inputs = np.array(inputs)\n targets = np.array(targets)\n labels = np.array(labels)\n true_ids = np.array(true_ids)\n\n return inputs, targets, labels, true_ids", "def generate_synth_data(n):", "def generate_dataset(num_sequences=2**8):\n samples = []\n \n for _ in range(num_sequences): \n num_tokens = np.random.randint(1, 12)\n sample = ['a'] * num_tokens + ['b'] * num_tokens + ['EOS']\n samples.append(sample)\n \n return samples", "def main(feats_name, targets_name, model_name, n_boot, seed_start, output_filename, train_test_flag):\n\n #load feats and targets\n input_dict = {}\n input_dict['feats'] = 'data/%s' % (feats_name)\n input_dict['targets'] = 'data/%s' % (targets_name)\n #load the feats and targets\n df = pd.read_csv(\"%s\" % (input_dict['feats']))\n targets = pd.read_csv(\"%s\" % (input_dict['targets']))\n #drop columns not used for prediction\n drop_cols = [\"Unnamed: 0\",\"index\"]\n for dc in drop_cols:\n if dc in targets.columns:\n targets = targets.drop(dc,axis=1)\n if dc in df.columns:\n df = 
df.drop(dc,axis=1)\n #reduce to training or test set only if requested\n if (train_test_flag == 'train') and ('test_set' in df.columns):\n targets = targets[df['test_set'] == 0]\n df = df[df['test_set'] == 0]\n elif (train_test_flag == 'test') and ('test_set' in df.columns):\n targets = targets[df['test_set'] == 1]\n df = df[df['test_set'] == 1]\n df = df.drop('test_set', axis = 1)\n \n #broadcast the feats and targets\n df_b = sc.broadcast(df)\n targets_b = sc.broadcast(targets)\n\n #Set up the classifier. 3fold CV for selection of regularization term.\n if model_name == 'linear':\n model = LinearRegression(fit_intercept=True,\n normalize=False,\n copy_X=True,\n n_jobs=1) \n elif model_name == 'lasso':\n model = LassoCV(alphas = [.05,.1,.2],\n normalize = False,\n fit_intercept = True,\n verbose = False,\n copy_X = False,\n n_jobs = 3)\n elif model_name == 'ridge':\n model = RidgeCV(alphas = [.00001,.0001,.001,.01,.1,1,10,100,1000,10000],\n normalize = False,\n fit_intercept = True,\n verbose = 1,\n cv = 3)\n else:\n raise ValueError('model_name not recognized.')\n \n #Create an RDD that specifies prng seed to use\n samp_list = [(n,) for n in np.arange(seed_start, seed_start+n_boot)]\n samp_rdd = sc.parallelize(samp_list,n_boot) #create RDD with one partition for each row (second arg is number of partitions)\n #Create a function that takes a tuple as input and returns \n def func(tup):\n \"\"\"\n Takes as input a tuple containing an integer. The integer specifies the random seed that will be used to \n randomly sample, with replacement, observations from the feats set provided. The model is fitted to the \n sampled feats. Resulting best fit parameters, along with some other summary statistics and information are\n provided as input in a JSON string that will be written to the output file when all jobs are completed.\n \n Parameters\n ----------\n tup, rdd\n - series of tuples with different integer values defining the RNG seed to be used to sample observations\n \n Returns\n ----------\n tup[0], int\n - the seed that was used\n json.dumps(results_dict), str\n - dict in json format with the following keys:\n - alpha, the regularization term providing the best fit according to 3 fold cross-validation\n - random_state, the initial state used for fitting\n - training_feats, the name of the training_feats csv file\n - training_targets, the name of the target variable csv file\n - cv, the type of cross-validation used\n - sklearn_version, which version of sklearn was used\n - mse_min, the mean squared error for the test set on each fold\n - r2, the r-squared value (% var explained)\n - coef, parameter vector\n - intercept, intercept parameter\n - column_names, feature name corresponding to each parameter in the parameter vector\n \"\"\"\n #take a random sample with replacement\n np.random.seed(seed=tup[0]) #set the seed\n n_obs = np.shape(df_b.value)[0] #number of observations determines sample size\n samp = list(np.random.randint(0,high=n_obs,size=n_obs)) #draw the random sample with replacement\n #fit the model\n tic = time.time()\n results = model.fit(df_b.value.iloc[samp,:],np.ravel(targets_b.value.iloc[samp]))\n toc = tic - time.time()\n #save the results in a dict\n results_dict = {}\n results_dict['alpha'] = results.alpha_\n results_dict['random_state'] = results.random_state\n results_dict['training_feats'] = input_dict['feats']\n results_dict['training_targets'] = input_dict['targets']\n results_dict['cv'] = results.cv\n results_dict['sklearn_version'] = sklearn.__version__\n 
results_dict['mse_min'] = results.mse_path_.min()\n results_dict['r2'] = results.score(df_b.value.iloc[samp,:],np.ravel(targets_b.value.iloc[samp]))\n results_dict['coef'] = list(results.coef_)\n results_dict['intercept'] = results.intercept_\n results_dict['column_names'] = [i for i in df_b.value.columns]\n results_dict['fit_time'] = toc\n #convert results dict to json and save in tuple\n return(json.dumps(results_dict))\n\n #fit model in parallel\n results = samp_rdd.map(lambda p: func(p))\n #save to text file\n results.saveAsTextFile(output_filename)\n #stop the SparkContext.\n if not local_mode:\n sc.stop()", "def _generate_data(self, x_data, y_data, max_seq_len, digits, seq_len,\n n_samples, use_one_hot, class_partition,\n upsample_control):\n # modify seq_len in case we do upsampling control\n if upsample_control:\n upsample_factor = seq_len\n seq_len = 1\n if not self.two_class:\n raise NotImplementedError()\n\n # construct all possible classes\n classes = [\"\".join(seq) for seq in \\\n itertools.product(\"01\", repeat=seq_len)]\n\n # get the right number of samples per class to get a balanced data set\n # with the desired n_samples.\n num = n_samples\n div = len(classes)\n n_samples_per_class = [num // div + (1 if x < num % div else 0) \\\n for x in range (div)]\n\n # find indices of samples with the wanted digit class\n y_data = [np.argmax(y) for y in y_data]\n digit_idx = []\n digit_idx.append(np.where(np.asarray(y_data) == digits[0])[0])\n digit_idx.append(np.where(np.asarray(y_data) == digits[1])[0])\n\n # generate samples for every class\n samples = []\n labels = []\n for i,c in enumerate(classes):\n this_label = i\n digits_to_sample = [int(c[i]) for i in range(len(c))]\n for s in range(n_samples_per_class[i]):\n this_sample = None\n for d in digits_to_sample:\n rand_idx = self._rstate.randint(len(digit_idx[d]))\n sample_idx = digit_idx[d][rand_idx]\n digit_sample = x_data[sample_idx]\n if this_sample is None:\n this_sample = digit_sample\n else:\n this_sample = np.vstack((this_sample,digit_sample)) \n samples.append(this_sample)\n labels.append(this_label)\n\n # if configured sort labels into 2 classes\n labels = np.asarray(labels)\n if self.two_class and not upsample_control:\n lbl_mask = np.isin(labels, class_partition)\n labels[~lbl_mask] = 0\n labels[lbl_mask] = 1\n\n if upsample_control:\n for i,s in enumerate(samples):\n # Initial timestep is absolute start position of digit. 
To\n # translate to a higher resolution image, we can just multiply\n # the abolute position vby the scaling factor.\n upsample = s[0,:]*upsample_factor\n for t in np.arange(1,s.shape[0]):\n # don't do upsampling at end of strokes or end of digits\n if all((s[t,2] == 0, s[t,3] == 0)):\n # Repeat original stroke \"upsample_factor\" times, such\n # that the relative stroke length is identical if\n # images are normalized to same resolution.\n for k in range(upsample_factor):\n upsample = np.vstack((upsample, s[t,:]))\n else:\n upsample = np.vstack((upsample, s[t,:]))\n samples[i] = upsample\n\n # structure output data\n out_data = labels.reshape(-1, 1)\n if use_one_hot:\n n_classes = 2**seq_len\n if self.two_class:\n n_classes = 2\n\n # FIXME We shouldn't call this method if the validation set size is\n # zero.\n if out_data.size == 0:\n out_data = np.matlib.repmat(out_data, 1, n_classes)\n else:\n # FIXME use internal method `_to_one_hot` and set required class\n # attributes beforehand.\n one_hot_encoder = OneHotEncoder(categories=[range(n_classes)])\n one_hot_encoder.fit(npm.repmat(np.arange(n_classes), 1, 1).T)\n out_data = one_hot_encoder.transform(out_data).toarray()\n\n if self.target_per_timestep:\n out_data = np.matlib.repmat(np.asarray(out_data), 1, max_seq_len)\n\n # structure input data\n in_data = np.zeros((n_samples,max_seq_len,4))\n sample_lengths = np.zeros(n_samples)\n for i,s in enumerate(samples):\n in_data[i,:s.shape[0],:] = s\n sample_lengths[i] = s.shape[0]\n\n in_data = self._flatten_array(in_data)\n\n return in_data, out_data, sample_lengths", "def generate_data(n, data, labels, param, label):\n\n\tmu = param[0]\n\tsigma = param[1]\n\n\tfor i in range(n):\n\n\t\t# TODO: Recall the notation of x in the exercise sheet.\n\t\t# \t\tGenerate a 2-d Gaussian distributed data point plus an offset value for the bias.\n\t\t# \t\tUse our rand_gaussian method.\n\n\t\tx1 = rand_gaussian(mu[0], sigma[0]) \n\t\tx2 = rand_gaussian(mu[1], sigma[1]) \n\n\t\tdata_point = [1, x1, x2]\n\t\tdata.append(data_point)\n\t\tlabels.append(label)\n\n\treturn data,labels", "def sample_train_data(dataset ,target,data_len, resp = True ):\r\n np.random.seed(222)\r\n ixes = np.random.choice(dataset.index, data_len, replace = False)\r\n print(ixes)\r\n under_df = dataset.iloc[ixes]\r\n if resp==True:\r\n under_target = target.iloc[ixes]\r\n return under_df, under_target\r\n else:\r\n return under_df", "def sample_data_input_fn(params):\n window_size = params['window_size']\n batch_size = params['batch_size']\n\n dataset_names = sample_data.get_data_names()\n all_downsampled = [sample_data.get_downsampled_data(name) for name in dataset_names]\n np_dtype = all_downsampled[0].dtype\n _, num_columns = all_downsampled[0].shape\n assert num_columns == 3\n\n # For each data item, this computes\n time_diffs = [(x[1:, 0] - x[:-1, 0]) for x in all_downsampled]\n median_time_diff = np.median(np.concatenate(time_diffs, axis=0))\n lower, upper = median_time_diff * 0.8, median_time_diff * 1.2\n valid_start_window_indices = [\n get_window_valid_indices(d, lower, upper, window_size) for d in time_diffs\n ]\n for name, valid_indices in zip(dataset_names, valid_start_window_indices):\n if np.size(valid_indices) == 0:\n raise ValueError(\"{} has no valid window ranges\".format(name))\n\n def get_samples_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n assert idx_array.shape == (batch_size, )\n samp_results = np.zeros((batch_size, window_size, num_columns), dtype=np_dtype)\n for i, sample_idx in 
enumerate(idx_array):\n start_idx = random.choice(valid_start_window_indices[sample_idx])\n samp_results[i, :, :] = all_downsampled[sample_idx][start_idx: (\n start_idx + window_size)]\n assert samp_results.shape == (batch_size, window_size, num_columns)\n return samp_results\n\n def get_window_sample(idx_tensor):\n samples = tf.py_func(get_samples_py_op, [idx_tensor], np_dtype)\n samples.set_shape((batch_size, window_size, num_columns))\n return samples\n\n def random_negative_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n neg_idx_array = np.copy(idx_array)\n for i, idx in enumerate(idx_array):\n while neg_idx_array[i] == idx_array[i]:\n neg_idx_array[i] = random.randint(0, len(all_downsampled) - 1)\n return neg_idx_array\n\n def get_negative_window_sample(idx_tensor):\n neg_idx_tensor = tf.py_func(\n random_negative_py_op,\n [idx_tensor],\n idx_tensor.dtype)\n return get_window_sample(neg_idx_tensor)\n\n # Current sample method: First select sample index, then select window.\n num_samples = len(all_downsampled)\n if num_samples < 2:\n raise ValueError(\"Need at least 2 light curves for negative samples!\")\n dataset = tf.data.Dataset.range(num_samples)\n dataset = dataset.repeat().shuffle(num_samples * 2).batch(batch_size)\n\n positive = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_window_sample(idx_tensor),\n 'goal': tf.constant([1.0] * batch_size, dtype=tf.float64)\n })\n negative = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_negative_window_sample(idx_tensor),\n 'goal': tf.constant([0.0] * batch_size, dtype=tf.float64)\n })\n\n # TODO(gatoatigrado): Experiment with shuffling positive & negative within a batch.\n # Currently each batch is just positive or negative.\n assert positive.output_shapes == negative.output_shapes\n assert negative.output_types == positive.output_types\n dataset = tf.contrib.data.sample_from_datasets((positive, negative))\n assert dataset.output_shapes == negative.output_shapes\n return dataset", "def get_bootstrap_CI(self, alpha, num_samples):\n return None", "def get_bootstrap_CI(self, alpha, num_samples):\n return None", "def _chunk_boot_ols_coefs(dat, formula, weights, seed):\n # Random sample with replacement from all data\n dat = dat.sample(frac=1, replace=True, random_state=seed)\n y, x = dmatrices(formula, dat, 1, return_type=\"dataframe\")\n b = _ols(\n x, y, robust=None, n_lags=1, cluster=None, all_stats=False, weights=weights\n )\n return list(b)", "def get_data(generator, random, bench_id):\n x_train, y_train, x_test, y_test = generator(random, bench_id)\n x_train = np.c_[np.ones(len(x_train)), x_train]\n x_test = np.c_[np.ones(len(x_test)), x_test]\n return x_train, y_train, x_test, y_test", "def generate_datasets(self, rand=None, *args, **kwargs):\n raise NotImplementedError()", "def concoct_dataset(n_per_label, feat_specs, sigma=0, shuffle=True):\n samples = dict((label, [list() for i in range(n)]) for (label,n) in enumerate(n_per_label))\n for feat_spec in feat_specs:\n for (label, breakdown) in enumerate(feat_spec):\n if shuffle: random.shuffle(samples[label])\n s = 0\n for (value, n_per_value) in enumerate(breakdown):\n if s+n_per_value > len(samples[label]): raise Exception(str(feat_spec)+' has too many samples')\n for i in range(n_per_value):\n samples[label][s+i].append(np.random.normal(value, sigma))\n s += n_per_value\n if s < len(samples[label]): raise Exception(str(feat_spec)+' has too few samples')\n print(samples)\n return 
[ExpressionProfile(str(i),str(label),values) for (label, value_sets) in samples.items() for (i,values) in enumerate(value_sets)]", "def gen_batch_function(data_folder, image_shape, seed=None, samples_limit=None):\n # Grab image and label paths\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))\n }\n background_color = np.array([255, 0, 0])\n\n if samples_limit:\n image_paths = image_paths[0:samples_limit]\n\n samples_n = len(image_paths)\n\n rnd = random.Random(seed)\n\n def get_batches_fn(batch_size):\n \"\"\"\n\t\tCreate batches of training data\n\t\t:param batch_size: Batch Size\n\t\t:return: Batches of training data\n\t\t\"\"\"\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn, samples_n" ]
[ "0.7176311", "0.71086556", "0.7089425", "0.7087817", "0.7020718", "0.68657595", "0.6752474", "0.6727999", "0.6657717", "0.63728184", "0.62784743", "0.62304413", "0.62304413", "0.61555386", "0.60460633", "0.6008815", "0.6005979", "0.6003459", "0.59947544", "0.5991532", "0.59781086", "0.5943359", "0.5922075", "0.59154767", "0.58596087", "0.5840936", "0.5814761", "0.5803637", "0.58029264", "0.5788222", "0.5777874", "0.576962", "0.5769145", "0.57442874", "0.5738673", "0.56910866", "0.5687242", "0.56782025", "0.5666396", "0.5633081", "0.56290936", "0.56258667", "0.55954534", "0.555256", "0.55378675", "0.5504598", "0.545898", "0.54535204", "0.54351836", "0.54207605", "0.5414666", "0.54056466", "0.53938586", "0.5353668", "0.5350758", "0.53477854", "0.5340027", "0.53385884", "0.53221226", "0.53150564", "0.53044397", "0.52987725", "0.52978593", "0.52944654", "0.52896094", "0.5278632", "0.52719754", "0.5270111", "0.5267911", "0.5259677", "0.52547216", "0.5251947", "0.5251947", "0.525162", "0.5243586", "0.52387327", "0.5234392", "0.5233803", "0.52325356", "0.52309394", "0.5227729", "0.52239573", "0.52210677", "0.5213728", "0.52082694", "0.52082086", "0.52065307", "0.5206424", "0.52063817", "0.5204529", "0.51983255", "0.5183573", "0.5179715", "0.5168413", "0.5168413", "0.516717", "0.5162833", "0.51620907", "0.51606476", "0.5160086" ]
0.8077993
0
Split the dataset by features and labels
def split(
    self,
    df,
    iteration_col,
    episode_col,
    iteration_order,
    lagger_str,
    current_row,
    feature_cols,
    label_cols,
    augmented_cols,
):
    logger.info(
        f"Iteration order set to {iteration_order} so using {current_row} from {lagger_str} {iteration_order} row"
    )

    # We group by episode and iteration indices to make dataset episodic
    df = df.sort_values(by=[episode_col, iteration_col])

    # Create a lagged dataframe for capturing inputs and outputs
    # when iteration_order < 0, this will consist of the features
    # since we are doing a shift-backwards
    # when iteration_order > 0, this will consist of labels
    # since we are doing a shift-forward
    lagged_df = df.groupby(by=episode_col, as_index=False).shift(
        iteration_order * -1
    )
    lagged_df = lagged_df.drop([iteration_col], axis=1)

    # if iteration order is less than 0
    # then the actions, configs should not be lagged
    # only states should be lagged
    # features = lagged_df[states] + df[actions, configs]
    # labels = df[states]
    if iteration_order < 0:
        features_df = lagged_df[feature_cols]
        features_df[augmented_cols] = df[augmented_cols]
    # if iteration order is greater than 0
    # then features = states, actions, configs from current row (df)
    # labels = states from next row (lagged_df)
    else:
        features_df = df[feature_cols]
        # TODO: check, is this always redundant?
        # i.e., is feature_cols a superset of augmented_cols
        features_df[augmented_cols] = df[augmented_cols]

    # eventually we will join the labels_df with the features_df
    # if any columns are matching then rename them
    if bool(set(feature_cols) & set(label_cols)):
        features_df = features_df.rename(
            columns=lambda x: "prev_" + x if x in label_cols else x
        )
    self.feature_cols = list(features_df.columns.values)
    self.label_cols = list(label_cols)
    logger.info(f"Feature columns are: {self.feature_cols}")
    logger.info(f"Label columns are: {self.label_cols}")

    # joined_df = df.join(features_df)
    vars_to_keep = (
        [episode_col, iteration_col] + self.feature_cols + self.label_cols
    )
    if iteration_order < 0:
        labels_df = df[[episode_col, iteration_col] + self.label_cols]
    else:
        labels_df = df[[episode_col, iteration_col]].join(lagged_df[self.label_cols])

    return labels_df.join(features_df)[vars_to_keep]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(data, labels):\r\n # Split the data into train and test\r\n X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.30, random_state = 42)\r\n return(X_train, y_train, X_test, y_test)", "def split_dataset(instances, labels, train_split=0.8):\n split = int(train_split * len(instances))\n train_data, train_labels = instances[:split], labels[:split]\n test_data, test_labels = instances[split:], labels[split:]\n\n return train_data, train_labels, test_data, test_labels", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n # Split dataset into train and test dataset\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],train_size=train_percentage)\r\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n \n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\r\n train_size=train_percentage, random_state=42)\r\n return train_x, test_x, train_y, test_y", "def partition(self, data, labels):\n\t\t#TODO remove\n\t\tprint(\"label shape {}\".format(labels.shape))\n\t\treturn self.kfold.split(data[0], labels)", "def data_split(data, labels, train_ratio=0.5, rand_seed=42):\n\n assert 0 <= train_ratio <= 1, \"Error: training set ratio must be between 0 and 1\"\n\n x_train, x_temp, y_train, y_temp = train_test_split(data,\n labels,\n train_size=train_ratio,\n random_state=rand_seed)\n\n x_val, x_test, y_val, y_test = train_test_split(x_temp,\n y_temp,\n train_size=0.5,\n random_state=rand_seed)\n\n return x_train, x_val, x_test, y_train, y_val, y_test", "def split_train_test(df_train, labels):\n n_train = np.shape(df_train)[0]\n X = {'train': [], 'holdout': []} # features\n Y = {'train': [], 'holdout': []} # labels\n p10 = int(0.1 * n_train)\n X['holdout'] = df_train.iloc[-p10:]\n Y['holdout'] = labels[-p10:]\n X['train'] = df_train.iloc[:(n_train - p10)]\n Y['train'] = labels[:(n_train - p10)]\n return X, Y", "def _split_dataset(self, X, y, label, index, value, sample_weights=None):\n # YOUR CODE HERE\n # Hint: Do not forget to remove the index-th feature from X.\n # begin answer\n ret1=[]\n ret2=[]\n featVec=X[:,index]\n X=X[:,[i for i in range(X.shape[1]) if i!=index ]]\n for i in range(len(featVec)):\n if featVec[i]>=value:\n ret1.append(i)\n else:\n ret2.append(i)\n sub1_X = X[ret1,:]\n sub1_y = y[ret1]\n label_1=label[ret1]\n sub1_sample_weights=sample_weights[ret1]\n sub2_X = X[ret2,:]\n sub2_y = y[ret2]\n label_2=label[ret2]\n 
sub2_sample_weights=sample_weights[ret2]\n # end answer\n return sub1_X, sub1_y, label_1, sub1_sample_weights, sub2_X, sub2_y, label_2, sub2_sample_weights", "def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)", "def split_data(data, labels, val_size):\n # Shuffle index\n index = np.random.permutation(len(data))\n\n # Split into Datasets\n X_val = data[index][-val_size:]\n X_train = data[index][:-val_size]\n y_val = labels[index][-val_size:].ravel()\n y_train = labels[index][:-val_size].ravel()\n\n return X_train, X_val, y_train, y_val", "def split_data(train, parameters):\n labels = train.labels\n train_indices, val_indices = train_test_split(range(len(labels)),\n stratify=labels,\n random_state=parameters['seed'],\n test_size=parameters['validation_size'])\n return train_indices, val_indices", "def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test", "def data_splitting(data_features, data_targets, test_size):\n data_features_train, data_features_test, data_targets_train, data_targets_test = \\\n train_test_split(data_features,\n data_targets,\n test_size = test_size)\n\n return data_features_train, data_features_test, data_targets_train, data_targets_test", "def split_data_set_splitted(X, Y):\n\n # Uso la funcion de scikitlearn para separar el data_set\n # Esta funcion por defecto mezcla los datos para asegurar la representacion\n # de los datos en los dos subconjuntos\n #\n # Blanca Cano Camarero me comenta que ponga el stratify = Y porque asi se lo\n # indica el profesor Pablo Mesejo en una consulta realizada. 
En la referencia\n # que indico de scikitlearn tambien viene documentado este parametro\n # Lo que hace es evitar que haya clases que queden infrarepresentadas\n X_training, X_test, Y_training, Y_test= train_test_split(X, Y, train_size = 0.75, test_size = 0.25, stratify = Y)\n return X_training, X_test, Y_training, Y_test", "def splitData(data, class_label, seed, ratio):\n\t\n\trandom.seed(seed)\n\tsubset = data.clone()\n\tsize_data = subset.data.shape[0]\n\tn = int(np.floor(size_data * ratio)) # number of datasets in train\n\tindex = random.sample(range(1, size_data), n)\n\tsplit_list = [item for item in [0] for i in range(size_data)]\n\t\n\tfor i in index:\n\t\tsplit_list[i]=1\n\t\n\treturn split_list #returns list of indeces where 0 is test and 1 is training data ", "def get_transformer_splits(loader_cls, tokenizer, return_intent_labels=True):\n datasets = []\n for subset in SUBSETS:\n dataset = OODDataset(loader_cls(subset=subset), tokenizer.tokenize,\n return_intent_labels)\n dataset.vectorize_texts(tokenizer)\n datasets.append(dataset)\n return datasets", "def split_features_labels(self, batch):\n return batch if not self.unsupervised else (batch, batch)", "def dataset_stratified_split(split: float, dataset: np.ndarray, labels: np.ndarray) -> \\\n (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\n train_X, test_X, train_Y, test_Y = train_test_split(dataset,\n labels,\n test_size=split,\n stratify=labels,\n random_state=config.RANDOM_SEED,\n shuffle=True)\n return train_X, test_X, train_Y, test_Y", "def split_dataset(x, y, seed=0):\n # split the data into label and unlabel\n x_unlabel, x_label, _, y_label = \\\n train_test_split(\n x,\n y,\n test_size=0.1,\n random_state=seed,\n )\n\n # split data into train and test data\n x_train, x_test, y_train, y_test = \\\n train_test_split(\n x_label,\n y_label,\n test_size=0.2,\n random_state=seed,\n )\n\n return Dataset(\n x_unlabel,\n Data(x_train, None, y_train),\n Data(x_test, None, y_test)\n )", "def targetFeatureSplit( data ):\n\n target = []\n features = []\n for item in data:\n target.append( item[0] )\n features.append( item[1:] )\n\n return target, features", "def splitData(groupList, trainSize):\r\n from sklearn.model_selection import StratifiedShuffleSplit\r\n\r\n groupList[0]['text'] = cleanRealTexts(list(groupList[0]['text']))\r\n\r\n classLabels = np.array([])\r\n for i, group in enumerate(groupList):\r\n classLabels = np.append(classLabels, np.repeat(i, len(group)))\r\n\r\n classData = pd.concat(groupList).reset_index(drop=True)\r\n\r\n splits = list(StratifiedShuffleSplit(n_splits=i,\r\n test_size=1-trainSize,\r\n train_size=trainSize,\r\n random_state=0).split(X=classData, y=classLabels))[0]\r\n trainIdx, testIdx = splits\r\n\r\n trainData = classData.iloc[trainIdx]\r\n testData = classData.iloc[testIdx]\r\n trainLabels = classLabels[trainIdx]\r\n testLabels = classLabels[testIdx]\r\n\r\n return [[trainData, trainLabels], [testData, testLabels]]", "def split_data(df):\n # drop any instances that have missing values\n df = df.dropna()\n\n # define features\n features = df[['pitch_type', 'release_speed', 'release_spin_rate',\n 'if_fielding_alignment', 'launch_angle', 'launch_speed',\n 'hc_x', 'hc_y', 'stand', 'type', 'RH']]\n\n # make dummies for categorical features\n features = pd.get_dummies(features)\n\n # define label\n label = df['hit']\n\n # split data into test and training\n features_train, features_test, label_train, label_test = \\\n train_test_split(features, label, test_size=0.3)\n\n standard = 
StandardScaler()\n\n features_train = standard.fit_transform(features_train)\n features_test = standard.transform(features_test)\n\n return features_train, features_test, label_train, label_test", "def split_data_set(self, vectors, labels):\n num_of_images = len(vectors)\n num_of_train_data = floor(self.config['percent_to_train_data'] * num_of_images)\n\n indices = np.random.permutation(num_of_images) # A random permutation of all indices\n X_train = [vectors[i] for i in indices[:num_of_train_data]]\n y_train = [labels[i] for i in indices[:num_of_train_data]]\n X_test = [vectors[i] for i in indices[num_of_train_data:]]\n y_test = [labels[i] for i in indices[num_of_train_data:]]\n\n return X_train, y_train, X_test, y_test", "def train_test_data_split(node_features, labels, train_ratio=0.8):\n num_graph = node_features.shape[0]\n train_test_split = int(train_ratio*num_graph)\n x_train = node_features[:train_test_split,:,:] \n y_train = labels[:train_test_split,:,:] \n x_test = node_features[train_test_split:,:,:] \n y_test = labels[train_test_split:,:,:]\n np.save(\"data/node_features_train.npy\", x_train)\n np.save(\"data/node_features_test.npy\", x_test)\n np.save(\"data/labels_train.npy\", y_train)\n np.save(\"data/labels_test.npy\", y_test)\n return x_train, x_test, y_train, y_test", "def train_test_split(features, target, split_ts):\n\n # split features\n train_features = []\n test_features = []\n for feature in features:\n train_feature, test_feature = feature.split_after(split_ts)\n train_features.append(train_feature)\n test_features.append(test_feature)\n\n # split target\n train_target, test_target = target.split_after(split_ts)\n\n return (train_features, train_target, test_features, test_target)", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def split_data(images, labels):\n images, labels = shuffle_data_pair(images, labels)\n\n num_covid_points = sum(map(lambda label: label == 0, labels))\n\n # Calculate split\n num_test = int(num_covid_points * 0.1)\n num_covid_train = num_covid_points - num_test * 2\n num_other_train = int(num_covid_train * 1.1)\n\n # (train, validate, test) points added\n num_points_added = [\n [0, 0, 0], # COVID-19\n [0, 0, 0], # Viral pneumonia\n [0, 0, 0] # Normal\n ]\n\n # Datasets\n images_train = []\n labels_train = []\n images_validate = []\n labels_validate = []\n images_test = []\n labels_test = []\n\n # Add images and labels to datasets\n notifier.send(\" Adding images and labels to dataset...\")\n for i, label in enumerate(labels):\n print(f\" Point: {i} / {len(labels)}\")\n completed_labels = [False, False, False] # Enough of label added\n if all(completed_labels):\n break\n for j in range(3): # 0: COVID-19, 1: Viral pneumonia, 2: Normal\n if completed_labels[j]:\n continue\n if label == j:\n # Add training data\n can_add_training = False\n if j == 0: # COVID-19\n if num_points_added[j][0] < num_covid_train:\n can_add_training = True\n num_points_added[j][0] += 1\n elif num_points_added[j][0] < num_other_train: # Not COVID-19\n can_add_training = True\n num_points_added[j][0] += 1\n if can_add_training:\n images_train.append(images[i])\n labels_train.append(labels[i])\n break\n\n # Add validation data\n if num_points_added[j][1] < num_test:\n 
num_points_added[j][1] += 1\n images_validate.append(images[i])\n labels_validate.append(labels[i])\n break\n\n # Add testing data\n if num_points_added[j][2] < num_test:\n num_points_added[j][2] += 1\n images_test.append(images[i])\n labels_test.append(labels[i])\n break\n\n # Point couldn't be added anywhere: label is complete\n completed_labels[j] = True\n break\n\n # Shuffle all data\n notifier.send(\" Shuffling data...\")\n images_train, labels_train = shuffle_data_pair(\n images_train, labels_train\n )\n images_validate, labels_validate = shuffle_data_pair(\n images_validate, labels_validate\n )\n images_test, labels_test = shuffle_data_pair(\n images_test, labels_test\n )\n\n if PLOT_LABELS:\n # Plot data frequencies\n plt.hist(labels, bins=3)\n plt.title(\"Labels\")\n\n plt.hist(labels_train, bins=3)\n plt.title(\"Train Labels\")\n\n plt.hist(labels_validate, bins=3)\n plt.title(\"Validate Labels\")\n\n plt.hist(labels_test, bins=3)\n plt.title(\"Test Labels\")\n\n plt.show()\n\n # Make labels categorical\n notifier.send(\" Making labels categorical: train...\")\n labels_train = tf.keras.utils.to_categorical(labels_train)\n notifier.send(\" Making labels categorical: validate...\")\n labels_validate = tf.keras.utils.to_categorical(labels_validate)\n notifier.send(\" Making labels categorical: test...\")\n labels_test = tf.keras.utils.to_categorical(labels_test)\n\n notifier.send(\" Converting data to NumPy arrays...\")\n return \\\n np.array(images_train), np.array(images_validate), np.array(images_test), \\\n np.array(labels_train), np.array(labels_validate), np.array(labels_test)", "def split_by_feature(tX, y=None, feature22=None):\n # Split up the dataset by feature 22 by 0, 1 and >1\n tX_0 = tX[feature22 == 0]\n tX_1 = tX[feature22 == 1]\n tX_2 = tX[feature22 > 1]\n # Drop the undefined features\n #tX_0 = np.delete(tX_0, drop_0, axis=1)\n #tX_1 = np.delete(tX_1, drop_1, axis=1)\n print(\"Shape 0: {}, Shape 1: {}, Shape 2: {}\".format(tX_0.shape, tX_1.shape, tX_2.shape))\n if y is not None:\n y_0 = y[feature22 == 0]\n y_1 = y[feature22 == 1]\n y_2 = y[feature22 > 1]\n print(\"Shape 0: {}, Shape 1: {}, Shape 2: {}\".format(y_0.shape, y_1.shape, y_2.shape)) \n return tX_0, tX_1, tX_2, y_0, y_1, y_2\n else:\n return tX_0, tX_1, tX_2", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def train_test_split(measurements: np.ndarray, split: float = 0.8) -> (np.ndarray, np.ndarray):\n labels_measurements = [m.label for m in measurements]\n labels = np.unique(labels_measurements)\n\n for i, l in enumerate(labels):\n indices_label = np.argwhere(np.array(labels_measurements) == l).flatten()\n\n num_samples = indices_label.size\n if i == 0:\n measurements_train = measurements[indices_label][:int(split*num_samples)]\n measurements_test = measurements[indices_label][int(split*num_samples):]\n else:\n measurements_train = np.append(measurements_train, measurements[indices_label][:int(split*num_samples)])\n measurements_test = np.append(measurements_test, measurements[indices_label][int(split*num_samples):])\n\n np.random.shuffle(measurements_train)\n np.random.shuffle(measurements_test)\n\n return measurements_train, measurements_test", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n #print(labels[start:end])\n yield features[start:end], labels[start:end]", "def split_data(X, scaling, ids, y, 
split_ratio=0.2):\r\n split = int(X.shape[0] * split_ratio) # index must be int\r\n X_test = X[:split, :, :, :]\r\n scaling_test = scaling[:split, :]\r\n ids_test = ids[:split]\r\n y_test = y[:split, :]\r\n X_train = X[split:, :, :, :]\r\n scaling_train = scaling[split:, :]\r\n ids_train = y[split:]\r\n y_train = y[split:, :]\r\n\r\n return X_train, scaling_train, ids_train, y_train, X_test, scaling_test, ids_test, y_test", "def split_data(self, data):\n\n train_df, test_df = train_test_split(data, test_size=self.test_size, \n random_state=0, \n stratify=data[self.outcome_name])\n\n # print(\"Splitting data into training with \", train_df.shape, \"sampes and \",\n # test_df.shape, \"testing samples\")\n\n return train_df, test_df", "def split_data(x, y, ratio, seed=1):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Input:\n - x (ndarray) : binary prediction for set 1\n - y (ndarray) : binary prediction for set 2\n - ratio (ndarray) : binary prediction for set 3\n - seed (float) : indices of the data points in set 1 \n Output: \n - train_x (ndarray) : binary prediction for set 1\n - train_y (ndarray) : binary prediction for set 2\n - test_x (ndarray) : binary prediction for set 3\n - test_y (ndarray) : indices of the data points in set 1\n \"\"\"\n # set seed and shuffle the indices\n np.random.seed(seed)\n shuffle_indices = np.random.permutation(np.arange(len(y)))\n shuffled_y = y[shuffle_indices]\n shuffled_x = x[shuffle_indices]\n \n #splits the set according to the ratio on the shuffled set\n ratio_idx = int(np.floor(ratio*len(y)))\n train_y = shuffled_y[:ratio_idx]\n train_x = shuffled_x[:ratio_idx]\n test_y = shuffled_y[ratio_idx:]\n test_x = shuffled_x[ratio_idx:]\n return train_x, train_y, test_x, test_y", "def __split_dataset(self):\n self.train, self.valid, _, _ = train_test_split(self.data, self.data, test_size=0.2)\n self.valid, self.test, _, _ = train_test_split(self.valid, self.valid, test_size=0.5)", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def split_data(self):\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7)\n\n return X_train, X_test, y_train, y_test", "def batch_features_labels(features, labels, batch_size):\r\n for start in range(0, len(features), batch_size):\r\n end = min(start + batch_size, len(features))\r\n yield features[start:end], labels[start:end]", "def split_data(X_data, y_data):\n return cv.train_test_split(X_data, y_data, test_size=0.1, random_state=0)", "def regroup_dataset(labels):\r\n batch_y = labels.copy()\r\n for i, label in enumerate(labels):\r\n if label in [0, 15, 19]:\r\n batch_y[i]=0\r\n if label in [1, 2, 3, 4, 5,]:\r\n batch_y[i]=1\r\n if label in [6]:\r\n batch_y[i]=2\r\n if label in [7,8,9,10]:\r\n batch_y[i]=3\r\n if label in [11,12,13,14]:\r\n batch_y[i]=4\r\n if label in [16,17,18]:\r\n batch_y[i]=5\r\n \r\n print('regrouped label', batch_y.shape)\r\n return batch_y", "def read_in_and_split_data(iris_data):\n iris_data = 
datasets.load_iris()\n data = iris_data['data']\n targets = iris_data['target']\n train_data, test_data, train_targets, test_targets = train_test_split(data, targets, test_size=0.1) \n return (train_data, test_data, train_targets, test_targets)", "def split_data(X:np.ndarray, y:np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\n \n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)\n \n return X_train, X_val, y_train, y_val", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def preprocess(self,\n data: Dataset,\n features: Union[str, List[str]],\n label: str = 'label',\n use_label_name: bool = True,\n *args, **kwargs) -> List[Tuple[Any, Any]]:\n dataset = []\n for example in data:\n if isinstance(features, str):\n feat = example[features]\n elif isinstance(features, list):\n feat = tuple(example[f] for f in features)\n else:\n raise RuntimeError(f\"features should be str or list, but found: {features}\")\n lb = example[label]\n if use_label_name and lb != -1:\n # -1 label is used if there is no label (test set)\n lb = data.info.features[label].names[lb]\n dataset += [(feat, lb)]\n return dataset", "def split_dataset(X, Y, train_size=0.8):\n if train_size != 1.0:\n return train_test_split(\n X, Y,\n train_size=train_size,\n stratify=Y\n )\n else:\n X_, Y_ = shuffle(\n X, Y\n )\n return X_, [], Y_, []", "def split_dataset(self, test_size=0.20):\n\t\t(self.training_data, self.test_data, self.training_labels, self.test_labels) = train_test_split(self.training_data, self.training_labels, test_size=test_size)", "def split_dataset(data_set, train_size, test_size):\n # Generate random indices without replacement, to make train and test sets disjoint\n rand_indices = np.random.choice(data_set.shape[0], train_size+test_size, replace=False)\n feature_end = data_set.shape[1] - 1\n output_location = feature_end\n feature_offset = var.ALGORITHM_INFO['feature_offset']\n\n # Define the training and testing matrices\n x_train = data_set[rand_indices[0:train_size], feature_offset:feature_end]\n y_train = data_set[rand_indices[0:train_size], output_location]\n x_test = data_set[rand_indices[train_size:train_size+test_size], feature_offset:feature_end]\n y_test = data_set[rand_indices[train_size:train_size+test_size], output_location]\n favorite_test = data_set[rand_indices[train_size:train_size+test_size], 0]\n\n # Normalize features, with maximum value in training set\n # as realistically, this would be the only possibility\n\n for ii in range(x_train.shape[1]):\n maxval = np.max(np.abs(x_train[:, ii]))\n if maxval > 0:\n x_train[:, ii] = np.divide(x_train[:, ii], maxval)\n x_test[:, ii] = np.divide(x_test[:, ii], maxval)\n\n\n # Add a column of ones; done after to avoid modifying entire data_set\n x_train = np.hstack((x_train, np.ones((x_train.shape[0], 1))))\n x_test = np.hstack((x_test, 
np.ones((x_test.shape[0], 1))))\n\n return (x_train, y_train), (x_test, y_test), favorite_test", "def transform(self, dataset, labels):\n print(f\"Dropping {len(self.deficient)} deficient features...\")\n dataset.drop(columns=self.deficient, inplace=True)\n print(f\"Scanning {len(dataset)} samples for duplicates...\")\n duplicates = dataset.duplicated()\n print(f\"Dropping {sum(duplicates)} duplicate samples...\")\n dataset.drop(index=dataset.index[duplicates], inplace=True)\n dataset.reset_index(drop=True, inplace=True)\n labels.drop(labels=labels.index[duplicates], inplace=True)\n labels.reset_index(drop=True, inplace=True)\n return dataset, labels", "def split_stratify_train(data: pd.DataFrame, label_ratio_low: float, label_ratio_high: float, test_size=0.2):\n while True:\n X_train, X_test, y_train, y_test = train_test_split(data.drop(columns=['LABEL']), data['LABEL'],\n test_size=test_size)\n if (y_train.sum() / len(y_train) >= label_ratio_low) and (y_train.sum() / len(y_train) <= label_ratio_high):\n break\n logger.info(f'Label 1 ratio of train set after split:{y_train.sum() / len(y_train)}')\n return X_train, X_test, y_train, y_test", "def train_test_split(features, target, split_ts):\n train_features, test_features = features.split_after(split_ts)\n train_target, test_target = target.split_after(split_ts)\n\n return (train_features, train_target, test_features, test_target)", "def split_data_into_train_and_test(raw_training_data):\n train_set, test_set = train_test_split(raw_training_data, test_size=0.2, random_state=42)\n return train_set, test_set", "def data_split(X, y):\n folds = KFold(n_splits=SPLITS, shuffle=True, random_state=RANDOM_STATE)\n train_indices, validation_indices = list(folds.split(X))[-1][0], list(folds.split(X))[-1][1]\n\n X_train = X.iloc[train_indices]\n X_validation = X.iloc[validation_indices]\n\n y_train = y.iloc[train_indices]\n y_validation = y.iloc[validation_indices]\n\n return X_train, X_validation, y_train, y_validation", "def split_data(input_df, output_df):\n return train_test_split(input_df, output_df, test_size=0.2, random_state=42,\n stratify=output_df)", "def split ( self, X: np.ndarray, y: np.ndarray = None ):\n # Split the indices into `number_of_folds` subarray\n indices = self.get_indices ( X )\n split_indices = KFoldCV._get_indices_split ( indices = indices, number_of_folds = self.number_of_folds )\n for number_of_split in range ( self.number_of_folds ):\n # Return all but one split as train, and one split as test\n yield KFoldCV._get_one_split ( split_indices, number_of_split = number_of_split )\n # End split()", "def split_dataset(X: np.array, y: np.array, ratio=0.8):\n '''split dataset to train data and valid data'''\n X_train = X[:int(X.shape[0] * ratio)]\n y_train = y[:int(y.shape[0] * ratio)]\n X_valid = X[int(X.shape[0] * ratio):]\n y_valid = y[int(y.shape[0] * ratio):]\n dataset = tuple([X_train, y_train, X_valid, y_valid])\n\n return dataset", "def splitData(X, y):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)\n print(X_train)\n print(y_train)\n print(X_test)\n print(y_test)\n return X_train, X_test, y_train, y_test", "def data_split(config_path: Text) -> None:\n\n config = yaml.safe_load(open(config_path))\n\n dataset = pd.read_csv(config['featurize']['features_data'])\n train_dataset, test_dataset = train_test_split(\n dataset, \n test_size = config['data_split']['test_size'],\n random_state = config['data_split']['random_state']\n )\n\n train_csv_path = 
config['data_split']['train_path']\n test_csv_path = config['data_split']['test_path']\n train_dataset.to_csv(train_csv_path, index=False)\n test_dataset.to_csv(test_csv_path, index=False)", "def split(self,X,y=None):\n all_idx = pd.Series(np.arange(X.shape[0])) \n mbrg = int(X.shape[0]*self.embargo_pct)\n test_starts=[(i[0],i[-1]+1) for i in np.array_split(all_idx.values,self.n_splits)]\n for i, j in test_starts:\n t0 = all_idx.index[i] # start of test set\n test_indices = all_idx.values[i:j]\n maxT1Idx = all_idx.index.searchsorted(all_idx[test_indices].max())\n train_indices = all_idx.index.searchsorted(all_idx[all_idx<=t0].index)\n if maxT1Idx < X.shape[0]: \n train_indices=np.concatenate((train_indices,all_idx[maxT1Idx+mbrg:]))\n yield train_indices,test_indices", "def splitfeatdata(rawdata, fold=10):\n\n labeldata = []\n for row in rawdata:\n\n # if row[2] > 0:\n # label = 'pos'\n # elif row[2] == 0:\n # label = 'neutral'\n # else:\n # label = 'neg'\n\n\n label = row[2]\n labeldata.append((row[4], label))\n\n\n random.shuffle(labeldata)\n\n size = int(math.floor(len(labeldata) / 10.0))\n # train = labeldata[:split]\n # test = labeldata[split:]\n\n # code for k-fold validation referred from:\n # http://stackoverflow.com/questions/16379313/how-to-use-the-a-10-fold-cross-validation-with-naive-bayes-classifier-and-nltk\n claccuracy = []\n for i in range(fold):\n test_this_round = labeldata[i*size:][:size]\n train_this_round = labeldata[:i*size] + labeldata[(i+1)*size:]\n\n acc = myclassifier(train_this_round, test_this_round)\n\n claccuracy.append(acc)\n\n\n\n print os.getcwd()\n\n\n mySentClassifier = nltk.NaiveBayesClassifier.train(labeldata)\n f = open('../../../mySentClassifier2.pickle', 'wb')\n dump(mySentClassifier, f)\n f.close()\n\n\n return claccuracy", "def split_on_classifier(data, classifier):\n feature_values = []\n for point in data:\n classification = classifier.classify(point)\n if classification not in feature_values:\n feature_values.append(classification)\n classification_dict = {}\n for feature_value in feature_values:\n classification_dict[feature_value] = []\n for point in data:\n classification = classifier.classify(point)\n classification_dict[classification].append(point)\n return classification_dict", "def dataSplit(self,df):\n X = df['message']\n y = df['label']\n return X, y", "def train_test_set_split(dataset, dataset_name, test_size=0.1):\n train_indices_path = './' + dataset_name + '_train_indices(' + str(test_size) + ').txt'\n test_indices_path = './' + dataset_name + '_test_indices(' + str(test_size) + ').txt'\n try:\n train_indices = []\n test_indices = []\n file = open(train_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n train_indices.append(int(line[:-1]))\n file.close()\n file = open(test_indices_path, 'rt', encoding='utf-8')\n while True:\n line = file.readline()\n if not line:\n break\n test_indices.append(int(line[:-1]))\n file.close()\n train_labels = [dataset.targets[i] for i in train_indices]\n except FileNotFoundError:\n indices = np.arange(len(dataset))\n labels = np.array(dataset.targets)\n train_indices, test_indices, train_labels, _ = train_test_split(\n indices, labels, test_size=test_size, stratify=labels\n )\n file = open(train_indices_path, 'wt', encoding='utf-8')\n for i in train_indices:\n line = str(i) + '\\n'\n file.write(line)\n file.close()\n file = open(test_indices_path, 'wt', encoding='utf-8')\n for i in test_indices:\n line = str(i) + '\\n'\n file.write(line)\n 
file.close()\n\n train_set = torch.utils.data.Subset(dataset, indices=train_indices)\n test_set = torch.utils.data.Subset(dataset, indices=test_indices)\n return train_set, test_set, train_labels", "def split_data(num_samples, num_splits):\n\n kf = sklearn.model_selection.KFold(n_splits=num_splits, random_state=0);\n return kf.split(range(num_samples))", "def train(self, features, labels):\n pass", "def split_data(dataset_x, dataset_y, split_ratio):\n num_examples = len(dataset_x)\n training_x = dataset_x[:int(num_examples*split_ratio)]\n training_y = dataset_y[:int(num_examples*split_ratio)]\n\n validation_x = dataset_x[int(num_examples*split_ratio): num_examples]\n validation_y = dataset_y[int(num_examples*split_ratio): num_examples]\n\n training_y = np.asarray(training_y, dtype='float32')\n validation_y = np.asarray(validation_y, dtype='float32')\n return training_x, training_y, validation_x, validation_y", "def split ( self, y, X = None ):\n # Make sure y is an array\n y = np.array ( y ) if isinstance ( y, list ) else y\n\n # Groupby y and add integer indices.\n df_with_split = (\n pd.DataFrame ( { \"y\": y, \"index\": np.arange ( len ( y ) ) } )\n .groupby ( \"y\" ) [ \"index\" ]\n .apply ( self.add_split_col ) # Add col for split for instance\n )\n\n # For each fold, get train and test indices (based on col for split)\n for cv_split in np.arange ( self.number_of_folds - 1, -1, -1 ):\n train_bool = df_with_split [ \"split\" ] != cv_split\n test_bool = ~ train_bool\n # Yield index values of not cv_split and cv_split for train, test\n yield df_with_split [ \"index\" ].values [ train_bool.values ], df_with_split [\n \"index\"\n ].values [ test_bool.values ]\n # End split()", "def split_data(df: pd.DataFrame):\n size = int(df.shape[0] * 0.8)\n indexes = np.random.choice(df.index, size, replace=False)\n train_set = df.loc[indexes]\n test_set = df.loc[~df.index.isin(indexes)]\n return train_set, test_set", "def partition(self, data, labels):\n\t\tfor i in range(self.splits):\n\t\t\tyield self.makePartition(len(labels))", "def batch_features_labels(features, labels, batch_size):\n # 用 yield迭代器。\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def split_data(X, y, test_size, random_state):\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)\n\n return X_train, X_test, y_train, y_test", "def partition_by_labels(features_matrix, instance_labels):\n partitioned_features_matrix = {}\n\n for i in range(len(features_matrix)):\n\n # Add a new label when encountered during training\n if instance_labels[i] not in partitioned_features_matrix:\n partitioned_features_matrix[instance_labels[i]] = []\n\n partitioned_features_matrix[instance_labels[i]].append(features_matrix[i])\n return partitioned_features_matrix", "def partition(self, data, labels):\n\t\traise Exception(\"Not implmented\")", "def split_data(data, labels, proportion):\n size = data.shape[0]\n np.random.seed(42)\n s = np.random.permutation(size)\n split_idx = int(proportion * size)\n return (data[s[:split_idx]], data[s[split_idx:]], labels[s[:split_idx]], labels[s[split_idx:]])", "def split_dataset(df, test_size, seed):\r\n ncols = np.size(df, 1)\r\n X = df.iloc[:, range(0, ncols - 1)]\r\n Y = df.iloc[:, ncols - 1]\r\n x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)\r\n y_train = get_dummies(y_train) # One-hot encoding\r\n y_test = 
get_dummies(y_test)\r\n return x_train, x_test, y_train, y_test", "def splits(cls, text_field, label_field, root='.data',\n train='training.1600000.processed.noemoticon.csv', \n test='testdata.manual.2009.06.14.csv', \n neutral = None, **kwargs):\n \n path_train = root + train\n path_test = root + test\n \n if not os.path.exists(root):\n os.mkdir(root)\n \n if not os.path.exists(path_train) or not os.path.exists(path_test):\n path = cls.download(root)\n path_train = path + train\n path_test = path + test\n \n train_dataset = Sentiment140(path_train, text_field, label_field, neutral=neutral, **kwargs)\n test_dataset = Sentiment140(path_test, text_field, label_field, **kwargs)\n \n return train_dataset, test_dataset", "def train_val_test_split(data):\n raise NotImplementedError", "def split_dataset(samples, ratio=0.8):\n nsamples = len(samples)\n num_train = int(ratio*nsamples)\n\n # shuffle samples\n shuffle(samples)\n\n trainset = samples[:num_train]\n testset = samples[num_train:]\n\n return trainset, testset", "def evenly_partition_dataset(data, labels, nb_teachers):\n\n # This will floor the possible number of batches\n batch_len = int(len(data) / nb_teachers)\n\n nclasses = len(labels[0])\n print(\"Start Index Selection\")\n data_sel = [data[labels[:, j] == 1] for j in range(nclasses)]\n print(\"End Index Selection\")\n i = 0\n data_sel_id = [0] * len(labels[0])\n partition_data = []\n partition_labels = []\n\n while True:\n partition_data.append(data_sel[i][data_sel_id[i]])\n partition_labels.append(np_utils.to_categorical(i, nclasses))\n\n if len(partition_data) == batch_len:\n partition_data = np.asarray(partition_data)\n partition_labels = np.asarray(partition_labels)\n yield partition_data, partition_labels\n partition_data = []\n partition_labels = []\n\n data_sel_id[i] += 1\n if data_sel_id[i] == len(data_sel[i]):\n data_sel_id[i] = 0\n i = (i + 1) % nclasses", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def split_sample(labels):\n sample_names = labels[\"Sample\"].str.split(\" \", n=1, expand=False)\n labels['Sample'] = sample_names\n return labels", "def perform_data_split(X, y, training_idxs, test_idxs, val_idxs):\n X_train = X[training_idxs]\n X_test = X[test_idxs]\n #X_val = X[val_idxs]\n\n y_train = y[training_idxs]\n y_test = y[test_idxs]\n #y_val = y[val_idxs]\n\n return X_train, X_test, y_train, y_test,", "def make_split(data, target, test_size=0.3):\n train, test = train_test_split(data, test_size=test_size)\n x_train = train.drop(target, axis=1)\n y_train = train[target]\n x_test = test.drop(target, axis=1)\n y_test = test[target]\n return x_train, y_train, x_test, y_test", "def _split_sets(X, y, folds, ind=-1, sample_counter=0):\n\n fold = folds.pop(ind) - sample_counter\n X_test = X[fold, ...]\n y_test = y[fold, ...]\n X_train = np.delete(X, fold, axis=0)\n y_train = np.delete(y, fold, axis=0)\n test_fold = fold + sample_counter\n # return X_train, np.squeeze(y_train), X_val, np.squeeze(y_val)\n return X_train, y_train, X_test, y_test, test_fold", "def split_by_StratifiedKFold(data, labels, nb_splits=3):\n skf = StratifiedKFold(n_splits=nb_splits, shuffle=True)\n\n index_folds = []\n for train_index, test_index in skf.split(data, labels):\n index_folds.append([train_index, test_index])\n\n # here we only return the first k-fold\n train_data = [data[k] for k in index_folds[0][0]]\n train_labels = [[labels[k] for k in 
index_folds[0][0]]]\n val_data = [data[k] for k in index_folds[0][1]]\n val_labels = [[labels[k] for k in index_folds[0][1]]]\n return train_data, train_labels, val_data, val_labels", "def DataSplit(self, data):\n train_X,test_X,train_y,test_y=train_test_split(data[0],data[1], random_state=2)\n valid_X,valid_y=train_test_split(data[0],data[1],random_state=2,test_size=0.15)[1],train_test_split(data[0],data[1],random_state=2,test_size=0.15)[3]\n return (train_X,test_X,valid_X,train_y,test_y,valid_y)", "def making_dataset_list_train(data:pandas.core.frame.DataFrame, split_num:int) -> list:\n train_data_list = []\n for i in range(split_num):\n train_data_list.append(data[data['separate_num'] != i])\n for i in range(split_num):\n train_data_list[i] = train_data_list[i].drop(['separate_num'], axis = 1)\n return train_data_list", "def split_test_train(data, target=\"class\", split=0.20):\n np.random.seed(42)\n\n X = data[[c for c in list(data.columns) if c != target]]\n # y = data[target].astype(\"int\")\n y = data[target].astype(\"category\")\n\n train, test = Data(X, y), None\n if split is not None or split > 0:\n splits = train_test_split(X, y, test_size=split, stratify=y, random_state=42)\n train, test = Data(splits[0], splits[2]), Data(splits[1], splits[3])\n\n return train, test", "def _split(x, threshold, shuffle):\n x = np.array(x)\n if shuffle:\n np.random.shuffle(x)\n train_size = int(np.floor(len(x) * threshold))\n x_train = x[:train_size]\n x_test = x[train_size:]\n\n return x_train, x_test", "def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr", "def split_train_and_validation(whole_train_data, whole_train_labels, validation_index, k_fold):\n dimension = whole_train_data.shape[1]\n train_data_chunks = np.array_split(whole_train_data, k_fold)\n train_label_chunks = np.array_split(whole_train_labels, k_fold)\n validation_data = train_data_chunks[validation_index]\n validation_labels = train_label_chunks[validation_index]\n train_data = np.delete(train_data_chunks, validation_index, 0)\n train_data = train_data.reshape((-1, dimension))\n train_labels = np.delete(train_label_chunks, validation_index, 0)\n train_labels = train_labels.flatten()\n return train_data, train_labels, validation_data, validation_labels", "def _load_split_data(self, dataset_path):\n for i, prefix in enumerate(['train', 'dev', 'test']):\n filename = os.path.join(dataset_path, '{}.txt'.format(prefix))\n knowledge, src, tgt = self._load_multi_data(filename)\n self.group_text_data[0].append(knowledge)\n self.group_text_data[1].append(src)\n self.group_text_data[2].append(tgt)", "def train_test_split(dataset, split):\r\n train = list()\r\n train_size = split * len(dataset)\r\n dataset_copy = list(dataset) \r\n while len(train) < train_size:\r\n index = randrange(len(dataset_copy))\r\n train.append(dataset_copy.pop(index))\r\n return train, dataset_copy", "def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)", "def get_features_and_labels(self, dataframe):\n features = dataframe.drop(columns=self._label, axis=1)\n labels = dataframe[self._label]\n\n return features, labels", "def split_data(df, split_method='fo', test_size=.2, random_state=42):\n if split_method == 'fo':\n train_set, test_set = _split_fo(df,\n 
test_size=test_size,\n random_state=random_state)\n elif split_method == 'tfo':\n train_set, test_set = _split_tfo(df, test_size=test_size)\n elif split_method == 'ufo':\n train_set, test_set = _split_ufo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'utfo':\n train_set, test_set = _split_utfo(df, test_size=test_size)\n else:\n raise HuitreError('Invalid data_split value, expect: ufo, utfo')\n train_set = train_set.reset_index(drop=True)\n test_set = test_set.reset_index(drop=True)\n return train_set, test_set", "def split(self, X, y=None, groups=None):\n\n #X, y, groups = indexable(X, y, *groups)\n indices = np.arange(_num_samples(X))\n \n train_group, test_group = groups.T\n\n for train_g in np.unique(train_group):\n train_mask = train_group == train_g\n\n train_index = indices[train_mask]\n test_mask = np.logical_not(train_mask)\n\n rest_test = test_group[test_mask]\n\n for _, test_index in self.cv_test.split(X[test_mask],\n y[test_mask],\n test_group[test_mask]):\n test_index = indices[test_mask][test_index]\n yield train_index, test_index" ]
[ "0.7630744", "0.75351566", "0.72341454", "0.72143763", "0.7188007", "0.7188007", "0.71554524", "0.70676637", "0.70484245", "0.6948134", "0.6905886", "0.69033486", "0.68980664", "0.68860745", "0.687833", "0.687833", "0.6874963", "0.68439305", "0.6804945", "0.6803974", "0.6780325", "0.67507774", "0.6716221", "0.6697767", "0.66755503", "0.66642255", "0.6662638", "0.6662379", "0.6658029", "0.6657278", "0.6655543", "0.6647771", "0.6647586", "0.6599722", "0.65993387", "0.6588837", "0.6579331", "0.6577097", "0.6548885", "0.6548885", "0.6548885", "0.65469563", "0.6544593", "0.651451", "0.6510198", "0.6505912", "0.650456", "0.64922655", "0.64912575", "0.64746624", "0.64711785", "0.6423849", "0.64224803", "0.6411793", "0.64067936", "0.6403455", "0.63746864", "0.63743997", "0.63488036", "0.63473034", "0.6340516", "0.63377166", "0.6329495", "0.63245356", "0.63166314", "0.6309427", "0.63024265", "0.63012135", "0.62903357", "0.6282221", "0.6279998", "0.6259102", "0.62510186", "0.6228509", "0.6219978", "0.62161833", "0.62022996", "0.6200197", "0.6193963", "0.61850804", "0.61847246", "0.61842966", "0.61784756", "0.61762035", "0.61739486", "0.6173568", "0.6171003", "0.6166361", "0.6165192", "0.6158172", "0.6156595", "0.6154792", "0.6152576", "0.61515224", "0.6150832", "0.61472994", "0.61471635", "0.61448014", "0.6130445", "0.6120551", "0.6119685" ]
0.0
-1
Read episodic data where each row contains either the inputs and their preceding output, or the causal inputs/outputs relationship
def read( self, df: pd.DataFrame, iteration_order: int = -1, episode_col: str = "episode", iteration_col: str = "iteration", feature_cols: List[str] = ["state_x_position"], label_cols: List[str] = ["state_x_position"], augmented_cols: List[str] = ["action_command"], ): # CASE 1: rows are of the form {st+1, at} # Append st into next row # if iteration_order < 0 then drop the iteration - iteration_order iteration from each episode # and append previous state columns into each row: {st+1, at} -> {st, at, st+1} if all([episode_col, iteration_col, iteration_order < 0]): lagger_str = "previous" current_row = "inputs" joined_df = self.split( df, iteration_col, episode_col, iteration_order, lagger_str, current_row, feature_cols, label_cols, augmented_cols, ) # skip the first row of each episode since we do not have its st joined_df = ( joined_df.groupby(by=episode_col, as_index=False) .apply(lambda x: x.iloc[iteration_order * -1 :]) .reset_index() ) return joined_df.drop(["level_0", "level_1"], axis=1) # CASE 2: rows of the form {st, at} # Append st+1 from next row into current row {st, at, st+1} elif all([episode_col, iteration_col, iteration_order > 0]): lagger_str = "next" current_row = "outputs" joined_df = self.split( df, iteration_col, episode_col, iteration_order, lagger_str, current_row, feature_cols, label_cols, augmented_cols, ) # truncate before the end of iteration_order for complete observations only joined_df = ( joined_df.groupby(by=episode_col, as_index=False) .apply(lambda x: x.iloc[: iteration_order * -1]) .reset_index() ) return joined_df.drop(["level_0", "level_1"], axis=1) else: return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_iers_EOP(input_file):\n #-- read data file splitting at line breaks\n with open(input_file,'r') as f:\n file_contents = f.read().splitlines()\n #-- number of data lines\n n_lines = len(file_contents)\n dinput = {}\n dinput['MJD'] = np.zeros((n_lines))\n dinput['x'] = np.zeros((n_lines))\n dinput['y'] = np.zeros((n_lines))\n #-- for each line in the file\n flag = 'I'\n counter = 0\n while (flag == 'I'):\n line = file_contents[counter]\n i = 2+2+2+1; j = i+8\n dinput['MJD'][counter] = np.float(line[i:j])\n i = j+1\n flag = line[i]\n i += 2; j = i+9\n dinput['x'][counter] = np.float(line[i:j])\n i = j+10; j = i+9\n dinput['y'][counter] = np.float(line[i:j])\n counter += 1\n #-- reduce to data values\n dinput['MJD'] = dinput['MJD'][:counter]\n dinput['x'] = dinput['x'][:counter]\n dinput['y'] = dinput['y'][:counter]\n #-- return the date, flag and polar motion values\n return dinput", "def read(self) :\n # Open the file.\n f = open(self.output, 'r')\n lines = f.readlines()\n \n # Find the eigenvalue.\n count = 0\n while True :\n words = lines[count].split()\n if len(words) == 5 :\n if words[0] == \"*\" and words[1] == \"K-EFF\":\n self.keff = float(words[3])\n break\n count += 1\n \n # Find the peaking.\n a = 0 # Assembly index\n \n while True :\n words = lines[count].split()\n if len(words) == 8 :\n if words[0] == \"NODE\" and words[1] == \"AVERAGE\" and words[2] == \"POWERS\" :\n count += 5 # Powers start 5 lines below title\n for row in range(0, self.dimension) :\n words = lines[count].split()\n assert(len(words) >= self.dimension)\n for col in range(0, self.dimension) :\n self.peaking_map[row, col] = float(words[col+1])\n if self.core.stencil[row, col] > 0:\n #print \" a=\", a, \" row=\", row, \" col=\", col, len(self.peaking)\n self.peaking[a] = self.peaking_map[row, col]\n a += 1\n count += 1\n break\n count += 1 \n # Maximum peaking.\n self.maxpeak = np.max(self.peaking)", "def read_qe(qefile, task):\n fileobj = open(qefile)\n lines = fileobj.readlines()\n fileobj.close()\n if task == \"PW_INP\": # Reading a pw.x input file\n for i, line in enumerate(lines):\n if \"nat\" in line:\n # Reading the number of atoms in the cell\n if \",\" in line.split()[2]:\n nat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n nat = int(line.split()[2])\n elif \"ntyp\" in line:\n if \",\" in line.split()[2]:\n ntypat = int(line.split()[2][:len(line.split()[2])-1])\n else:\n ntypat = int(line.split()[2])\n elif \"CELL_PARAMETERS\" in line:\n # Reading the cell vectors\n cell = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"ATOMIC_POSITIONS\" in line:\n if \"crystal\" in line:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]]), cell)\n else:\n # Reading the atoms and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n # Returning the input structure\n rstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n return rstrc\n elif task == \"PW_OUT_RELAX\": # Reading a pw.x output file for a calculation = \"relax\"\n status = \"NONE\"\n 
rstrcs = []\n rtotEs = []\n rtotFs = []\n rforces = []\n rstress = []\n for i, line in enumerate(lines):\n # Initial information related to the input cell\n if \"number of atoms/cell\" in line:\n # Reading the number of atoms in the cell\n nat = int(line.split()[4])\n elif \"number of atomic types\" in line:\n ntypat = int(line.split()[5])\n elif \"crystal axes: (cart. coord. in units of alat)\" in line:\n # Reading the cell vectors\n cell = [x.split()[3:6] for x in lines[i + 1:i + 4]]\n cell = array([[float(col) for col in row] for row in cell])\n elif \"Crystallographic axes\" in line:\n # Reading the input coordinates and creating a collection of ase.Atoms objects\n geom_start = i + 3\n geom_stop = geom_start + nat\n species = [line.split()[1] for line in lines[geom_start:geom_stop]]\n geom = dot(array([[float(col) for col in line.split()[6:9]]\n for line in lines[geom_start:geom_stop]]), cell)\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates (first)\")\n # Now, just after each SCF cycle\n # Reading total energy\n elif \"Forces acting on atoms\" in line:\n forces_start = i + 2\n forces_stop = forces_start + nat\n try:\n forces = array([[float(col) for col in line.split()[6:9]]\n for line in lines[forces_start:forces_stop]])\n #print (\"Appending forces\")\n rforces.append(forces)\n except ValueError:\n # expected to occur when forces are too big\n # and so incompatible with the format used in QE\n # for instance:\n # atom 3 type 2 force = 674.57999165 312.30521069-1079.69944125\n print (\"Rerror reading forces in file:\")\n print (qefile)\n #print (\"Appending forces (empty)\")\n rforces.append([])\n elif \"! total energy\" in line:\n rtotEs.append(float(line.split()[4]))\n #print (\"Appending energy\")\n elif \"total stress (Ry/bohr**3)\" in line:\n # Reading the stress tensor\n stress = [x.split()[0:3] for x in lines[i + 1:i + 4]]\n stress = array([[float(col) for col in row] for row in stress])\n rstress.append(stress)\n #print (\"Appending stress\")\n elif \"Total force\" in line:\n rtotFs.append(float(line.split()[3]))\n #print (\"Appending total forces\")\n elif \"ATOMIC_POSITIONS (alat)\" in line:\n # Reading the relaxed and creating a collection of ase.Atoms objects\n geom_start = i + 1\n geom_stop = geom_start + nat\n species = [line.split()[0] for line in lines[geom_start:geom_stop]]\n geom = array([[float(col) for col in line.split()[1:4]]\n for line in lines[geom_start:geom_stop]])\n tstrc = Atoms(\n cell=cell,\n pbc=True,\n positions=geom,\n symbols=\"\".join(species))\n rstrcs.append(tstrc)\n #print (\"Appending coordinates\")\n elif \"convergence NOT achieved after 100 iterations: stopping\" in line:\n # Removing the last item the vector with structures\n status = \"SCF_NOT_CONVERGED\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if no even the first SCF started\n if len(rtotEs) == 0 and status == \"NONE\":\n status = \"CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the SCF has not been finished because of timeout\n if len(rstrcs) > len(rtotEs) and status == \"NONE\":\n status = \"TIMEOUT_OR_CRASH\"\n rstrcs.pop()\n #print (\"Removing coordinates\")\n # Checking if the BFGS has been finished\n if status == \"TIMEOUT_OR_CRASH\" and \"JOB DONE\" in lines[len(lines)-2]:\n status = \"FINISHED\"\n # Returning a collection of cells and properties\n return status, rstrcs, rtotEs, rtotFs, rforces, rstress", "def read_input(E):\n # 
---------- INSERT CODE BELOW ----------\n edge_list = []\n\n for _ in range(E):\n src, dst, cost = input('').rstrip('\\r\\n').split()\n edge_list.append((int(src),int(dst),int(cost)))\n \n return edge_list\n # ---------- INSERT CODE ABOVE ----------", "def read_ephem_file(infile):\n target_id, epoch, period, tdur = [], [], [], []\n with open(infile) as ff:\n data = ff.readlines()\n for row in data:\n s = row.split()\n target_id.append(s[0])\n epoch.append(float(s[1]))\n period.append(float(s[2]))\n tdur.append(float(s[3]))\n return target_id, epoch, period, tdur", "def read_SimCenter_EDP_input(input_path, EDP_kinds=('PID','PFA'), \n units = dict(PID=1., PFA=1.),\n verbose=False):\n \n # initialize the data container\n data = {}\n\n # read the collection of EDP inputs...\n # the read_csv method in pandas is sufficiently versatile to handle the\n # tabular format of dakota\n EDP_raw = pd.read_csv(input_path, sep='\\s+', header=0,\n index_col='%eval_id')\n # set the index to be zero-based\n EDP_raw.index = EDP_raw.index - 1\n\n # search the header for EDP information\n for column in EDP_raw.columns:\n for kind in EDP_kinds:\n if kind in column:\n\n if kind not in data.keys():\n data.update({kind: []})\n\n # extract info about the location, direction, and scenario\n info = column.split('-')\n \n # get the scale factor to perform unit conversion\n f_unit = units[kind]\n \n # store the data\n data[kind].append(dict(\n raw_data=(EDP_raw[column].values * f_unit).tolist(),\n location=info[2],\n direction=info[3],\n scenario_id=info[0]\n ))\n\n if verbose: pp.pprint(data)\n\n return data", "def __readCONTINoutput(self):\n\n titleline = 'OBJ. FCTN. VARIANCE STD. DEV.'\n chunkTitle = re.compile('OBJ. FCTN. VARIANCE STD. DEV. ')\n\n alldata = []\n\n with open(self.outputfile, 'r') as f:\n\n for line in f:\n if chunkTitle.search(line) is not None:\n\n alphadic = {}\n\n # gets the header\n alphaLine = next(f)\n if '*' in alphaLine:\n alphadic['marked'] = True\n\n alphaLine = alphaLine.replace('*', '')\n alphaParam = np.fromstring(alphaLine, sep=' ')\n\n # reduce the header line to string seperated text\n line = re.sub('\\s\\s\\s+', ' ', line).strip()\n for key, value in zip(line.split(' '), alphaParam):\n alphadic[key] = value\n # skip a line then get the data\n next(f)\n # alldata.append((alphadic, readblock(f)))\n alldata.append(\n (alphadic, readblock(f), readSummaryData(f)))\n\n # skip a line then get the data\n # print(next(f))\n\n return alldata", "def read_input():\n # Use with to make sure the file will be closed after the block executed\n with open('snapshot_input.txt') as f:\n # Split the line at line breaks\n x = f.read().splitlines()\n # Get the data of restructuring, three positive integers N , C , and D\n # Use generator expression for time and space efficiency\n restructuring_info = (i.split() for i in x if len(i.split())==3)\n # Get the data of single machine, four integers D, P, R and G\n machine_info = (i.split() for i in x if len(i.split())!=3)\n # Get the length of restructuring data\n length = sum(1 for i in x if len(i.split())==3)\n\n return restructuring_info, machine_info, length", "def model_inputs_and_outputs(self):\n\n # count relation instances\n total_rel_count = 0\n\n for note_path in glob.glob(self.text_dir + 'ID*_clinic_*'):\n\n # some notes weren't annotated\n if note_path not in self.note2rels:\n continue\n\n # to be broken into chunks later\n note_text = open(note_path).read()\n\n # iterate over note chunks\n for chunk_start, chunk_end in 
self.chunk_generator(note_text):\n\n # each event/time gets a number\n entity_num = 0\n\n # assign a number to each event and time\n time_offsets2num = {}\n event_offsets2num = {}\n\n # t5 i/o\n metadata = []\n rels_in_chunk = []\n\n # look for times and events in this chunk\n for time_start, time_end, time_id in self.note2times[note_path]:\n if time_start >= chunk_start and time_end <= chunk_end:\n time_offsets2num[(time_start, time_end)] = entity_num\n metadata.append('%s|%s' % (entity_num, time_id))\n entity_num += 1\n for event_start, event_end, event_id in self.note2events[note_path]:\n if event_start >= chunk_start and event_end <= chunk_end:\n event_offsets2num[(event_start, event_end)] = entity_num\n metadata.append('%s|%s' % (entity_num, event_id))\n entity_num += 1\n\n # combine time_offsets2num and event_offsets2num\n arg2num = dict(list(time_offsets2num.items()) +\n list(event_offsets2num.items()))\n\n targ2src = {} # map contained events to their containers\n for rel in self.note2rels[note_path]:\n src_start, src_end, targ_start, targ_end, src_id, targ_id = rel\n if src_start >= chunk_start and src_end <= chunk_end and \\\n targ_start >= chunk_start and targ_end <= chunk_end:\n targ2src[(targ_start, targ_end)] = (src_start, src_end)\n\n # map every event / time to its container (or none)\n sorted_args = sorted(arg2num.items(), key=lambda t: t[0][0])\n for (arg_start, arg_end), arg_num in sorted_args:\n if (arg_start, arg_end) in targ2src:\n # this target has a source (container)\n src_start, src_end = targ2src[(arg_start, arg_end)]\n src_num = arg2num[(src_start, src_end)]\n container = src_num\n else:\n container = '_' # no container\n rels_in_chunk.append('c(%s; %s)' % (arg_num, container))\n\n # add seq numbers and markers to events/times\n offset2str = {}\n for (start, end), entity_num in time_offsets2num.items():\n offset2str[start - chunk_start] = '<t> '\n offset2str[end - chunk_start] = '/' + str(entity_num) + ' </t>'\n for (start, end), entity_num in event_offsets2num.items():\n offset2str[start - chunk_start] = '<e> '\n offset2str[end - chunk_start] = '/' + str(entity_num) + ' </e>'\n chunk_text_with_markers = insert_at_offsets(\n note_text[chunk_start:chunk_end],\n offset2str)\n \n metadata_str = '||'.join(metadata)\n input_str = 'task: RELEXT; %s' % chunk_text_with_markers\n if len(rels_in_chunk) > 0:\n output_str = ' '.join(rels_in_chunk)\n else:\n output_str = 'no relations found'\n\n # counts inputs and outputs that t5 cannot handle\n if len(self.tokenizer(input_str).input_ids) > self.max_input_length:\n self.in_over_maxlen += 1\n if len(self.tokenizer(output_str).input_ids) > self.max_input_length:\n self.in_over_maxlen += 1\n\n self.inputs.append(input_str)\n self.outputs.append(output_str)\n self.metadata.append(metadata_str)\n\n print('%d total input/output pairs' % len(self.inputs))\n print('%d total relation instances' % total_rel_count)\n print('%d inputs over maxlen' % self.in_over_maxlen)\n print('%d outputs over maxlen' % self.out_over_maxlen)", "def caricaReadsEsIn(fileInput):\n\n\tidx_gene \t= 4 \n\tidx_chrom \t= 0\n\tidx_start\t= 1\n\tidx_end\t\t= 2\n\tidx_reads\t= 6\n\n\tdictReadsEsIn = {}\n\n\tlines = [x.strip('\\n').split('\\t') for x in open(fileInput)]\n\t\n\tfor riga in lines:\n\t\tgeneName \t= riga[idx_gene]\n\t\tchrom\t\t= riga[idx_chrom]\n\t\tstart\t\t= riga[idx_start]\n\t\tend\t\t\t= riga[idx_end]\n\t\treads\t\t= riga[idx_reads]\n\n\t\tif not geneName in dictReadsEsIn:\n\t\t\tdictReadsEsIn[geneName] = 
{}\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\t# Il primo campo indica se il cromosoma ha almeno..\n\t\t \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# ..una regione con reads\n\t\telif chrom not in dictReadsEsIn[geneName]:\n\t\t\tdictReadsEsIn[geneName][chrom] = [False, [start], [end], [reads]]\n\t\telse:\n\t\t\tdictReadsEsIn[geneName][chrom][idx_start].append(start)\n\t\t\tdictReadsEsIn[geneName][chrom][idx_end].append(end)\n\t\t\tdictReadsEsIn[geneName][chrom][3].append(reads)\n\n\t\ti = len(dictReadsEsIn[geneName][chrom][3])\n\t\tif int(dictReadsEsIn[geneName][chrom][3][i-1]) != 0:\n\t\t\tdictReadsEsIn[geneName][chrom][0] = True\t\t\t\t\t\t\t# Indica se c'e' almeno una regione esonica/intronica\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# che mappa delle reads\n\n\t# Si eliminano i cromosomi che non hanno mappato reads ne' su introni\n\t# ne' su esoni (primo value del dizionario = FALSE)\n\t#\n\tgeneKeys = dictReadsEsIn.keys()\n\tfor geneName in geneKeys:\n\t\tchromKeys = dictReadsEsIn[geneName].keys()\n\t\tfor chrom in chromKeys:\n\t\t\tif not dictReadsEsIn[geneName][chrom][0]:\n\t\t\t\tdel dictReadsEsIn[geneName][chrom]\n\t\t\t\t# Si eliminano i geni che non hanno piu' cromosomi\n\t\t\t\t#\n\t\t\t\tif not dictReadsEsIn[geneName]:\n\t\t\t\t\tdel dictReadsEsIn[geneName]\n\t\t\t\t\tprint 'Il gene %s non presenta cromosomi con reads mappanti.\\n' % geneName,\n\n\treturn dictReadsEsIn", "def read_inputs(self):\n self.in_power.read()\n self.in_alert.read()", "def extract_data_trans_info(lines, PE_dims):\n data_trans_info = {}\n for line_id in range(len(lines)):\n line = lines[line_id]\n if line.find('read_channel_intel') != -1:\n # Check the start and end of the block\n block_start, block_end = locate_data_trans_block(line_id, lines) \n block_lines = lines[block_start : block_end + 1]\n # Parse the data type\n block_line = block_lines[1]\n data_type = block_line.strip().split(' ')[0]\n #print(data_type)\n # Parse the start PE index\n block_line = block_lines[2]\n m = re.search(r'\\((.+?)\\)', block_line)\n fifo_name = m.group(1)\n PE_index_start = fifo_name.split('_')[-len(PE_dims):]\n PE_index_start = [int(s) for s in PE_index_start]\n #print(PE_index_start)\n # Parse the IO group name\n group_name = fifo_name.split('_')[1]\n #print(group_name)\n data_trans_info[group_name] = {\\\n 'in_block_lines': block_lines, 'in_block_pos': [block_start, block_end], \\\n 'PE_index_start': PE_index_start, 'data_type': data_type}\n if line.find('write_channel_intel') != -1:\n m = re.search(r'\\((.+?)\\)', line)\n fifo_name = m.group(1).split(',')[0]\n group_name = fifo_name.split('_')[1]\n if group_name in data_trans_info: \n # Check the start and end of the block\n block_start, block_end = locate_data_trans_block(line_id, lines)\n block_lines = lines[block_start : block_end + 1]\n # Parse the end PE index\n block_line = block_lines[3]\n m = re.search(r'\\((.+?)\\)', block_line)\n fifo_name = m.group(1).split(',')[0]\n PE_index_end = fifo_name.split('_')[-len(PE_dims):]\n PE_index_end = [int(s) for s in PE_index_end]\n #print(PE_index_end)\n group_name = fifo_name.split('_')[1]\n data_trans_info[group_name]['PE_index_end'] = PE_index_end\n data_trans_info[group_name]['out_block_lines'] = block_lines\n data_trans_info[group_name]['out_block_pos'] = [block_start, block_end]\n\n return data_trans_info", "def read_data(infile, input_count):\n example_inputs = []\n example_outputs = []\n\n for line in infile.readlines():\n line = line.split()\n # note added bias term, a fixed input of 
1.0\n example_inputs.append([int(x) for x in line[:input_count]] + [1.0])\n example_outputs.append([int(x) for x in line[input_count:]])\n return example_inputs, example_outputs", "def read_inputs(self):\n #inputs \n inputs = {}\n # read inputs\n c = 1\n with open(self.path) as f:\n lines = f.readlines()\n for line in lines:\n data = line.rstrip(os.linesep).rstrip(',').split(',')\n input = np.array([np.float64(i) for i in data])\n inputs['image'+str(c)] = input\n c += 1\n\n\n\n return inputs", "def readenergyfile(filename):\n def parsemeta(metalines):\n \"\"\"Parse metadata lines to get metadata object (ordered dict)\n\n Allow only numbers, lists of numbers and strings\n \"\"\"\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))\n\n with io.open(filename, 'r') as datafile:\n components, meta = [], []\n for ii, line in enumerate(datafile):\n line = line.strip()\n if (line == '') or line.startswith('vector'):\n continue\n elif line.startswith('#'):\n meta.append(line)\n else:\n fields = line.split('#', 1)\n data = [x.strip() for x in fields[0].split(',')]\n comment = fields[1] if len(fields) > 1 else ''\n carrier, ctype, originoruse = data[0:3]\n values = [float(v.strip()) for v in data[3:]]\n\n if ctype not in ('PRODUCCION', 'CONSUMO'):\n raise ValueError(\"Carrier type is not 'CONSUMO' or 'PRODUCCION' in line %i\\n\\t%s\" % (ii+2, line))\n if originoruse not in ('EPB', 'NEPB', 'INSITU', 'COGENERACION'):\n raise ValueError((\"Origin or end use is not 'EPB', 'NEPB', 'INSITU' or 'COGENERACION'\"\n \" in line %i\\n\\t%s\" % (ii+2, line)))\n\n components.append({ \"carrier\": carrier, \"ctype\": ctype,\n \"originoruse\": originoruse,\n \"values\": values, \"comment\": comment })\n numsteps = [len(c['values']) for c in components]\n if max(numsteps) != min(numsteps):\n raise ValueError(\"All input must have the same number of timesteps.\")\n return (parsemeta(meta), components)", "def read_input_data(input_f, features_file, output):\n\n print('\\n Reading input...')\n log_write(output['log'], 'Input file: %s\\n' % input_f, 'w+')\n\n # Read input data\n df = pd.read_csv(input_f, low_memory=False)\n df = df.astype({'Column': int,\n 'Row': int,\n 'ImageNumber': int,\n 'ObjectNumber': int\n })\n\n # Data features\n if features_file:\n data_features = [l.rstrip() for l in open(features_file).readlines()]\n else:\n data_features = [c for c in df.columns.values if 'PC' in c]\n\n return df, wt_strains(df), data_features", "def read_data_file(input_file):\n header_lines = 0\n last_pound_pos = -1\n with open(input_file, 'r') as data_file:\n while (data_file.read(1) == '#'):\n last_pound_pos = data_file.tell()\n header = data_file.readline()\n header_lines += 1\n\n #Read the next lines\n data_1 = data_file.readline().split()\n data_2 = data_file.readline().split()\n data_file.seek(last_pound_pos + 1) #Goes back to the last line of the header\n\n if header_lines == 0:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n\n else:\n # Single line file\n if len(data_2) == 0:\n data_file.readline()\n\n else:\n\n if 
len(data_1) != len(\n data_2): #If there is a line containing the number of particles,\n data_file.readline()\n data_file.readline()\n\n try:\n data = pd.read_csv(data_file, sep=\" \", header=None).dropna(axis=1, how='all')\n data.columns = header.split()\n except:\n raise Exception(\"The input file '%s' is corrupted, usually the problem is because \"\\\n \"there is an end of a line that has an additional space\" %input_file)\n\n return data", "def _beam_data(beams, network):\n inds, indi, indf = [], [], []\n EIx, EIy = [], []\n for key in beams:\n nodes = beams[key]['nodes']\n inds.extend(nodes[0:-2])\n indi.extend(nodes[1:-1])\n indf.extend(nodes[2:])\n EIx.extend([network.vertex[i]['EIx'] for i in nodes[1:-1]])\n EIy.extend([network.vertex[i]['EIy'] for i in nodes[1:-1]])\n EIx = array(EIx)[:, newaxis]\n EIy = array(EIy)[:, newaxis]\n return inds, indi, indf, EIx, EIy", "def get_eofs(data, max_eofs=None, resample=None, eof_in=None):\n \n # detrend data and weight by area of points\n #prepped = get_detrended(data)\n prepped = data\n prepped[np.isnan(prepped)]=0\n prepped = np.ma.getdata(prepped)\n # reshape to 2d array\n pshape = prepped.shape\n (nt,ns) = (pshape[0],np.product(pshape[1:]))\n prepped = prepped.reshape((nt,ns),order='F')\n \n if max_eofs is None:\n max_eofs = ns\n \n if eof_in is None:\n print('--> Starting to calculate EOFs (takes a minute for large datasets)') \n timer_start = dt.now()\n U, S, V = linalg.svd(prepped, full_matrices=False)\n U = U[:,:max_eofs]\n S = S[:max_eofs]\n V = V[:max_eofs]\n out_eofs = V\n out_pcs = np.dot(U,np.diag(S)) #pcs[:, :max_eofs]\n out_var = data[:].var(ddof=1, axis=0)\n print('--> Completed calculating EOFs (%.1f seconds)' \\\n % (dt.now()-timer_start).total_seconds()) \n else:\n if eof_in.shape[1] != ns:\n print('Input EOFs feature dimension (length={}) does '\n 'not match data feature dimension (length={})'\n ''.format(eof_in.shape[1], prepped.shape[1]))\n raise ValueError('Feature dimension mismatch for input EOFs')\n else:\n max_eofs = np.min([max_eofs,eof_in.shape[0]])\n print('Projecting data into leading {:d} EOFs'.format(max_eofs))\n out_pcs = np.matmul(eof_in[:max_eofs],prepped.T).T\n return out_pcs\n\n eig_vals = (S ** 2) / nt\n total_var = out_var.sum()\n var_expl_by_mode = eig_vals / total_var\n \n eof_dict = {}\n eof_dict['eof'] = out_eofs\n eof_dict['totalVar'] = total_var\n eof_dict['varExplByEOF'] = var_expl_by_mode\n eof_dict['pc'] = out_pcs\n \n return eof_dict", "def reading_data(fname,goal):\n \n #Reading of the EEG data\n data = pd.read_csv(fname)\n events_fname = fname.replace('_data','_events')\n labels= pd.read_csv(events_fname)\n\n if goal==\"training\":\n data=data.drop(['id' ], axis=1)#remove id\n labels=labels.drop(['id' ], axis=1)#remove id\n elif goal==\"testing\":\n labels=labels.drop(['id' ], axis=1)\n else:\n raise SystemExit(\"The goal variable is unknown for the function\")\n\n return data, labels", "def _read_in(config):\n # specify needed genes\n need_genes = _need_genes(config)\n idc = need_genes+['sample', 'project_id', 'sample_type', 'sampleType',\n 'OS', '_PATIENT', 'OS.time']\n\n if config['fpath']:\n # user gives file path\n mat = _read_in_file(config['fpath'], idc)\n elif config['dpath']:\n # user gives foder path where file was saved by cancer type\n mat = _read_in_folder(config['dpath'], config['cancer'], idc)\n else:\n info('Please set -i or -d')\n sys.exit(1)\n info('read in exp successfully')\n # check mat\n if mat.shape[0] == 0:\n info('No expression data loaded, please check reference 
files and given gene names')\n sys.exit(1)\n # check CTL\n if 'adj_gene' in config.keys() and config['adj_gene'] == 'CTL':\n mat['CTL'] = mat[['CD8A', 'CD8B', 'GZMB', 'GZMA', 'PRF1']].T.mean()\n return(mat)", "def _read_input_file(self):\n pass", "def read_inputs(self):\n curdir = os.getcwd()\n os.chdir(self.fst_dir)\n rstat = self.readFST()\n if rstat == 0:\n os.chdir(curdir)\n return 0\n # the names of the next files are either set by caller or come from the reading the FAST file\n rstat = self.readNoise()\n rstat = self.readAD()\n rstat = self.readBlade()\n rstat = self.readPtfm()\n os.chdir(curdir)", "def basic_input(reac_file,spec_file):\r\n speciesidx={}\r\n speciesmass={}\r\n colnames=[\"Input1\", \"Input2\", \"Input3\", \"Output1\", \"Output2\", \"Output3\",\r\n \"alpha\", \"beta\", \"gamma\", \"Formula\"]\r\n with open(spec_file,'r') as my_file:\r\n for line in my_file:\r\n columns = line.strip().split()\r\n speciesidx[columns[0]]=int(columns[2])\r\n speciesmass[columns[0]]=int(columns[1])\r\n \r\n numspecies=len(speciesidx)+1\r\n\r\n convert = lambda x: speciesidx[x] if x!=' ' else None\r\n\r\n reactions=pd.read_csv(reac_file, comment = '#', names = colnames,\r\n converters = {0:convert, 1:convert, 2:convert,\r\n 3:convert, 4:convert, 5:convert})\r\n reactions.fillna(0,inplace = True)\r\n reactions[colnames[0:6]] = reactions[colnames[0:6]].astype(int)\r\n \r\n return reactions,speciesidx,speciesmass,numspecies", "def read_input_and_prediction(input_file, tags_predicted):\n input_lines = read_file(input_file)\n tag_lines = read_file(tags_predicted)\n\n if DEBUGGING_MODE:\n assert len(input_lines) == len(tag_lines), \"plz, check your number of lines in both input and output file\"\n\n return input_lines, tag_lines", "def read_ripser_output(output_path,max_dim,output_name=None):\n # \\todo add persistence by density (columns pers by threshold and column pers by dens) ## only needed if input weighted network\n output_file_path =os.path.join(output_path,'output_ripser.txt')\n data = open(output_file_path,'rb').readlines()\n value_range = eval(data[1].rstrip().split(' ')[-1])\n holes = dict() ## save holes by dimension (birth, death, persistence)\n for dimH in range(0,max_dim+1):#[0,1,2]:\n print 'dimH ', dimH\n h_start, h_end = ripser_PDs_dim(data,dim=dimH)\n pers = np.array(h_end)-np.array(h_start)\n d = pd.DataFrame()\n d['birth'] = h_start\n d['death'] = h_end\n d['persistence'] = pers\n d['dimH'] = dimH\n holes[dimH] = d \n data_pds = pd.concat(holes.values())\n if(output_name!=None):\n output_file_path = os.path.join(output_path,'%s_PDS.csv'%output_name)\n data_pds.to_csv(output_file_path) ## save pandas file with PDs for dim 0,1,2\n print 'Saved results in %s'%(output_file_path)\n else:\n output_file_path = os.path.join(output_path,'outputs_PDS.csv')\n data_pds.to_csv(output_file_path) ## save pandas file with PDs for dim 0,1,2\n print 'Saved results in %s'%output_file_path\n return()", "def read_ins_file(self):\n self._instruction_lines = []\n self._instruction_lcount = []\n first_line = self._readline_ins()\n if len(first_line) < 2:\n raise Exception(\n \"first line of ins file must have atleast two entries, not '{0}'\".format(\n \",\".join(first_line)\n )\n )\n if first_line[0] != \"pif\":\n raise Exception(\n \"first line of ins file '{0}' must start with 'pif', not '{1}'\".format(\n self._ins_filename, first_line[0]\n )\n )\n self._marker = first_line[1]\n while True:\n line = self._readline_ins()\n\n if line is None:\n break\n elif len(line) == 0:\n 
self.throw_ins_warning(\"empty line, breaking\")\n break\n else:\n c1 = line[0][:1]\n if c1 == \"l\":\n pass\n elif c1 == self._marker:\n pass\n elif c1 == \"&\":\n self.throw_ins_error(\"line continuation not supported\")\n else:\n self.throw_ins_error(\n \"first token must be line advance ('l'), primary marker, or continuation ('&'),\"\n + \"not: {0}\".format(line[0])\n )\n\n for token in line[1:]:\n t1 = token[:1]\n if t1 == \"t\":\n self.throw_ins_error(\"tab instruction not supported\")\n elif t1 == self._marker:\n tn = token[-1:]\n if not tn == self._marker:\n self.throw_ins_error(\n \"unbalanced secondary marker in token '{0}'\".format(token)\n )\n\n for somarker, eomarker in zip([\"!\", \"[\", \"(\"], [\"!\", \"]\", \")\"]):\n #\n if t1 == somarker:\n ofound = True\n if eomarker not in token[1:]:\n self.throw_ins_error(\n \"unmatched observation marker '{0}', looking for '{1}' in token '{2}'\".format(\n somarker, eomarker, token\n )\n )\n raw = token[1:].split(eomarker)[0].replace(somarker, \"\")\n if raw == \"dum\":\n pass\n else:\n if (\n self._full_oname_set is not None\n and raw not in self._full_oname_set\n ):\n self.throw_ins_error(\n \"obs name '{0}' not in pst\".format(raw)\n )\n elif raw in self._found_oname_set:\n self.throw_ins_error(\n \"obs name '{0}' is listed more than once\".format(\n raw\n )\n )\n self._found_oname_set.add(raw)\n break\n # print(raw)\n\n self._instruction_lines.append(line)\n self._instruction_lcount.append(self._ins_linecount)", "def read_data(self, p_data=''):\n\n _header_ = self._header_ + 'read_data(): '\n\n if p_data:\n self.p_data = p_data\n\n if not self.p_data:\n raise ValueError(_header_ + 'No data to read.')\n\n if not os.path.isfile(self.p_data):\n raise FileNotFoundError(_header_ + 'No such file: %s' % self.p_data)\n\n if self.verbose:\n print(_header_ + 'Reading data from %s ...' % self.p_data)\n\n if self.nidx_pred:\n # If there are nodes already in .nidx_pred, then they are likely copied over from the train data\n # So, these must be purged prior to reading new data\n print(_header_ + 'Excluding %d predicting nodes transfered from training dataset ...' 
% len(self.nidx_pred))\n self.nidx_exclude += self.nidx_pred\n self.nidx_pred = []\n\n # Extract data\n all_links = []\n all_labels = []\n has_other = False\n self.df = pd.read_table(self.p_data)\n df = self.df.applymap(func=lambda x: [i for i in x.strip().split('/') if i] if isinstance(x, str) else [])\n has_node = self.columns['nodes'] in df\n has_layer = self.columns['layers'] in df\n\n for i_row in range(len(df)):\n if has_layer:\n sp = df[self.columns['layers']][i_row][0]\n if sp in self.masklayer:\n continue\n if sp in self.layer2nidx:\n self.layer2nidx[sp] |= {i_row}\n else:\n self.layer2nidx[sp] = {i_row}\n self.nidx2layer.append(sp)\n labs = df[self.columns['labels']][i_row]\n if self.lab_other:\n node_lab = [x if (not self.labels or x in self.labels) else 'other' for x in labs]\n if not has_other and 'other' in node_lab:\n has_other = True\n else:\n node_lab = [x for x in labs if (not self.labels or x in self.labels)]\n if labs:\n all_labels += labs\n if not node_lab:\n self.nidx_exclude.append(i_row)\n self.nidx_pred.append(i_row)\n self.node_links.append([x for x in list(set(df[self.columns['links']][i_row])) if x not in self.exclude_links])\n self.node_labels.append(node_lab)\n if has_node:\n self.nodes.append(df[self.columns['nodes']][i_row])\n\n all_links += self.node_links[-1]\n\n # track link frequency\n for link in self.node_links[-1]:\n if link in self.link2freq:\n self.link2freq[link] += 1\n else:\n self.link2freq[link] = 1\n\n self.links += sorted(set(all_links) - set(self.links))\n set_all_labels = set(all_labels)\n if self.labels:\n if self.lab_other and 'other' not in self.labels and has_other:\n self.labels.append('other')\n\n if self.verbose:\n if self.lab_other:\n print(_header_ + 'Other labels: %s' % (','.join(set_all_labels - set(self.labels))))\n else:\n print(_header_ + 'Excluded labels: %s' % (','.join(set_all_labels - set(self.labels))))\n else:\n self.labels = sorted(list(set_all_labels))\n\n self.n_labels = len(self.labels)\n\n for idx, link in enumerate(self.links):\n self.link2lidx[link] = idx\n\n if self.verbose:\n print(' Found %d nodes' % len(self.node_links))\n print(' Found %d links' % len(self.links))\n\n return self", "def read_inputs(filename, height, padding, num_quant_levels, p_norm,\n predict_semantics):\n for record in tf.python_io.tf_record_iterator(filename):\n example = tf.train.Example()\n example.ParseFromString(record)\n feature_map = example.features\n # Input scan as sdf.\n input_scan = read_input_float_feature(feature_map, 'input_sdf', shape=None)\n (scene_dim_z, scene_dim_y, scene_dim_x) = input_scan.shape\n # Target scan as df.\n if 'target_df' in feature_map.feature:\n target_scan = read_input_float_feature(\n feature_map, 'target_df', [scene_dim_z, scene_dim_y, scene_dim_x])\n if 'target_sem' in feature_map.feature:\n target_semantics = read_input_bytes_feature(\n feature_map, 'target_sem', [scene_dim_z, scene_dim_y, scene_dim_x])\n # Adjust dimensions for model (clamp height, make even for voxel groups).\n height_y = min(height, scene_dim_y - padding)\n scene_dim_x = (scene_dim_x // 2) * 2\n scene_dim_y = (height_y // 2) * 2\n scene_dim_z = (scene_dim_z // 2) * 2\n input_scan = input_scan[:scene_dim_z, padding:padding + scene_dim_y, :\n scene_dim_x]\n input_scan = util.preprocess_sdf(input_scan, constants.TRUNCATION)\n if target_scan is not None:\n target_scan = target_scan[:scene_dim_z, padding:padding + scene_dim_y, :\n scene_dim_x]\n target_scan = util.preprocess_df(target_scan, constants.TRUNCATION)\n if target_semantics is 
not None:\n target_semantics = target_semantics[:scene_dim_z, padding:\n padding + scene_dim_y, :scene_dim_x]\n target_semantics = util.preprocess_target_sem(target_semantics)\n\n # Default values for previous resolution inputs.\n prediction_scan_low_resolution = np.zeros(\n [scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2, 2])\n prediction_semantics_low_resolution = np.zeros(\n [scene_dim_z // 2, scene_dim_y // 2, scene_dim_x // 2], dtype=np.uint8)\n if target_semantics is None:\n target_semantics = np.zeros([scene_dim_z, scene_dim_y, scene_dim_x])\n\n # Load previous level prediction.\n if not FLAGS.is_base_level:\n previous_file = os.path.join(\n FLAGS.output_dir_prev, 'level' + str(FLAGS.hierarchy_level - 1) + '_' +\n os.path.splitext(os.path.basename(filename))[0] + 'pred.tfrecord')\n tf.logging.info('Reading previous predictions frome file: %s',\n previous_file)\n assert os.path.isfile(previous_file)\n for record in tf.python_io.tf_record_iterator(previous_file):\n prev_example = tf.train.Example()\n prev_example.ParseFromString(record)\n prev_feature_map = prev_example.features\n prediction_scan_low_resolution = read_input_float_feature(\n prev_feature_map, 'prediction_df', None)\n (prev_scene_dim_z, prev_scene_dim_y,\n prev_scene_dim_x) = prediction_scan_low_resolution.shape\n offset_z = (prev_scene_dim_z - scene_dim_z // 2) // 2\n offset_x = (prev_scene_dim_x - scene_dim_x // 2) // 2\n prediction_scan_low_resolution = prediction_scan_low_resolution[\n offset_z:offset_z + scene_dim_z // 2, :scene_dim_y // 2, offset_x:\n offset_x + scene_dim_x // 2]\n prediction_scan_low_resolution = util.preprocess_target_sdf(\n prediction_scan_low_resolution, num_quant_levels, constants.TRUNCATION,\n p_norm == 0)\n if predict_semantics:\n prediction_semantics_low_resolution = read_input_bytes_feature(\n prev_feature_map, 'prediction_sem',\n [prev_scene_dim_z, prev_scene_dim_y, prev_scene_dim_x])\n prediction_semantics_low_resolution = prediction_semantics_low_resolution[\n offset_z:offset_z + scene_dim_z // 2, :scene_dim_y // 2, offset_x:\n offset_x + scene_dim_x // 2]\n return (input_scan, target_scan, target_semantics,\n prediction_scan_low_resolution, prediction_semantics_low_resolution)", "def readInput(in_file_name):\n in_file = open(in_file_name, 'r')\n positions = []\n samples = []\n M = []; P = [];\n MC = []; PC = [];\n while True:\n line = in_file.readline()\n if not line: break\n if line[0] == '#': continue #skip comment\n line = line.rstrip('\\n').split('\\t')\n \n #genomic positions and allele support in plasma samples\n positions.append(int(line[0]))\n samples.append(tuple(map(int, line[1:5])))\n \n #maternal and paternal alleles\n M.append(tuple(line[5:7]))\n MC.append(tuple(map(float, line[7:9])))\n \n P.append(tuple(line[9:11]))\n PC.append(tuple(map(float, line[11:13]))) \n \n in_file.close()\n return positions, samples, M, P, MC, PC", "def readin():\r\n nodes = np.loadtxt('Vnodes.txt', ndmin=2)\r\n mats = np.loadtxt('Vmater.txt', ndmin=2)\r\n elements = np.loadtxt('Veles.txt', ndmin=2)\r\n loads = np.loadtxt('Vloads.txt', ndmin=2)\r\n return nodes, mats, elements, loads", "def read(self):\n\t\tentities = dict()\n\t\trelations = set()\n\t\tedges = set()\n\t\twith open(self.file_path, encoding=\"utf-8\") as f:\n\t\t\tfor line in tqdm(f):\n\t\t\t\tif(self.prob == 1.0 or random() < self.prob):\n\t\t\t\t\tsource, relation, target, _ = line.split(\" \", 3)\n\t\t\t\t\tis_dataprop = target.startswith('\"')\n\t\t\t\t\tif source not in entities:\n\t\t\t\t\t\tentities[source] = 
dict(degree=0, out_degree=0, in_degree=0, data_properties={})\n\t\t\t\t\tentities[source][\"out_degree\"] += 1\n\t\t\t\t\tentities[source][\"degree\"] += 1\n\t\t\t\t\tif not is_dataprop:\n\t\t\t\t\t\tif target not in entities:\n\t\t\t\t\t\t\tentities[target] = dict(degree=0, out_degree=0, in_degree=0, data_properties={})\n\t\t\t\t\t\tentities[target][\"in_degree\"] += 1\n\t\t\t\t\t\tentities[target][\"degree\"] += 1\n\t\t\t\t\t\trelations.add(relation)\n\t\t\t\t\t\tedges.add((relation, source, target))\n\t\t\t\t\telse:\n\t\t\t\t\t\tif(self.include_dataprop):\n\t\t\t\t\t\t\tentities[source][\"data_properties\"][relation] = target\n\n\t\treturn (entities, relations, edges)", "def read_microsoft_aec_data(synthetic_dataset_filepath):\n\n synthetic_csv = join(synthetic_dataset_filepath, 'meta.csv')\n\n data = {}\n data_train = {}\n data_test = {}\n\n is_farend_nonlinear = {'train': {1: [], 0: []}, 'test': {1: [], 0: []}}\n is_farend_noisy = {'train': {1: [], 0: []}, 'test': {1: [], 0: []}}\n is_nearend_noisy = {'train': {1: [], 0: []}, 'test': {1: [], 0: []}}\n\n df = pandas.read_csv(synthetic_csv)\n for _, row in df.iterrows():\n\n fileid = row['fileid']\n data[fileid] = {}\n data[fileid]['ser'] = row['ser']\n data[fileid]['is_farend_nonlinear'] = row['is_farend_nonlinear']\n data[fileid]['is_farend_noisy'] = row['is_farend_noisy']\n data[fileid]['is_nearend_noisy'] = row['is_nearend_noisy']\n data[fileid]['split'] = row['split']\n data[fileid]['fileid'] = row['fileid']\n data[fileid]['nearend_scale'] = row['nearend_scale']\n\n # Add the absolute path of the echo, far-end, near-end, and clean near-end\n data[fileid]['echo_path'] = join(synthetic_dataset_filepath,\n 'echo_signal/echo_fileid_' + str(fileid) + '.wav')\n data[fileid]['farend_speech_path'] = join(synthetic_dataset_filepath,\n 'farend_speech/farend_speech_fileid_' + str(fileid) + '.wav')\n data[fileid]['nearend_mic_path'] = join(synthetic_dataset_filepath,\n 'nearend_mic_signal/nearend_mic_fileid_' + str(fileid) + '.wav')\n data[fileid]['nearend_speech_path'] = join(synthetic_dataset_filepath,\n 'nearend_speech/nearend_speech_fileid_' + str(fileid) + '.wav')\n\n # Dictionary train/test -> nonlinear -> file id\n is_farend_nonlinear[row['split']\n ][row['is_farend_nonlinear']].append(fileid)\n is_farend_noisy[row['split']][row['is_farend_noisy']].append(fileid)\n is_nearend_noisy[row['split']][row['is_nearend_noisy']].append(fileid)\n\n if row['split'] == 'test':\n data_test[fileid] = data[fileid]\n else:\n data_train[fileid] = data[fileid]\n\n return data, data_train, data_test, is_farend_nonlinear, is_farend_noisy, is_nearend_noisy", "def readData(self):\n self._setupArrays()\n\n with open(self.filename) as fh:\n datalines = fh.readlines()[self.NLHEAD:]\n\n datalines = self._checkForBlankLines(datalines)\n\n # Set up loop over unbounded indpendent variable\n m = 0 # Unbounded independent variable mark \n while len(datalines) > 0:\n datalines = self._readData1(datalines, m)\n datalines = self._readData2(datalines, m)\n m = m + 1", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 
1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint \"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)", "def read_b2_transport_inputfile(infileloc, carbon=True):\n with open(infileloc, 'r') as f:\n lines = f.readlines()\n\n ndata = int(\n lines[1].strip().split()[5]) # This is the same for every array in our write routine\n\n rn = np.zeros(ndata)\n dn = np.zeros(ndata)\n ke = np.zeros(ndata)\n ki = np.zeros(ndata)\n if carbon:\n vrc = np.zeros(ndata)\n dc = np.zeros(ndata)\n\n for line_full in lines[2:]:\n line = line_full.strip().split()\n if len(line) < 4: continue\n\n if line[2][0] == '1' and line[3][0] == '1':\n rn[int(line[1][:-1]) - 1] = np.float(line[5])\n dn[int(line[1][:-1]) - 1] = np.float(line[-2])\n\n elif line[2][0] == '3' and line[3][0] == '1':\n ki[int(line[1][:-1]) - 1] = np.float(line[-2])\n\n elif line[2][0] == '4' and line[3][0] == '1':\n ke[int(line[1][:-1]) - 1] = np.float(line[-2])\n\n elif carbon:\n\n if line[2][0] == '1' and line[3][0] == '4':\n dc[int(line[1][:-1]) - 1] = np.float(line[-2])\n\n elif line[2][0] == '6' and line[3][0] == '3':\n vrc[int(line[1][:-1]) - 1] = np.float(line[-2])\n\n if carbon:\n return {'rn': rn, 'dn': dn, 'ki': ki, 'ke': ke, 'dc': dc, 'vrc': vrc}\n else:\n return {'rn': rn, 'dn': dn, 'ki': ki, 'ke': ke}", "def read_psmecalist( istream , isEig=False ):\n\n mtlist=[] # this will be the output list\n\n # read everything\n alltxt = NP.genfromtxt( istream, delimiter='\\n' , dtype=str)\n try: \n istream.close()\n except:\n tmp=1\n\n # loop through all tensors\n n = len(alltxt)\n\n # check for desired output type\n if isEig:\n for i in range(0,n):\n mtlist.append( psmeca2EigMT( alltxt[i] ) )\n else:\n for i in range(0,n):\n mtlist.append( psmeca2SymMT( alltxt[i] ) )\n\n \n return mtlist, alltxt", "def _read_txt(self, expected_col_names):\n\n try:\n # Read data\n data = pd.read_csv(self.source)\n\n # Check number of columns\n if data.shape[1] != len(expected_col_names):\n raise ValueError(\n \"Unexpected number of columns. Expected {}.\".format(\n len(expected_col_names)))\n # Check column names\n for item in data.columns:\n if item not in expected_col_names:\n raise ValueError(\"Unexpected column name. 
Expected:{}\"\\\n .format(expected_col_names))\n\n # Convert data\n for column in data.columns:\n data[column] = pd.to_numeric(data[column])\n\n # Generate output\n if self.coordinate_system == CoordinateSystem.GEOGRAPHIC:\n def generate_utm(row):\n return UtmCoordinate.create_from_geographic(\n row['latitude'],\n row['longitude'],\n row['elevation'])\n data['UTM'] = data.apply(generate_utm, axis=1)\n data['easting'] = data.apply(lambda row: row['UTM'].easting,\n axis=1)\n data['northing'] = data.apply(lambda row: row['UTM'].northing,\n axis=1)\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.UTM:\n data['x'] = data['easting'] - data['easting'].min()\n data['y'] = data['northing'] - data['northing'].min()\n data['z'] = data['elevation'] - data['elevation'].min()\n\n elif self.coordinate_system == CoordinateSystem.CARTESIAN:\n data['elevation'] = data['z'] # keeping return values consitent\n data['z'] = data['elevation'] - data['elevation'].min()\n\n else:\n raise ValueError('Unknown coordinate system.')\n\n selection = ['x', 'y', 'z', 'elevation']\n return data[selection]\n except Exception as exception:\n raise exception", "def _read_onr1_3(self, data: bytes, ndata: int):\n op2 = self.op2\n op2._analysis_code_fmt = b'i'\n op2.words = [\n 'aCode', 'tCode', 'eTotal', 'isubcase',\n '???', '???', 'element_name', 'load_set',\n 'format_code', 'num_wide', 'cvalres', 'setID',\n 'setID', 'eigenReal', 'eigenImag', 'rmssf',\n 'etotpos', 'etotneg', 'thresh', '???',\n '???', '???', '???', '???',\n '???', 'Title', 'subtitle', 'label']\n\n #aCode = self.get_block_int_entry(data, 1)\n\n ## total energy of all elements in isubcase/mode\n self.etotal = op2.parse_approach_code(data)\n if op2.is_debug_file:\n op2.binary_debug.flush()\n\n self._onr_element_name(data)\n\n #: Load set or zero\n op2.load_set = op2.add_data_parameter(data, 'load_set', b'i', 8, False)\n\n #: format code\n op2.format_code = op2.add_data_parameter(data, 'format_code', b'i', 9, False)\n\n #: number of words per entry in record\n #: .. 
note:: is this needed for this table ???\n op2.num_wide = op2.add_data_parameter(data, 'num_wide', b'i', 10, False)\n ## C\n op2.cvalres = op2.add_data_parameter(data, 'cvalres', b'i', 11, False)\n\n #: Set identification number Number\n op2.set_id = op2.add_data_parameter(data, 'set_id', b'i', 13, False)\n\n #: Natural eigenvalue - real part\n op2.eigen_real = op2.add_data_parameter(data, 'eigen_real', b'i', 14, False)\n\n #: Natural eigenvalue - imaginary part\n op2.eigen_imag = op2.add_data_parameter(data, 'eigen_imag', b'i', 15, False)\n\n #: Natural frequency\n op2.freq = op2.add_data_parameter(data, 'freq', b'f', 16, False)\n\n #: RMS and CRMS scale factor - NX\n op2.rmssf = op2.add_data_parameter(data, 'rmssf', b'f', 17)\n\n #: Total positive energy\n op2.etotpos = op2.add_data_parameter(data, 'etotpos', b'f', 18)\n\n #: Total negative energy\n op2.etotneg = op2.add_data_parameter(data, 'etotneg', b'f', 19, False)\n\n #: Energy Threshold - NX\n op2.thresh = op2.add_data_parameter(data, 'thresh', b'f', 17)\n\n if not op2.is_sort1:\n raise NotImplementedError('sort2...')\n\n if op2.analysis_code == 1: # statics / displacement / heat flux\n #del op2.data_code['nonlinear_factor']\n op2.lsdvmn = op2.add_data_parameter(data, 'lsdvmn', b'i', 5, False)\n op2.data_names = op2.apply_data_code_value('data_names', ['lsdvmn'])\n op2.setNullNonlinearFactor()\n elif op2.analysis_code == 2: # real eigenvalues\n op2.mode = op2.add_data_parameter(data, 'mode', b'i', 5) ## mode number\n #op2.mode_cycle1 = op2.add_data_parameter(data, 'mode', b'i', 7)\n #op2.mode_cycle2 = op2.add_data_parameter(data, 'mode', b'f', 7)\n #print('mode = ', op2.mode)\n #print('mode_cycle1 = ', op2.mode_cycle1)\n #print('mode_cycle2 = ', op2.mode_cycle2)\n #self.show_data(data)\n #op2.cycle = 0.\n #self.reader_oug.update_mode_cycle('cycle')\n op2.data_names = op2.apply_data_code_value('data_names', ['mode', 'freq'])\n #print(\"mode(5)=%s eign(6)=%s mode_cycle(7)=%s\" % (\n #op2.mode, self.eign, op2.mode_cycle))\n #elif op2.analysis_code == 3: # differential stiffness\n #op2.lsdvmn = self.get_values(data,'i',5) ## load set number\n #op2.data_code['lsdvmn'] = op2.lsdvmn\n #elif op2.analysis_code == 4: # differential stiffness\n #op2.lsdvmn = self.get_values(data,'i',5) ## load set number\n elif op2.analysis_code == 5: # frequency\n op2.freq2 = op2.add_data_parameter(data, 'freq2', b'f', 5) ## frequency\n op2.data_names = op2.apply_data_code_value('data_names', ['freq2'])\n elif op2.analysis_code == 6: # transient\n op2.time = op2.add_data_parameter(data, 'time', b'f', 5) ## time step\n op2.data_names = op2.apply_data_code_value('data_names', ['time'])\n #elif op2.analysis_code == 7: # pre-buckling\n #op2.data_names = op2.apply_data_code_value('data_names',['lsdvmn'])\n elif op2.analysis_code == 8: # post-buckling\n op2.mode = op2.add_data_parameter(data, 'mode', b'i', 5) ## mode number\n op2.data_names = op2.apply_data_code_value('data_names', ['mode'])\n elif op2.analysis_code == 9: # complex eigenvalues\n op2.mode = op2.add_data_parameter(data, 'mode', b'i', 5) ## mode number\n op2.eigr = op2.eigen_real\n op2.eigi = op2.eigen_imag\n op2.data_names = op2.apply_data_code_value('data_names', ['mode', 'eigr', 'eign'])\n elif op2.analysis_code == 10: # nonlinear statics\n self.loadFactor = op2.add_data_parameter(data, 'loadFactor', b'f', 5) ## load factor\n op2.data_names = op2.apply_data_code_value('data_names', ['loadFactor'])\n #elif op2.analysis_code == 11: # old geometric nonlinear statics\n #op2.data_names = 
op2.apply_data_code_value('data_names',['lsdvmn'])\n elif op2.analysis_code == 12: # contran ? (may appear as aCode=6) --> straight from DMAP...grrr...\n op2.time = op2.add_data_parameter(data, 'time', b'f', 5) ## time step\n op2.data_names = op2.apply_data_code_value('data_names', ['time'])\n else:\n raise RuntimeError('invalid analysis_code...analysis_code=%s' %\n op2.analysis_code)\n\n op2.fix_format_code()\n if op2.is_debug_file:\n op2.binary_debug.write(' approach_code = %r\\n' % op2.approach_code)\n op2.binary_debug.write(' tCode = %r\\n' % op2.tCode)\n op2.binary_debug.write(' isubcase = %r\\n' % op2.isubcase)\n op2._read_title(data)\n op2._write_debug_bits()", "def _read_input(input_file):\n with open(input_file, 'r') as f:\n input_dict = yaml.load(f, yaml.SafeLoader)\n # dafi inputs\n inputs_dafi = input_dict['dafi']\n inputs_dafi['save_level'] = inputs_dafi.get('save_level', 'time')\n # inverse method inputs\n if 'inverse' not in input_dict or input_dict['inverse'] is None:\n inputs_inverse = dict()\n else:\n inputs_inverse = input_dict['inverse']\n # physics model inputs\n if 'model' not in input_dict or input_dict['model'] is None:\n inputs_model = dict()\n else:\n inputs_model = input_dict['model']\n return inputs_dafi, inputs_inverse, inputs_model", "def parse():\n file = open(INPUT, 'r')\n\n expect_eff = False\n expect_vout = False\n\n eff_dict = {}\n vout_dict = {}\n\n for line in file:\n if line.startswith('PCC'):\n id = line.strip()\n expect_eff = True\n elif expect_eff:\n if line.startswith('efficiency'):\n eff_str = line.strip().split(':')[1]\n # get rid of % symbol\n eff = int(eff_str.split('%')[0])\n eff_dict[id] = .01 * eff\n\n expect_vout = True\n\n expect_eff = False\n elif expect_vout:\n if line.startswith('output voltage'):\n vout_str = line.strip().split(':')[1]\n vout = int(vout_str)\n vout_dict[id] = vout\n\n expect_vout = False\n\n with open(EFF_OUTPUT, 'w') as f:\n json.dump(eff_dict, f)\n\n with open(VOUT_OUTPUT, 'w') as f:\n json.dump(vout_dict, f)\n\n # plot stats of eff and vout\n plot_hist(eff_dict.values(), 'Efficiency', 'eff', bins=50)\n plot_hist(vout_dict.values(), 'V_out', 'vout', bins=50)", "def read_input():\n return Path(__file__).with_name('input.txt').read_text().splitlines()", "def read_model(input_file):\n with open(input_file) as inp:\n labels = inp.readline().strip().split(\" \")\n init_conc = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n\n stoich = []\n for i in range(len(labels)):\n stoich.append(list(map(float, inp.readline().strip().split(\" \"))))\n S_matrix = np.array(stoich)\n\n educt = []\n for i in range(len(labels)):\n educt.append(list(map(float, inp.readline().strip().split(\" \"))))\n educt_matrix = np.array(educt)\n\n kin_par = np.array(list(map(float, inp.readline().strip().split(\" \"))))\n t_T, t_eval_step = list(map(float, inp.readline().strip().split(\" \")))\n\n return labels, init_conc, S_matrix, educt_matrix, kin_par, t_T, t_eval_step", "def read(self,isOutputFile = False, headerCols = None, verbose = 0):\n \n #\n # TODO TODO also need a 'readFinal' one to read the FINAL information!!\n # set a flag in MonteFormat.py to select which cs info to read...\n\n if verbose == 1:\n print \"Reading %s chemical shift list %s\" % (self.format,self.name)\n\n fin = open(self.name, 'rU')\n\n line = fin.readline()\n \n spinSystemId = 0\n resLabel = oldResLabel = None\n\n while line:\n\n if self.patt['%sComment' % self.format].search(line):\n\n if not isOutputFile and not self.chemShifts and not headerCols:\n\n 
#\n # Get atom info from first line...\n #\n \n headerCols = line.split()\n headerCols.pop(0)\n\n line = fin.readline()\n continue\n\n if self.patt['emptyline'].search(line):\n line = fin.readline()\n continue\n \n #\n # Make sure header info is available - otherwise no point\n #\n \n if not headerCols:\n raise \"Error: no header column information available. Try reading .par file!\"\n return\n \n #\n # Get the info... should really come for .par file!!\n #\n \n cols = line.split()\n \n infoCode = None\n \n if not isOutputFile:\n \n stripId = returnFloat(cols.pop(0))\n\n #\n # NOt necessarily info string available...\n #\n\n if self.patt['onlyFloat'].search(cols[0]):\n seqCode = None\n resLabel = None\n\n else:\n assignment = cols.pop(0)\n\n searchAssignment = self.patt['%sAssignment' % self.format].search(assignment)\n\n resLabel = searchAssignment.group(1)\n seqCode = searchAssignment.group(2)\n \n else:\n \n seqCode = cols.pop(0)\n if seqCode[-1] in '+':\n seqCode = seqCode[:-1]\n infoCode = seqCode[-1]\n \n oldResLabel = resLabel\n resLabel = cols.pop(0)\n stripId = returnFloat(cols.pop(0))\n voidCol = cols.pop(0)\n \n #\n # Set up info for atoms...\n #\n \n if not seqCode or seqCode == '?':\n seqCode = None\n spinSystemId = spinSystemId + 2\n else:\n seqCode = returnInt(seqCode)\n\n if len(cols) == 1:\n cols = cols.split(',')\n\n values = returnFloats(cols)\n\n for i in range(0,len(values)):\n atomId = headerCols[i]\n value = values[i]\n \n if value == 0.0:\n continue\n \n atomSearch = self.patt['%sAtomInfo' % self.format].search(atomId)\n \n atomName = atomSearch.group(1)\n atomPlace = atomSearch.group(2)\n \n if atomName == 'HA1':\n nextAtomValue = values[i+1]\n if nextAtomValue == 0.00:\n atomName = 'HA'\n \n curSeqCode = seqCode\n curResLabel = None\n \n if seqCode == None:\n curSpinSystemId = spinSystemId\n prevSpinSystemId = spinSystemId - 1\n else:\n curSpinSystemId = None\n prevSpinSystemId = None\n \n if atomPlace == '(i-1)' or atomPlace == '-1':\n\n if seqCode != None:\n curSeqCode = seqCode - 1\n else:\n curSpinSystemId = spinSystemId - 1\n prevSpinSystemId = None\n \n if not isOutputFile:\n curResLabel = resLabel\n else:\n curResLabel = oldResLabel\n \n elif isOutputFile:\n curResLabel = resLabel\n\n self.chemShifts.append(MonteChemShift(value,atomName,curSeqCode,curSpinSystemId,stripId,curResLabel,self.defaultMolCode, infoCode = infoCode, prevSpinSystemId = prevSpinSystemId))\n\n line = fin.readline()\n\n fin.close()", "def read_data(path):\n with h5py.File(path, 'r') as hf:\t\n input_ = np.array(hf.get('input'))\n label_ = np.array(hf.get('label'))\n return input_, label_", "def read_input():\n \n argv = sys.argv\n\n # Read file names from sd input\n f_dy = argv[1] # matdyn.modes\n f_pat = argv[2] # path.out (should be in crystal coords)\n f_ph = argv[3] # ph.x output (Gamma point)\n\n # Read input card\n f_inp = open(\"input.dat\",'r')\n l1 = f_inp.readline()\n l2 = f_inp.readline()\n l3 = f_inp.readline().split()\n f_inp.close()\n\n # Open files\n\n f = open(f_dy,'r') # matdyn.modes \n f_dyn = f.readlines()\n f.close()\n\n f = open(f_pat,'r') # path.out\n f_path = f.readlines()\n f.close()\n\n f = open(f_ph,'r') # ph.x output\n f_zs = f.readlines()\n f.close()\n\n # Assign values to a0, nat, M, nqp\n a0, vol = float(l1.split()[0]), float(l1.split()[1])\n nat = int(l2) \n mass = np.zeros(nat)\n for iat in range(nat):\n mass[iat] = float(l3[iat])\n\n # Assign values to G (reciprocal lattice vec)\n ig = 0 ; i = 0\n for line in f_zs:\n if \"reciprocal axes:\" in line:\n ig 
= i + 1 \n break\n i += 1 \n\n rG = np.zeros((3,3))\n for ic in range(3):\n rGtext = f_zs[ig+ic][23:48].split()\n rG[ic,:] = np.array([float(rGtext[0]), float(rGtext[1]), float(rGtext[2])])\n\n # Read Z* tensor from f_zs\n i = 0\n iz = 0\n zstart = []\n for line in f_zs:\n if \"(d P / du)\" in line:\n iz = i + 3\n if \"Px\" in line:\n zstart.append(i)\n\n i += 1\n\n # Read the dielectric tensor from f_zs\n i = 0\n ie = 0\n for line in f_zs:\n if \"Dielectric constant in cartesian axis\" in line:\n ie = i + 2\n break\n\n i += 1\n\n # Assign Z* values\n zs = np.zeros((nat,3,3)) # initialize Z*\n\n for iat in range(nat):\n for ic in range(3):\n ztext = f_zs[zstart[iat]+ic][19:56].split()\n for jc in range(3):\n zs[iat][ic][jc] = float(ztext[jc])\n\n # Assing the dielectric tensor\n eps = np.zeros((3,3))\n\n for ic in range(3):\n epstext = f_zs[ie+ic][16:66].split()\n for jc in range(3):\n eps[ic][jc] = float(epstext[jc])\n\n # Number of modes and q-points\n nmodes = 3 * nat\n nqpt = int(f_path[0].split()[0])\n\n # Read the q-points\n q = np.zeros((nqpt,4)) # 4th dimension is lenght for q-points on a line, weights for q-points on a grid \n for iq in range(1,nqpt+1):\n q[iq-1,] = np.array([float(f_path[iq].split()[0]),float(f_path[iq].split()[1]), \\\n float(f_path[iq].split()[2]),float(f_path[iq].split()[3])])\n\n # Read the eigenvalues(om) and eigenvectors(eig) \n # Initiate first\n om = np.zeros((nmodes,nqpt))\n eig = np.zeros((nmodes,nqpt,nat,3), dtype=complex) \n\n # Get the starting lines for each q-pt\n i = 0\n i_q = []\n for line in f_dyn:\n if \"q =\" in line:\n i_q.append(i+2)\n i += 1\n\n #Assign values to om and eig\n for iq in range(nqpt):\n for imod in range(nmodes):\n omtext = f_dyn[i_q[iq]+imod*(nat+1)][43:55]\n om[imod][iq] = float(omtext)\n for iat in range(nat):\n etext = f_dyn[i_q[iq]+imod*(nat+1)+iat+1][2:72].split()\n for ic in range(3):\n eig.real[imod][iq][iat][ic]=float(etext[2*ic])*np.sqrt(mass[iat])\n eig.imag[imod][iq][iat][ic]=float(etext[2*ic+1])*np.sqrt(mass[iat])\n\n #Normalize the eigenvectors\n t1 = eig[imod,iq,:,:]\n t_nu = np.sum(np.sum(np.conjugate(t1)*t1,axis=0))\n eig[imod,iq,:,:] = eig[imod,iq,:,:]/np.sqrt(np.abs(t_nu))\n\n # Check normalization\n delta = np.zeros((nmodes,nmodes), dtype=complex)\n for iat in range(nat):\n for ic in range(3):\n t2 = eig[:,iq,iat,ic]\n delta += np.outer(np.conjugate(t2),t2)\n\n unit = np.diag(np.diag(np.ones((nmodes,nmodes)))) # Unit vector\n test = np.abs( (delta-unit) )\n if ( np.max(test) > 1e-3):\n print \"Non-orthonormal eigenvector at iq=\", q[iq,:]\n\n return om, eig, q, zs, eps, mass, a0, vol, rG, nmodes, nqpt, nat", "def read_dataset(fd):\n\n T = int(fd.readline())\n pair_list = []\n for _ in xrange(T):\n line = fd.readline()\n (left, right) = line.split()\n pair_list.append((int(left), int(right)))\n return (T, pair_list)", "def test_read_EOF2(demo_data):\n\n openeeg = openEDF(demo_data)\n #read 200 samples starting from 100 samples before EOF\n start = max(openeeg.header.samples) - 100\n arr = openeeg.read(start, start + 200)\n assert arr.shape[-1] == 100\n\n openeeg.close()", "def read_data(filename):\n \n ######################################################\n # Disadvantage here: only includes J_up = 11 here, #\n # please manually add more if you have #\n # J_up >= 12 CO lines #\n ######################################################\n \n ascii_data = ascii.read(\n filename, names=[\n \"SOURCE\", \"z\", \"D_L\", \"line_width\",\n \"CO_J_1\", \"eCO_J_1\", \"CO_J_2\", \"eCO_J_2\", \"CO_J_3\", \"eCO_J_3\",\n 
\"CO_J_4\", \"eCO_J_4\", \"CO_J_5\", \"eCO_J_5\", \"CO_J_6\", \"eCO_J_6\",\n \"CO_J_7\", \"eCO_J_7\", \"CO_J_8\", \"eCO_J_8\", \"CO_J_9\", \"eCO_J_9\",\n \"CO_J_10\", \"eCO_J_10\", \"CO_J_11\", \"eCO_J_11\", \"CI_1\", \"eCI_1\",\n \"CI_2\", \"eCI_2\"])\n\n pd = ascii_data.to_pandas()\n pd = pd.set_index('SOURCE')\n return pd.T", "def read_pendf_xs(file,start,finish):\n with open(file) as f:\n e = []\n cs = []\n\n break_outer = False\n\n for i,line in enumerate(f):\n # -------------------------------\n # Stop the loop once finish is reached\n # -------------------------------\n if i == finish:\n break\n if i >= start-1:\n \t# -------------------------------\n \t# Only include first 66 columns, split on space\n \t# and convert to an array of strings\n \t# -------------------------------\n word_len = 11\n word_start = 0\n for j in range(6):\n word = line[word_start:word_start+11]\n\n if( j%2 == 0 ):\n # -------------------------------\n # Grab the energies, convert to readable format\n # -------------------------------\n if( word == ' ' ):\n break_outer = True\n break # end of TAB1\n e.append(word.replace('-','e-').replace('+','e+'))\n else:\n # -------------------------------\n # Grab cross section, convert to readable format\n # -------------------------------\n if( word == ' ' ):\n break_outer = True\n break # end of TAB1\n cs.append(word.replace('-','e-').replace('+','e+'))\n word_start+=word_len\n\n if( break_outer ):\n break # end of TAB1\n \n # -------------------------------\n # Convert to floats\n # -------------------------------\n e = np.array(e).astype(float)\n cs = np.array(cs).astype(float)\n\n # -------------------------------\n # Stack them into a numpy array\n # -------------------------------\n pointwise_cs = np.array([e,cs])\n \n return pointwise_cs", "def read_input(self, xml, mode):\n # get specs for allowable inputs\n specs = self.get_input_specs()()\n specs.parseNode(xml)\n self.name = specs.parameterValues['name']\n self.raiseADebug('Loading component \"{}\"'.format(self.name))\n for item in specs.subparts:\n if self.get_interaction() and item.getName() in ['produces', 'stores', 'demands']:\n self.raiseAnError(NotImplementedError, 'Currently each Component can only have one interaction (produces, stores, demands)! 
Check Component \"{}\"'.format(self.name))\n # read in producers\n if item.getName() == 'produces':\n prod = Producer(messageHandler=self.messageHandler)\n try:\n prod.read_input(item, mode, self.name)\n except IOError as e:\n self.raiseAWarning('Errors while reading component \"{}\"!'.format(self.name))\n raise e\n self._produces.append(prod)\n # read in storages\n elif item.getName() == 'stores':\n store = Storage(messageHandler=self.messageHandler)\n store.read_input(item, mode, self.name)\n self._stores.append(store)\n # read in demands\n elif item.getName() == 'demands':\n demand = Demand(messageHandler=self.messageHandler)\n demand.read_input(item, mode, self.name)\n self._demands.append(demand)\n # read in economics\n elif item.getName() == 'economics':\n econ_node = item # need to read AFTER the interactions!\n # after looping over nodes, finish up\n if econ_node is None:\n self.raiseAnError(IOError, '<economics> node missing from component \"{}\"!'.format(self.name))\n CashFlowUser.read_input(self, econ_node)", "def read_input():\n # Define return object\n objects = []\n\n # Check if input file exists in folder, else notify user and exit with error.\n try:\n input_file = open(INPUTFILE, 'r')\n except FileNotFoundError:\n print(\n \"Input file {} not found in current directory. Make sure the file is in the same directory!\"\n .format(INPUTFILE))\n exit(1)\n\n # Start generating the object with values from the input file\n for line in input_file:\n obj = {}\n\n if line.startswith(\"[mod]\"):\n a_original = None\n b_original = None\n f_original = None\n g_original = None\n h_original = None\n deg_original = None\n additional_data = None\n\n obj['mod'] = int(line.split()[1])\n obj['operation'] = input_file.readline()[1:-2]\n\n if '{' in obj['operation']:\n operation = obj['operation'].split()\n obj['operation'] = operation[0].strip(']')\n obj['operation_values_original'] = operation[1] + '}'\n obj['operation_values'] = set_to_array(operation[1])\n\n # Parse rest\n for current_line in input_file:\n if current_line == '\\n': # If we find an empty line, the block is finished\n break\n\n key = current_line.split()[0][1:-1]\n\n if key == 'a':\n a_original = current_line[:-1]\n elif key == 'b':\n b_original = current_line[:-1]\n elif key == 'f':\n f_original = current_line[:-1]\n elif key == 'g':\n g_original = current_line[:-1]\n elif key == 'h':\n h_original = current_line[:-1]\n elif key == 'deg':\n deg_original = current_line[:-1]\n elif '[' not in key and key != '' and key != 'answer':\n additional_data = key\n\n # Convert given values to arrays, keeping the old values\n if a_original:\n a = set_to_array(a_original.split()[1])\n obj['a_original'] = a_original\n obj['a'] = a\n\n if b_original:\n b = set_to_array(b_original.split()[1])\n obj['b_original'] = b_original\n obj['b'] = b\n\n if f_original:\n f = set_to_array(f_original.split()[1])\n obj['f_original'] = f_original\n obj['f'] = f\n\n if g_original:\n g = set_to_array(g_original.split()[1])\n obj['g_original'] = g_original\n obj['g'] = g\n\n if h_original:\n h = set_to_array(h_original.split()[1])\n obj['h_original'] = h_original\n obj['h'] = h\n\n if deg_original:\n obj['deg_original'] = deg_original\n obj['deg'] = int(deg_original.split()[1])\n\n if additional_data:\n obj['additional_data'] = additional_data\n\n objects.append(obj)\n else:\n continue\n\n return objects", "def pyread(eeg, start, stop, channels=None):\n\n if not channels:\n chs = range(len(eeg.chan_info['ch_names']) - 1) #-1 for annotations\n else:\n chs = 
channels\n\n result = []\n for channel in chs:\n #pyedf is inclusive of stop\n result.append(eeg.read_samples(channel, start, stop-1))\n return np.array(result)", "def read_2hps2_acc(filename, multi_header=True):\n\n num_headers = 27\n header_row = 16\n units_row = 17\n timestamp_row = 20\n\n with open(filename, \"r\") as f:\n accreader = csv.reader(f, delimiter=\" \")\n\n # Skip file info headers\n for i in range(num_headers):\n if i == header_row - 1:\n channels = next(accreader)\n elif i == units_row - 1:\n units = next(accreader)\n elif i == timestamp_row - 1:\n ts_start = next(accreader)\n else:\n next(accreader)\n\n # Read body - drop blanks\n data = [[x for x in line if x != \"\"] for line in accreader]\n\n # Convert column names list so that split by \",\" not \" \", drop \"Time\" item and trim\n channels = \" \".join(channels).split(\",\")[1:]\n channels = [c.strip() for c in channels]\n\n # Read the start timestamp marker and get start datetime\n ts_start = [int(i) for i in ts_start[5:]]\n dt_start = datetime(\n ts_start[5], # year\n ts_start[4], # month\n ts_start[3], # day\n ts_start[2], # hour\n ts_start[1], # minute\n ts_start[0], # second\n )\n\n # Create dataframe and timestamps using start timestamp marker and time steps column\n df = pd.DataFrame(data, dtype=\"float\")\n ts = df.iloc[:, 0].values\n timestamps = [dt_start + timedelta(seconds=t) for t in ts]\n\n # For raw data module\n if multi_header is True:\n # Create multi-index header of channel names and units and time steps index\n units = \" \".join(units).split(\",\")[1:]\n units = [i.strip().split(\"(\")[1][:-1] for i in units]\n header = list(zip(channels, units))\n header.insert(0, (\"Timestamp\", \"\"))\n header = pd.MultiIndex.from_tuples(header, names=[\"channels\", \"units\"])\n df = df.set_index(df.columns[0])\n df.index.name = \"Time (s)\"\n df.insert(loc=0, column=\"Timestamp\", value=timestamps)\n # For screening module\n else:\n # Create single row header of only channel names (i.e. strip out the units)\n # Replace time steps column with timestamps and use range index\n header = [\"Timestamp\"] + channels\n df.iloc[:, 0] = timestamps\n\n # Set desired header (single or multi-index)\n df.columns = header\n\n return df", "def get_expression(data_series, probes_to_genes):\n with open(data_series, 'r') as mtx:\n stage_columns = {'all_stages': {'sample_ids': []}} # will always need an average, other stages are determined by the file\n sample_ids = None\n for line in mtx:\n if line.startswith('!Sample_title'):\n sample_stages = [x.strip().replace('\"','').split(\",\")[0] for x in line.split(\"\\t\")[1:]] # this line is likely dataset specific.\n elif line.startswith('\"ID_REF\"'): # this comes after the sample titles\n sample_ids = [x.strip().replace('\"','') for x in line.split(\"\\t\")[1:]]\n # now have the ids and their stages, convert to dict\n \"\"\"\n if named differently, may need to modify this.\n ultimately, stage_columns should be a dictionary with the following properties:\n - the keys are the stage names. 
\n - each 'stage' dict should have a key 'sample_ids' that has a list the sample_ids belonging to that stage.\n {\n 'stage1': {\n 'sample_ids': ['sample_id1','sample_id2', ..., 'sample_idn']\n },\n 'stage2': {\n 'sample_ids': ['sample_idn+1', ...]\n },\n ...\n }\n \"\"\"\n for i in range(0, len(sample_stages)):\n if sample_stages[i] not in stage_columns:\n stage_columns[sample_stages[i]] = {'sample_ids': []}\n stage_columns[sample_stages[i]]['sample_ids'].append(sample_ids[i])\n stage_columns['all_stages']['sample_ids'].append(sample_ids[i]) # add every sample to this\n elif sample_ids is not None:\n row = [x.strip().replace('\"','') for x in line.split('\\t')]\n \"\"\"\n here, the stage_columns dictionary is being updated with the expression data for each gene.\n {\n 'stage1': {\n 'sample_ids': ['sample_id1','sample_id2', ..., 'sample_idn'],\n 'genes': { <- **NEW KEY**\n 'entrezID-1': ['sample_id1ExpLevel', 'sample_id2ExpLevel', ..., 'sample_idnExpLevel'],\n 'entrezID-2': ['sample_id1ExpLevel', 'sample_id2ExpLevel', ..., 'sample_idnExpLevel'],\n ... (if PERCENTILE_RANK is True, all in dataset are recorded otherwise, just the genes of interest )\n }\n },\n ...\n }\n \"\"\"\n if row[0] in probes_to_genes:\n # get gene from probe\n entrez_id = probes_to_genes[row[0]]\n # add the average expression for all the samples in a stage for the gene\n for stage, stage_data in stage_columns.items():\n stage_data['genes'] = {} if 'genes' not in stage_data else stage_data['genes'] # initialize\n for sample_id in stage_data['sample_ids']:\n # get the index of the sample_id in the row\n sample_idx = sample_ids.index(sample_id) + 1\n if entrez_id not in stage_data['genes']:\n stage_data['genes'][entrez_id] = [float(row[sample_idx])]\n else:\n stage_data['genes'][entrez_id].append(float(row[sample_idx]))\n\n return stage_columns", "def read_track(fp, colnames=None):\n # read lines\n f = open(fp, \"r+\")\n s = f.readlines()\n\n # get info\n MIST_version = re.split(r\"\\s+\", s[0].strip())[-1]\n MESA_revision = re.split(r\"\\s+\", s[1].strip())[-1]\n\n Yinit, Zinit, FeH, aFe, vvcrit = re.split(r\"\\s+\", s[4].strip())[1:]\n Yinit = np.float(Yinit)\n Zinit = np.float(Zinit)\n FeH = np.float(FeH)\n aFe = np.float(aFe)\n vvcrit = np.float(vvcrit)\n\n initial_mass, N_pts, N_EEP, N_col, phase, type_ = \\\n re.split(r\"\\s+\", s[7].strip())[1:]\n initial_mass = np.float(initial_mass)\n N_pts = np.int(N_pts)\n N_EEP = np.int(N_EEP)\n N_col = np.int(N_col)\n\n # get eep info\n EEPs = tuple([np.int(_) for _ in re.split(r\"\\s+\", s[8].strip())[2:]])\n # eep = np.arange(EEPs[0], EEPs[-1] + 1) sometimes inconsistent with data\n\n # add eep column\n # _eep\n t = Table.read(s[11:], format=\"ascii.commented_header\")\n eep = np.arange(EEPs[0], EEPs[0] + len(t))\n eep_ok = eep[-1] == EEPs[-1] + 1\n t.add_column(Column(eep, \"_eep\"))\n # _lgmass\n t.add_column(Column(np.ones(len(t), )*np.log10(initial_mass), \"_lgmass\"))\n # _lgage\n t.add_column(Column(np.log10(t[\"star_age\"].data), \"_lgage\"))\n # _feh\n t.add_column(Column(np.ones(len(t), ) * FeH, \"_feh_ini\"))\n t.add_column(Column(t[\"log_surf_z\"]-np.log10(Zsun), \"_feh\"))\n\n # add meta info\n meta = OrderedDict(\n MIST_version=MIST_version,\n MESA_revision=MESA_revision,\n Yinit=Yinit,\n Zinit=Zinit,\n FeH=FeH,\n aFe=aFe,\n vvcrit=vvcrit,\n initial_mass=initial_mass,\n N_pts=N_pts,\n N_EEP=N_EEP,\n N_col=N_col,\n phase=phase,\n type_=type_,\n EEPs=EEPs,\n EEP0=EEPs[0],\n EEP1=EEPs[-1],\n EEP1ACT=EEPs[0] + len(t),\n EEPOK=eep_ok,\n INTERP=(\"_INTERP\" in fp)\n 
)\n t.meta = meta\n\n if colnames is None:\n return t\n else:\n for colname in colnames:\n try:\n assert colname in t.colnames\n except AssertionError as ae:\n raise(ae(\"{} not in track.colnames!!!\".format(colname)))\n return t[colnames]", "def dummy_splice_junction_reads(dummy_isoform_reads):\n from outrigger.common import READS\n\n s = 'sample_id,junction,{reads}\\n'.format(reads=READS)\n for junction_id, reads in dummy_isoform_reads.iteritems():\n s += 'sample1,{junction_id},{reads}\\n'.format(junction_id=junction_id,\n reads=reads)\n data = pd.read_csv(six.StringIO(s), comment='#')\n data = data.dropna()\n data = data.set_index(\n ['junction', 'sample_id'])\n data = data.sort_index()\n return data", "def input_data(self):\n return read_file(self.file_path)", "def read_old(lookup_cnfg, lookup_qn, diagram, T, directory, verbose=0):\n\n data = []\n\n for cnfg in lookup_cnfg:\n # filename and path\n filename = directory + '/' + diagram + '_cnfg%i' % cnfg + '.h5'\n # open file\n try:\n fh = h5py.File(filename, \"r\")\n except IOError:\n print 'file %s not found' % filename\n raise\n\n # to achieve hirarchical indexing for quantum numbers build DataFrame for\n # each loop seperately\n # TODO: is it necessary to build that completely or can that be \n # constructed by successively storing each operator with pd.HDFStore()?\n data_qn = pd.DataFrame()\n# print DataFrame(lookup_p)\n# print DataFrame(lookup_g)\n ndata = 0\n nfailed = 0\n\n for op in lookup_qn.index:\n ndata += 1\n # generate operator name\n p = lookup_qn.ix[op, ['p_{so}', 'p_{si}']]\n g = lookup_qn.ix[op, ['\\gamma_{so}', '\\gamma_{si}']]\n groupname = set_groupname(diagram, p, g)\n\n # read operator from file and store in data frame\n try:\n tmp = np.asarray(fh[groupname])\n except KeyError:\n #if diagram == 'C4+C' and cnfg == 714:\n # print(\"could not read %s for config %d\" % (groupname, cnfg))\n nfailed += 1\n continue\n data_qn[op] = pd.DataFrame(tmp, columns=['re/im'])\n if nfailed > 0 and verbose > 0:\n print(\"could not read %d of %d data\" % (nfailed, ndata))\n\n # append all data for one config and close the file\n data.append(data_qn)\n fh.close()\n # generate data frame containing all operators for all configs\n data = pd.concat(data, keys=lookup_cnfg, axis=0, names=['cnfg', 'T'])\n\n if verbose:\n print '\\tfinished reading'\n\n return data.sort_index(level=[0,1])\n ##############################################################################", "def read_input():\n\n parameters = sys.argv[1:]\n \n stop_id = parameters[0]\n month = parameters[1]\n day_of_week = parameters[2]\n rain = parameters[3]\n temp = parameters[4]\n planned_arrival = parameters[5]\n\n df = pd.DataFrame({\n 'stop_id': [stop_id],\n 'day': [day_of_week],\n 'month': [month],\n 'rain': [rain],\n 'temp': [temp],\n 'planned_arrival': [planned_arrival]\n })\n\n df = df.astype({\n 'stop_id': 'int32',\n 'day': 'int32',\n 'month': 'int32',\n 'rain': 'float64',\n 'temp': 'float64',\n 'planned_arrival': 'int32'\n })\n\n return df", "def read_stream(params):\n # Ignore file integrity issues; thus far the only station affected is DR11,\n # with no seeming impact on the seismic trace itself. 
Consider treating as\n # an error in future implementation.\n warnings.simplefilter(\"error\", category=InternalMSEEDWarning)\n\n start_search = params.start_processing.floor('D')\n stop_search = params.stop_processing.floor('D')\n dts = pd.date_range(start_search, stop_search)\n count = 0\n for i, dt in enumerate(dts):\n if params.name_format == 1:\n fname = f\"{params.network}.{params.station}.{params.channel}.{dt.year}.{dt.dayofyear:03d}.mseed\"\n if params.station == \"*\":\n # filespec = f\"{params.network}/**/{params.network}.{params.station}.{params.channel}.{dt.year}.{dt.dayofyear:03d}.mseed\"\n filespec = os.path.join(params.network, \"**\", fname)\n else:\n # filespec = f\"{params.network}/{params.station}/{params.network}.{params.station}.{params.channel}.{dt.year}.{dt.dayofyear:03d}.mseed\"\n filespec = os.path.join(params.network, params.station, fname)\n elif params.name_format == 2:\n filespec = f\"{params.network}.{params.station}..{params.channel}__{dt.year}{dt.month:02d}{dt.day:02d}T*\"\n\n try:\n if count == 0:\n # st = read(f\"{params.sourcepath}/MSEED/{filespec}\")\n st = read(os.path.join(params.sourcepath, \"MSEED\", filespec))\n # st = read(f\"{params.sourcepath}/{filespec}\")\n else:\n # st += read(f\"{params.sourcepath}/MSEED/{filespec}\")\n st += read(os.path.join(params.sourcepath, \"MSEED\", filespec))\n # st += read(f\"{params.sourcepath}/{filespec}\")\n count += 1\n except:\n pass\n\n if count > 0:\n st.merge(fill_value=\"interpolate\", interpolation_samples=1)\n st.trim(\n starttime=UTCDateTime(params.start_processing),\n endtime=UTCDateTime(params.stop_processing)\n )\n return st\n else:\n return -1", "def read(self):\n try:\n datos = self.base.readData()\n for i in range(len(datos)):\n self.verDatos.insert('', i+1, text = i+1, values = (datos[i][0], datos[i][1], datos[i][2]))\n except:\n showerror(\"Error\", exc_info()[1])", "def read_microsatellite_lines(raw_lines):\n lines = Util.get_stripped_lines(raw_lines)\n if len(lines) % 2:\n raise ValueError('expected an even number of lines')\n if len(lines) < 2:\n raise ValueError('expected at least two lines')\n full_rows = [x.split() for x in lines]\n nfullcols = len(full_rows[0])\n if nfullcols < 2:\n raise ValueError('expected at least two columns')\n for row in full_rows:\n if len(row) != nfullcols:\n msg = 'each row should have the same number of elements'\n raise ValueError(msg)\n a_full_rows = [row for i, row in enumerate(full_rows) if i % 2 == 0]\n b_full_rows = [row for i, row in enumerate(full_rows) if i % 2 == 1]\n a_headers = [row[0] for row in a_full_rows]\n b_headers = [row[0] for row in b_full_rows]\n for h in a_headers:\n if not h.endswith('a'):\n msg = 'each odd row label should end with the letter a'\n raise ValueError(msg)\n for h in b_headers:\n if not h.endswith('b'):\n msg = 'each even row label should end with the letter b'\n raise ValueError(msg)\n headers = [h[:-1] for h in a_headers]\n # get the unique elements of each column\n rows = [row[1:] for row in full_rows]\n cols = zip(*rows)\n uniques = [list(iterutils.unique_everseen(col)) for col in cols]\n # get the results for each row\n a_rows = [row[1:] for row in a_full_rows]\n b_rows = [row[1:] for row in b_full_rows]\n a_columns = zip(*a_rows)\n b_columns = zip(*b_rows)\n a_binary_rows = Carbone.get_binary_rows_helper(a_columns, uniques)\n b_binary_rows = Carbone.get_binary_rows_helper(b_columns, uniques)\n # add the elements entrywise and return as a list of lists\n bin_row_groups = [a_binary_rows, b_binary_rows]\n binary_rows = 
np.array(bin_row_groups).sum(axis=0).tolist()\n return headers, binary_rows", "def get_transformed_io(data_path, data_dir):\n sents, labels = read_line_examples_from_file(data_path)\n\n # the input is just the raw sentence\n inputs = [s.copy() for s in sents]\n\n task = 'asqp'\n if task == 'aste':\n targets = get_para_aste_targets(sents, labels)\n elif task == 'tasd':\n targets = get_para_tasd_targets(sents, labels)\n elif task == 'asqp':\n targets = get_para_asqp_targets(sents, labels)\n else:\n raise NotImplementedError\n\n return inputs, targets", "def data(dataset=\"bio_eventrelated_100hz\"):\n # TODO: one could further improve this function with like\n # selectors 'ecg=True, eda=True, restingstate=True' that would\n # find the most appropriate dataset\n\n dataset = dataset.lower()\n\n # TODO: change this path back to \"master\"\n path = \"https://raw.githubusercontent.com/neuropsychology/NeuroKit/dev/data/\"\n\n # Signals as vectors =======================\n if dataset in [\"eeg\", \"eeg_150hz\", \"eeg.txt\"]:\n return pd.read_csv(path + \"eeg.txt\").values[:, 0]\n\n if dataset in [\"rsp\", \"rsp_1000hz\", \"rsp_1000hz.txt\"]:\n return pd.read_csv(path + \"rsp_1000hz.txt\", header=None).values[:, 0]\n\n if dataset in [\"ecg\", \"ecg_1000hz\", \"ecg_1000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"ecg_3000hz\", \"ecg_3000hz.csv\"]:\n return pd.read_csv(path + \"ecg_1000hz.csv\")[\"ECG\"].values\n\n if dataset in [\"eog\", \"veog\", \"eog_100hz\", \"eog_100hz.csv\"]:\n return pd.read_csv(path + \"eog_100hz.csv\")[\"vEOG\"].values\n\n # Dataframes ===============================\n if dataset == \"iris\":\n info = sklearn_datasets.load_iris()\n data = pd.DataFrame(\n info.data, columns=[\"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\", \"Petal.Width\"]\n )\n data[\"Species\"] = info.target_names[info.target]\n return data\n\n if dataset in [\"eogs\", \"eogs_200hz\", \"eog_200hz\", \"eog_200hz.csv\"]:\n return pd.read_csv(path + \"eog_200hz.csv\")\n\n # Add extension\n if dataset in [\"bio_resting_8min_200hz\"]:\n dataset += \".json\"\n\n # Specific case for json file\n if dataset.endswith(\".json\"):\n if \"https\" not in dataset:\n data = pd.read_json(path + dataset, orient=\"index\")\n else:\n data = pd.read_json(dataset, orient=\"index\")\n df = {}\n for participant, row in data.iterrows():\n for _, data_string in row.items():\n data_list = json.loads(data_string)\n data_pd = pd.DataFrame(data_list)\n df[participant] = data_pd\n\n return df\n\n # TODO: Add more EEG (fif and edf datasets)\n if dataset in [\"eeg_1min_200hz\"]:\n\n return pickle.load(\n urllib.request.urlopen(\n \"https://github.com/neuropsychology/NeuroKit/blob/dev/data/eeg_1min_200hz.pickle?raw=true\"\n )\n )\n\n # General case\n file, ext = os.path.splitext(dataset) # pylint: disable=unused-variable\n if ext == \"\":\n df = pd.read_csv(path + dataset + \".csv\")\n else:\n if \"https\" not in dataset:\n df = pd.read_csv(path + dataset)\n else:\n df = pd.read_csv(dataset)\n return df", "def read_inputs(argn=1):\n if len(sys.argv) < argn+1:\n raise IOError(\"Hey, no input file was passed as argument to\"\n \" the program!!\")\n if not os.path.exists(sys.argv[argn]):\n raise FileNotFoundError(\"Input file '{}' not found.\".\n format(sys.argv[argn]))\n return read_config_file(sys.argv[argn], attribution_char='=')", "def structure(self, ism_input):\n f = open(ism_input, 'r')\n data = []\n for line in f:\n line = line.replace('\\\"', '')\n line = line.replace('],[', 
'];[')\n line = line.strip()\n line = line.replace(']', '')\n line = line.replace('[', '')\n line = line.split(';')\n line[0] = line[0].split('|')\n ls = list(map(lambda x: x.split(','), line[1:]))\n ls = list(map(lambda x: list(map(lambda y: y.split('|'), x)), ls))\n line[1:] = ls\n data.append(line)\n data = np.array(data[1:]) \n \n return data", "def readData(file):\n \n inputValues=list()\n outputValue=list()\n totalData=list()\n \n with open(file) as fp :\n for line in fp:\n if line.strip( ) == '':\n continue\n attributeValue = line.strip().split(\",\")\n inputValue1 = float(attributeValue[0])\n inputValue2 = float(attributeValue[1])\n \n inputValues+=[[inputValue1]+[inputValue2]]\n outputValue+=[int(attributeValue[2])]\n totalData+=[[inputValue1]+[inputValue2]+[int(attributeValue[2])]]\n \n \n return inputValues,outputValue,totalData", "def create(self): # , **kwargs):\n\n # Preamble\n csvReader = CSVReader()\n csvReader.inputs.in_file = self.csv_file.default_value\n csvReader.inputs.header = self.hasHeader.default_value\n csvOut = csvReader.run()\n\n print((\"=\" * 80))\n print((csvOut.outputs.__dict__))\n print((\"=\" * 80))\n\n iters = OrderedDict()\n label = list(csvOut.outputs.__dict__.keys())[0]\n result = eval(\"csvOut.outputs.{0}\".format(label))\n iters[\"tests\"], iters[\"trains\"] = sample_crossvalidation_set(\n result, self.sample_size.default_value\n )\n # Main event\n out_fields = [\"T1\", \"T2\", \"Label\", \"trainindex\", \"testindex\"]\n inputsND = Node(\n interface=IdentityInterface(fields=out_fields),\n run_without_submitting=True,\n name=\"inputs\",\n )\n inputsND.iterables = [\n (\"trainindex\", iters[\"trains\"]),\n (\"testindex\", iters[\"tests\"]),\n ]\n if not self.hasHeader.default_value:\n inputsND.inputs.T1 = csvOut.outputs.column_0\n inputsND.inputs.Label = csvOut.outputs.column_1\n inputsND.inputs.T2 = csvOut.outputs.column_2\n else:\n inputsND.inputs.T1 = csvOut.outputs.__dict__[\"t1\"]\n inputsND.inputs.Label = csvOut.outputs.__dict__[\"label\"]\n inputsND.inputs.T2 = csvOut.outputs.__dict__[\"t2\"]\n pass # TODO\n metaflow = Workflow(name=\"metaflow\")\n metaflow.config[\"execution\"] = {\n \"plugin\": \"Linear\",\n \"stop_on_first_crash\": \"false\",\n \"stop_on_first_rerun\": \"false\",\n # This stops at first attempt to rerun, before running, and before deleting previous results.\n \"hash_method\": \"timestamp\",\n \"single_thread_matlab\": \"true\", # Multi-core 2011a multi-core for matrix multiplication.\n \"remove_unnecessary_outputs\": \"true\",\n \"use_relative_paths\": \"false\", # relative paths should be on, require hash update when changed.\n \"remove_node_directories\": \"false\", # Experimental\n \"local_hash_check\": \"false\",\n }\n\n metaflow.add_nodes([inputsND])\n \"\"\"import pdb; pdb.set_trace()\"\"\"\n fusionflow = FusionLabelWorkflow()\n self.connect(\n [\n (\n metaflow,\n fusionflow,\n [\n (\"inputs.trainindex\", \"trainT1s.index\"),\n (\"inputs.T1\", \"trainT1s.inlist\"),\n ],\n ),\n (\n metaflow,\n fusionflow,\n [\n (\"inputs.trainindex\", \"trainLabels.index\"),\n (\"inputs.Label\", \"trainLabels.inlist\"),\n ],\n ),\n (\n metaflow,\n fusionflow,\n [\n (\"inputs.testindex\", \"testT1s.index\"),\n (\"inputs.T1\", \"testT1s.inlist\"),\n ],\n ),\n ]\n )", "def readVCTPINPUTS(self): \n #fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/VALID.CTPINPUTS\"\n fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/ctpinputs.cfg\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print 
\"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n count=0\n #print \"look for me if you want different inputs range...\"\n for i in lines:\n if(i[0] == 'l' and i[1] == '0'): continue\n if i == \"\\n\": continue\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n continue\n #return None\n if items[3] == 'M': continue\n count=count+1\n #if count<6 or count>11 : continue\n #if count>10 and count<24 : continue\n #if count<16: continue\n #if count > 4 and count < 15: continue\n #if items[3] != '1': continue\n #if items[2] != \"EMCAL\": continue\n #if (items[2] != \"SPD\") and (items[2] != \"T0\"): continue\n flag=1\n for i in self.detectors:\n if items[2].find(i)>=0 or i.find(\"ALL\")>=0:\n flag=0;\n break\n if flag: continue\n # input not connected\n if items[7] == '0' and items[3] == '0': continue\n db={}\n db['name']=items[0]\n db['detector']=items[2]\n db['level']='L'+items[3]\n db['signature']=items[4]\n #db['number']=items[5]\n db['number']=items[7]\n db['numberDIM']=items[6]\n db['ctpnum']=items[5]\n db['Edge'] = items[8]\n db['Delay'] = items[9]\n dbinputs.append(db)\n #print \"Adding: \", db\n return dbinputs", "def read_one(paths_in, paths_out, current_fis=0):\n\n f = open(f\"{paths_out[current_fis]}\", \"r\")\n if f.read() != \"\":\n print(\"Output is already written for this input!\")\n return None\n f.close()\n f = open(f\"{paths_in[current_fis]}\", \"r\")\n \n line = f.readline()\n line_spaces = line.split(\" \")\n line_dest = line.split(\",\")\n time_begin, time_end = line_spaces[0].strip(), line_spaces[1].strip()\n\n autobuze = []\n\n line = f.readline()\n line_spaces = line.split(\" \")\n line_dest = line.split(\",\")\n while line_spaces[0].isnumeric() and line_spaces[1][0].isnumeric():\n id = int(line_spaces[0])\n price = float(line_spaces[1][:-3])\n break_duration = int(line_spaces[2][:-3])\n trip_duration = int(line_spaces[3][:-3])\n path = [get_dest(line_dest[0])] + get_multiple_dest(line_dest[1:])\n autobuze.append( Autobuz(id, price, break_duration, trip_duration, path) )\n\n line = f.readline()\n line_spaces = line.split(\" \")\n line_dest = line.split(\",\")\n \n nr_oameni = int(line_spaces[0])\n oameni = []\n\n for _ in range(nr_oameni):\n line = f.readline()\n line_spaces = line.split(\" \")\n line_dest = line.split(\",\")\n\n name = line_spaces[0]\n money = float(line_spaces[1][:-3])\n destinations = [get_dest(line_dest[0])] + get_multiple_dest(line_dest[1:])\n oameni.append( Om(name, money, destinations) )\n\n return time_begin, time_end, autobuze, oameni", "def readAMBERTop(self, phys, filename):\r\n\r\n def skipLine(data):\r\n nl = data.index('\\n')\r\n return data[nl+1:len(data)]\r\n\r\n def jumpTo(data, target):\r\n fp = data.index(target)\r\n return data[fp:len(data)]\r\n\r\n def readRemove(data, size):\r\n retval = data[0:size-1]\r\n return data[size:len(data)]\r\n\r\n def getInteger(data):\r\n pos = 0\r\n retval = \"\"\r\n while (not data[pos].isdigit()):\r\n pos = pos + 1\r\n while (data[pos].isdigit()):\r\n retval = retval + data[pos]\r\n pos = pos + 1\r\n data = data[pos:len(data)]\r\n return int(retval), data\r\n\r\n def parse(data, arr, str, count, dtype, tupsize=1):\r\n data = jumpTo(data, \"%FLAG \"+str)\r\n data = jumpTo(data, \"%FORMAT\")\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = 
getInteger(data)\r\n data = skipLine(data) \r\n \r\n arr2 = []\r\n numread = 0\r\n for j in range(0, (tupsize*count-1) / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n if (tupsize == 1):\r\n arr.append(dtype(data[0:fieldsize].strip()))\r\n else:\r\n arr2.append(dtype(data[0:fieldsize].strip()))\r\n if (len(arr2) == tupsize):\r\n arr.append(arr2)\r\n arr2 = []\r\n numread += 1\r\n data = data[fieldsize:len(data)]\r\n if (numread == tupsize*count):\r\n break\r\n data = skipLine(data) \r\n return data\r\n\r\n def scan(data, str):\r\n return (data.count(str) != 0)\r\n\r\n\r\n f = open(filename, 'r')\r\n data = f.read()\r\n\r\n # First Line: VERSION ...\r\n data = skipLine(data)\r\n\r\n # Go To: %FLAG POINTERS\r\n data = jumpTo(data, '%FLAG POINTERS')\r\n\r\n data = jumpTo(data, '%FORMAT')\r\n numPerLine, data = getInteger(data)\r\n fieldsize, data = getInteger(data)\r\n data = skipLine(data)\r\n \r\n temp = []\r\n numread = 0\r\n for j in range(0, 31 / numPerLine + 1):\r\n for i in range(0, numPerLine):\r\n temp.append(int(data[0:8]))\r\n data = data[8:len(data)]\r\n numread += 1\r\n if (numread == 31):\r\n break\r\n data = skipLine(data)\r\n \r\n [natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n\r\n\r\n #################################################\r\n # Read AtomTypes\r\n atomnames = []\r\n charges = []\r\n masses = []\r\n atindex = []\r\n exclusions = []\r\n nparams = []\r\n reslabels = []\r\n respointers = []\r\n forceconstants = [[], [], []] # bond, angle, dihedral\r\n equilvals = [[], [], [[], []]] # bond, angle, dihedral\r\n scee_scales = []\r\n scnb_scales = []\r\n solty = []\r\n lj_acoef = []\r\n lj_bcoef = []\r\n\r\n data = parse(data, atomnames, \"ATOM_NAME\", natoms, str) \r\n data = parse(data, charges, \"CHARGE\", natoms, float)\r\n data = parse(data, masses, \"MASS\", natoms, float)\r\n data = parse(data, atindex, \"ATOM_TYPE_INDEX\", natoms, int)\r\n data = parse(data, exclusions, \"NUMBER_EXCLUDED_ATOMS\", natoms, int)\r\n data = parse(data, nparams, \"NONBONDED_PARM_INDEX\", ntypes*ntypes, int)\r\n data = parse(data, reslabels, \"RESIDUE_LABEL\", nres, str)\r\n data = parse(data, respointers, \"RESIDUE_POINTER\", nres, int)\r\n data = parse(data, forceconstants[0], \"BOND_FORCE_CONSTANT\", numbnd, float)\r\n data = parse(data, equilvals[0], \"BOND_EQUIL_VALUE\", numbnd, float)\r\n data = parse(data, forceconstants[1], \"ANGLE_FORCE_CONSTANT\", numang, float)\r\n data = parse(data, equilvals[1], \"ANGLE_EQUIL_VALUE\", numang, float)\r\n data = parse(data, forceconstants[2], \"DIHEDRAL_FORCE_CONSTANT\", nptra, float)\r\n data = parse(data, equilvals[2][0], \"DIHEDRAL_PERIODICITY\", nptra, float)\r\n data = parse(data, equilvals[2][1], \"DIHEDRAL_PHASE\", nptra, float)\r\n if (scan(data, \"SCEE_SCALE_FACTOR\")):\r\n data = parse(data, scee_scales, \"SCEE_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scee_scales.append(1.2) # Default \r\n if (scan(data, \"SCNB_SCALE_FACTOR\")):\r\n data = parse(data, scnb_scales, \"SCNB_SCALE_FACTORS\", nptra, float)\r\n else:\r\n for i in range(0, nptra):\r\n scnb_scales.append(2.0) # Default \r\n\r\n data = parse(data, solty, \"SOLTY\", natyp, float)\r\n data = parse(data, lj_acoef, \"LENNARD_JONES_ACOEF\", ntypes*(ntypes+1)/2, float)\r\n data = parse(data, lj_bcoef, \"LENNARD_JONES_BCOEF\", ntypes*(ntypes+1)/2, 
float)\r\n\r\n\r\n ##########################################################\r\n # STRUCTURE\r\n\r\n bonds = [[], []] # With H, Without H\r\n angles = [[], []] # With H, Without H\r\n dihedrals = [[], []] # With H, Without H\r\n impropers = [[], []] # With H, Without H\r\n excluded_atoms = [] \r\n hbond_acoef = []\r\n hbond_bcoef = []\r\n hbcut = []\r\n amber_atom_types = []\r\n tree_chain = []\r\n join_array = []\r\n irotat = []\r\n radii = []\r\n screen = []\r\n\r\n data = parse(data, bonds[0], \"BONDS_INC_HYDROGEN\", nbonh, int, 3)\r\n data = parse(data, bonds[1], \"BONDS_WITHOUT_HYDROGEN\", nbona, int, 3)\r\n data = parse(data, angles[0], \"ANGLES_INC_HYDROGEN\", ntheth, int, 4)\r\n data = parse(data, angles[1], \"ANGLES_WITHOUT_HYDROGEN\", ntheta, int, 4)\r\n data = parse(data, dihedrals[0], \"DIHEDRALS_INC_HYDROGEN\", nphih, int, 5)\r\n data = parse(data, dihedrals[1], \"DIHEDRALS_WITHOUT_HYDROGEN\", nphia, int, 5)\r\n \r\n # MERGE ARRAYS - PM HANDLES THE H+\r\n final_bonds = bonds[0] + bonds[1]\r\n final_angles = angles[0] + angles[1]\r\n final_dihedrals = dihedrals[0] + dihedrals[1]\r\n final_impropers = []\r\n \r\n # CLEAN UP THE TRASH\r\n del(bonds)\r\n del(angles)\r\n del(dihedrals)\r\n \r\n\r\n # Move impropers into their own array\r\n i = 0\r\n while (i < len(final_dihedrals)):\r\n if (final_dihedrals[i][2] < 0): # 1-4 exclusions are handled by our back end\r\n final_dihedrals[i][2] *= -1\r\n if (final_dihedrals[i][3] < 0):\r\n final_dihedrals[i][3] *= -1 # Make + again\r\n final_impropers.append(final_dihedrals[i])\r\n final_dihedrals.remove(final_dihedrals[i])\r\n i -= 1\r\n i += 1\r\n\r\n # Convert charge units\r\n for i in range(0, len(charges)):\r\n charges[i] /= 18.223\r\n\r\n\r\n data = parse(data, excluded_atoms, \"EXCLUDED_ATOMS_LIST\", nnb, int)\r\n data = parse(data, hbond_acoef, \"HBOND_ACOEF\", nphb, float)\r\n data = parse(data, hbond_bcoef, \"HBOND_BCOEF\", nphb, float)\r\n data = parse(data, hbcut, \"HBCUT\", nphb, float)\r\n data = parse(data, amber_atom_types, \"AMBER_ATOM_TYPE\", natoms, str)\r\n data = parse(data, tree_chain, \"TREE_CHAIN_CLASSIFICATION\", natoms, str)\r\n data = parse(data, join_array, \"JOIN_ARRAY\", natoms, int)\r\n data = parse(data, irotat, \"IROTAT\", natoms, int)\r\n data = parse(data, radii, \"RADII\", natoms, float)\r\n data = parse(data, screen, \"SCREEN\", natoms, float)\r\n\r\n # Further process dihedrals and impropers\r\n # Deal with multiplicity\r\n # A bit ugly, but the fastest for now\r\n # forceconstants[2][dihedrals[0][i][4]-1], int(equilvals[2][0][dihedrals[0][i][4]-1]), equilvals[2][1][dihedrals[0][i][4]-1]\r\n\r\n mult_di = dict()\r\n mult_im = dict()\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n if (not mult_di.has_key(di_id)):\r\n mult_di[di_id] = [1, False, [forceconstants[2][final_dihedrals[i][4]-1]], [int(equilvals[2][0][final_dihedrals[i][4]-1])], [equilvals[2][1][final_dihedrals[i][4]-1]]]\r\n else:\r\n mult_di[di_id][0] += 1\r\n mult_di[di_id][2].append(forceconstants[2][final_dihedrals[i][4]-1])\r\n mult_di[di_id][3].append(int(equilvals[2][0][final_dihedrals[i][4]-1]))\r\n mult_di[di_id][4].append(equilvals[2][1][final_dihedrals[i][4]-1])\r\n \r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' '+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n if (not mult_im.has_key(di_id)):\r\n mult_im[im_id] = 
[1, False, [forceconstants[2][final_impropers[i][4]-1]], [int(equilvals[2][0][final_impropers[i][4]-1])], [equilvals[2][1][final_impropers[i][4]-1]]]\r\n else:\r\n mult_im[im_id][0] += 1\r\n mult_im[im_id][2].append(forceconstants[2][final_impropers[i][4]-1])\r\n mult_im[im_id][3].append(int(equilvals[2][0][final_impropers[i][4]-1]))\r\n mult_im[im_id][4].append(equilvals[2][1][final_impropers[i][4]-1])\r\n\r\n\r\n\r\n \r\n #[natoms, ntypes, nbonh, mbona, ntheth, mtheta, nphih, mphia, nhparm, nparm, nnb, nres, nbona, ntheta, nphia, numbnd, numang, nptra, natyp, nphb, ifpert, nbper, ngper, ndper, mbper, mgper, mdper, ifbox, nmxrs, ifcap, numextra] = temp \r\n #phys.myPSF.createAll(natoms, nbonh+mbona, ntheth+mtheta,\r\n # len(dihedrals[0])+len(dihedrals[1]),\r\n # len(impropers[0])+len(impropers[1]),\r\n # 0, 0, 0, 0)\r\n \r\n # Add atoms\r\n curres = 1\r\n for i in range(0, natoms):\r\n phys.myPSF.addAtom(i, 'SIM', curres, reslabels[curres-1],\r\n atomnames[i], atomnames[i], charges[i],\r\n masses[i]) \r\n if (curres != nres and i >= respointers[curres]):\r\n curres += 1\r\n\r\n # Add bonds\r\n for i in range(0, nbonh+nbona):\r\n phys.myPSF.addBond(i+1, final_bonds[i][0]/3+1, final_bonds[i][1]/3+1)\r\n phys.myPAR.addBond(i+1, atomnames[final_bonds[i][0]/3], atomnames[final_bonds[i][1]/3], forceconstants[0][final_bonds[i][2]/3], equilvals[0][final_bonds[i][2]/3])\r\n \r\n # Add angles\r\n for i in range(0, ntheth+ntheta):\r\n phys.myPSF.addAngle(i+1, final_angles[i][0]/3+1, final_angles[i][1]/3+1, final_angles[i][2]/3+1)\r\n phys.myPAR.addAngle(i+1, atomnames[final_angles[i][0]/3], atomnames[final_angles[i][1]/3], atomnames[final_angles[i][2]/3], forceconstants[1][final_angles[i][3]/3], equilvals[1][final_angles[i][3]/3])\r\n \r\n # Add dihedrals\r\n for i in range(0, len(final_dihedrals)):\r\n di_id = str(final_dihedrals[i][0])+' '+str(final_dihedrals[i][1])+' '+str(final_dihedrals[i][2])+' '+str(final_dihedrals[i][3])\r\n mult = mult_di[di_id][0]\r\n checked = mult_di[di_id][1]\r\n print di_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], forceconstants[2][final_dihedrals[i][4]-1], int(equilvals[2][0][final_dihedrals[i][4]-1]), equilvals[2][1][final_dihedrals[i][4]-1])\r\n else:\r\n mult_di[di_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_di[di_id][2])):\r\n fcvec.push_back(mult_di[di_id][2][j])\r\n periodvec.push_back(mult_di[di_id][3][j])\r\n phasevec.push_back(mult_di[di_id][4][j])\r\n phys.myPSF.addDihedral(i+1, final_dihedrals[i][0]/3+1, final_dihedrals[i][1]/3+1, int(numpy.abs(final_dihedrals[i][2]))/3+1, final_dihedrals[i][3]/3+1)\r\n phys.myPAR.addDihedral(i+1, atomnames[final_dihedrals[i][0]/3], atomnames[final_dihedrals[i][1]/3], atomnames[int(numpy.abs(final_dihedrals[i][2]))/3], atomnames[final_dihedrals[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n \r\n\r\n\r\n\r\n for i in range(0, len(final_impropers)):\r\n im_id = str(final_impropers[i][0])+' 
'+str(final_impropers[i][1])+' '+str(final_impropers[i][2])+' '+str(final_impropers[i][3])\r\n mult = mult_im[im_id][0]\r\n checked = mult_im[im_id][1]\r\n print im_id, \" \", mult\r\n if (not checked):\r\n if (mult == 1):\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], forceconstants[2][final_impropers[i][4]-1], int(equilvals[2][0][final_impropers[i][4]-1]), equilvals[2][1][final_impropers[i][4]-1])\r\n else:\r\n mult_im[im_id][1] = True\r\n # Add dihedral with the appropriate multiplicity\r\n # Force constants, periodicity and phase shifts are in [2], [3], and [4] respectively\r\n fcvec = PARReader.VectorOfDouble()\r\n periodvec = PARReader.VectorOfInt()\r\n phasevec = PARReader.VectorOfDouble() \r\n for j in range(0, len(mult_im[im_id][2])):\r\n fcvec.push_back(mult_im[im_id][2][j])\r\n periodvec.push_back(mult_im[im_id][3][j])\r\n phasevec.push_back(mult_im[im_id][4][j])\r\n phys.myPSF.addImproper(i+1, final_impropers[i][0]/3+1, final_impropers[i][1]/3+1, int(numpy.abs(final_impropers[i][2]))/3+1, final_impropers[i][3]/3+1)\r\n phys.myPAR.addImproper(i+1, atomnames[final_impropers[i][0]/3], atomnames[final_impropers[i][1]/3], atomnames[int(numpy.abs(final_impropers[i][2]))/3], atomnames[final_impropers[i][3]/3], mult, fcvec, periodvec, phasevec)\r\n\r\n \r\n # Need to add garbage nonbonded stuff for now\r\n for i in range(0, natoms):\r\n phys.myPAR.addNonbonded(i, atomnames[i], 1, 1, 1, 1, 1, 1)\r\n\r\n # Add VDW parameters\r\n # AMBER has the Aij and Bij already in the parameter file\r\n # This actually makes life easier.\r\n # CHARMM does not, they simply have the original sigma and epsilon.\r\n # To compensate for this, for now we will leave the nonbondeds empty in phys.myPAR\r\n # We will then access the LennardJones parameter table in Topology directly\r\n k = 0\r\n phys.myTop.resizeLennardJonesParameters(ntypes)\r\n for i in range(0, ntypes):\r\n for j in range(i, ntypes):\r\n params = GenericTopology.LennardJonesParameters(lj_acoef[k], lj_bcoef[k])\r\n k += 1\r\n phys.myTop.setLennardJonesParameters(i, j, params)\r\n \r\n phys.myPAR.readFlag = 1\r\n phys.build()", "def generate_input_from_cweeds(outdir, cweed2_paths, cweed3_paths, year_range):\n if not isinstance(cweed2_paths, (list, tuple)):\n cweed2_paths = [cweed2_paths]\n if not isinstance(cweed3_paths, (list, tuple)):\n cweed3_paths = [cweed3_paths]\n\n print('Reading CWEEDS files...', end=' ')\n lat_dd = []\n lon_dd = []\n stations = []\n data = []\n for cweed2, cweed3 in zip(cweed2_paths, cweed3_paths):\n daily_wy2 = read_cweeds_file(cweed2, format_to_daily=True)\n daily_wy3 = read_cweeds_file(cweed3, format_to_daily=True)\n wy23_df = join_daily_cweeds_wy2_and_wy3(daily_wy2, daily_wy3)\n\n lat_dd.append(wy23_df['Latitude'])\n lon_dd.append(wy23_df['Longitude'])\n stations.append(wy23_df['Location'])\n\n indexes = np.where((wy23_df['Years'] >= year_range[0]) &\n (wy23_df['Years'] <= year_range[1]))[0]\n data.append(wy23_df['Irradiance'][indexes])\n data = nan_as_text_tolist(np.array(data).astype(float).transpose())\n print('done')\n\n fname = osp.join(outdir, 'solrad_input_data.csv')\n print('Saving {} data to {}...'.format('solrad', fname), end=' ')\n\n # Create an array of datestring and lat/lon\n Ndt = 
len(wy23_df['Years'][indexes])\n start = datetime.datetime(year_range[0], 1, 1)\n datetimes = [start + datetime.timedelta(days=i) for i in range(Ndt)]\n datestrings = [dt.strftime(\"%d/%m/%Y\") for dt in datetimes]\n\n # Save the data to file.\n fheader = [['Global solar irradiance in MJ/m²'],\n ['', ''],\n ['Created by ' + __namever__],\n ['Created on ' + strftime(\"%d/%m/%Y\")],\n ['Created from CWEED files'],\n ['', ''],\n ['Stations'] + stations,\n ['Latitude (dd)'] + lat_dd,\n ['Longitude (dd)'] + lon_dd,\n ['', '']]\n fdata = [[datestrings[i]] + data[i] for i in range(Ndt)]\n fcontent = fheader + fdata\n save_content_to_csv(fname, fcontent)\n print('done')", "def read_dataset_v1():\n path = load_config()\n T = feather.read_dataframe(path['data_dir'] / 'T_dat.feather')\n E = feather.read_dataframe(path['data_dir'] / 'E_dat.feather')\n M = feather.read_dataframe(path['data_dir'] / 'Meta.feather')\n data = sio.loadmat(path['data_dir'] / 'highvar_genes.mat', squeeze_me=True)\n return T, E, M, data", "def read(lookup_cnfg, lookup_qn, diagram, T, directory, verbose=0):\n\n data = []\n comb = True if diagram == 'C4+D' else False\n\n for cnfg in lookup_cnfg:\n # filename and path\n filename = directory + '/' + diagram + '_cnfg%i' % cnfg + '.h5'\n try:\n fh = h5py.File(filename, \"r\")\n except IOError:\n print 'file %s not found' % filename\n raise\n\n # to achieve hirarchical indexing for quantum numbers build DataFrame for\n # each loop seperately\n # TODO: is it necessary to build that completely or can that be \n # constructed by successively storing each operator with pd.HDFStore()?\n data_qn = DataFrame()\n# print DataFrame(lookup_p)\n# print DataFrame(lookup_g)\n\n for op in lookup_qn.index:\n p = lookup_qn.ix[op, ['p_{so}', 'p_{si}']]\n g = lookup_qn.ix[op, ['\\gamma_{so}', '\\gamma_{si}']]\n groupname = set_groupname(diagram, p, g)\n\n # read data from file as numpy array and interpret as complex\n # numbers for easier treatment\n try:\n tmp = np.asarray(fh[groupname]).view(complex)\n except KeyError:\n print(\"could not read %s for config %d\" % (groupname, cnfg))\n continue\n\n # in case diagram is C4+D perform last mutliplication of factorizing\n # traces\n # the file contains 4 numbers per time slice: ReRe, ReIm, ImRe, and ImIm,\n # here combined 2 complex number\n if comb:\n # reshaping so we can extract the data easier\n tmp = tmp.reshape((-1,2))\n # extracting right combination, assuming ImIm contains only noise\n dtmp = 1.j * (tmp[:,1].real + tmp[:,0].imag) + tmp[:,0].real\n tmp = dtmp.copy()\n\n # save data into data frame\n data_qn[op] = pd.DataFrame(tmp, columns=['re/im'])\n data.append(data_qn)\n data = pd.concat(data, keys=lookup_cnfg, axis=0, names=['cnfg', 'T'])\n\n if verbose:\n print '\\tfinished reading'\n\n return data.sort_index(level=[0,1])", "def read_results():\r\n with open(\"packing.nfo\", \"r\") as fin:\r\n fin.readline()\r\n fin.readline()\r\n por_theory = float(fin.readline().split()[2])\r\n por_final = float(fin.readline().split()[2])\r\n print('Theoretical porosity:', por_theory)\r\n print('Final porosity:', por_final)\r\n with open(\"packing.xyzd\", \"rb\") as fin:\r\n btxt = fin.read()\r\n txt = list(struct.unpack(\"<\" + \"d\" * (len(btxt) // 8), btxt))\r\n data = array(zip(*[iter(txt)] * 4))\r\n data[:, 3] = data[:, 3] * \\\r\n ((1 - por_final) / (1 - por_theory))**(1 / 3)\r\n return data", "def parse_seqscreen(input, output):\n \n df = pd.read_csv(input, sep='\\t', index_col=0, na_values='-')\n pathogenicity_features = df.loc[:, 
'disable_organ':'virulence_regulator'].fillna(0).astype(int).sum(axis=1)\n\n pathogenic_genes_df = df.loc[pathogenicity_features > 0, ['taxid', \n 'centrifuge_multi_tax', \n 'diamond_multi_tax',\n 'go',\n 'multi_taxids_confidence',\n 'go_id_confidence',\n 'size',\n 'organism',\n 'gene_name',\n 'uniprot',\n 'uniprot evalue']]\n \n pathogenic_genes_df['taxid'] = pathogenic_genes_df['taxid'].astype(int)\n \n pathogenic_genes_df.index.name = 'gene'\n \n \n pathogenic_genes = pathogenicity_features[pathogenicity_features > 0].index\n \n gene_pathogenicity_features_dict = {}\n \n for gene, row in df.loc[pathogenic_genes,'disable_organ':'virulence_regulator'].iterrows():\n gene_pathogenicity_features_dict[gene] = ';'.join(row[row>0].index)\n \n pathogenicity_df = pd.DataFrame.from_dict(gene_pathogenicity_features_dict, \n orient='index',\n columns=['Pathogenicity'])\n\n pathogenicity_df.index.name = 'gene'\n\n pd.merge(pathogenic_genes_df, pathogenicity_df, left_index=True, right_index=True).to_csv(output, sep='\\t')", "def read_input_file():\n \n global input\n \n config = ConfigParser.RawConfigParser()\n config.read(os.path.join(os.getcwd(), 'INPUT.cfg'))\n\n input = {}\n input['datapath'] = config.get('Address_info', 'datapath')\n input['inter_address'] = config.get('Address_info', 'interactive_address')\n input['target_folder'] = config.get('Address_info', 'target_folder')\n input['save_folder'] = config.get('Address_info', 'save_folder')\n \n if not os.path.isabs(input['datapath']):\n input['datapath'] = os.path.join(os.getcwd(), input['datapath'])\n \n if not os.path.isabs(input['inter_address']):\n input['inter_address'] = os.path.join(os.getcwd(), input['inter_address'])\n \n if not os.path.isabs(input['target_folder']):\n input['target_folder'] = os.path.join(os.getcwd(), input['target_folder'])\n \n if not os.path.isabs(input['save_folder']):\n input['save_folder'] = os.path.join(os.getcwd(), input['save_folder'])\n \n \n input['min_date'] = str(eval(config.get('Event_Request', 'min_datetime')))\n input['max_date'] = str(eval(config.get('Event_Request', 'max_datetime')))\n input['min_mag'] = config.getfloat('Event_Request', 'min_magnitude')\n input['max_mag'] = config.getfloat('Event_Request', 'max_magnitude')\n input['min_depth'] = config.getfloat('Event_Request', 'min_depth')\n input['max_depth'] = config.getfloat('Event_Request', 'max_depth')\n input['evlonmin'] = config.getfloat('Event_Request', 'evlonmin')\n input['evlonmax'] = config.getfloat('Event_Request', 'evlonmax')\n input['evlatmin'] = config.getfloat('Event_Request', 'evlatmin')\n input['evlatmax'] = config.getfloat('Event_Request', 'evlatmax')\n input['preset'] = config.getfloat('Event_Request', 'preset')\n input['offset'] = config.getfloat('Event_Request', 'offset')\n input['max_result'] = config.getint('Event_Request', 'max_results')\n \n input['get_events'] = config.get('Request', 'get_events')\n input['input_period'] = config.get('Parallel', 'input_period')\n input['IRIS'] = config.get('Request', 'IRIS')\n input['ArcLink'] = config.get('Request', 'ArcLink')\n input['time_iris'] = config.get('Request', 'time_iris')\n input['time_arc'] = config.get('Request', 'time_arc')\n \n input['nodes'] = config.get('Parallel', 'nodes')\n\n input['waveform'] = config.get('Request', 'waveform')\n input['response'] = config.get('Request', 'response')\n input['SAC'] = config.get('Request', 'SAC')\n \n input['net'] = config.get('specifications_request', 'network')\n input['sta'] = config.get('specifications_request', 'station')\n \n if 
config.get('specifications_request', 'location') == \"''\":\n input['loc'] = ''\n elif config.get('specifications_request', 'location') == '\"\"':\n input['loc'] = ''\n else:\n input['loc'] = config.get('specifications_request', 'location')\n \n input['cha'] = config.get('specifications_request', 'channel')\n\n if config.get('specifications_request', 'lat') == 'None':\n input['lat_cba'] = None\n else:\n input['lat_cba'] = config.get('specifications_request', 'lat')\n \n if config.get('specifications_request', 'lon') == 'None':\n input['lon_cba'] = None\n else:\n input['lon_cba'] = config.get('specifications_request', 'lon')\n \n if config.get('specifications_request', 'minradius') == 'None':\n input['mr_cba'] = None\n else:\n input['mr_cba'] = config.get('specifications_request', 'minradius')\n \n if config.get('specifications_request', 'maxradius') == 'None':\n input['Mr_cba'] = None\n else:\n input['Mr_cba'] = config.get('specifications_request', 'maxradius')\n \n \n if config.get('specifications_request', 'minlat') == 'None':\n input['mlat_rbb'] = None\n else:\n input['mlat_rbb'] = config.get('specifications_request', 'minlat')\n \n if config.get('specifications_request', 'maxlat') == 'None':\n input['Mlat_rbb'] = None\n else:\n input['Mlat_rbb'] = config.get('specifications_request', 'maxlat')\n \n if config.get('specifications_request', 'minlon') == 'None':\n input['mlon_rbb'] = None\n else:\n input['mlon_rbb'] = config.get('specifications_request', 'minlon')\n \n if config.get('specifications_request', 'maxlon') == 'None':\n input['Mlon_rbb'] = None\n else:\n input['Mlon_rbb'] = config.get('specifications_request', 'maxlon')\n\n \n input['test'] = config.get('test', 'test')\n input['test_num'] = config.getint('test', 'test_num')\n \n input['update_interactive'] = config.get('update', 'update_interactive')\n input['iris_update'] = config.get('update', 'iris_update')\n input['arc_update'] = config.get('update', 'arc_update')\n\n input['QC_IRIS'] = config.get('QC', 'QC_IRIS')\n input['QC_ARC'] = config.get('QC', 'QC_ARC')\n \n input['email'] = config.get('email', 'email')\n input['email_address'] = config.get('email', 'email_address')\n \n input['report'] = config.get('report', 'report')\n \n input['corr_unit'] = config.get('instrument_correction', 'corr_unit')\n input['pre_filt'] = config.get('instrument_correction', 'pre_filter')\n \n input['plt_event'] = config.get('ObsPyPT', 'plot_event')\n input['plt_sta'] = config.get('ObsPyPT', 'plot_sta')\n input['plt_ray'] = config.get('ObsPyPT', 'plot_ray')\n\n input['llcrnrlon'] = config.getfloat('ObsPyPT', 'llcrnrlon')\n input['urcrnrlon'] = config.getfloat('ObsPyPT', 'urcrnrlon')\n input['llcrnrlat'] = config.getfloat('ObsPyPT', 'llcrnrlat')\n input['urcrnrlat'] = config.getfloat('ObsPyPT', 'urcrnrlat')\n \n input['lon_0'] = config.getfloat('ObsPyPT', 'lon_0')\n input['lat_0'] = config.getfloat('ObsPyPT', 'lat_0')", "def readOutputfile(filename, verbose=False):\n\n # -----------------------------------------------------------------------------\n # Defining the classes for data structure\n T_Simulation = namedtuple('Simulation', ['step'])\n T_Step = namedtuple('Step', ['element', 'node'])\n\n T_Displacement = namedtuple('Displacement', ['ux', 'uy'])\n\n T_Element = namedtuple('Element', ['gp', 'avstrain', 'avstress', 'eqstrain'])\n T_GP = namedtuple('GP', ['stress', 'strain'])\n T_Stresses = namedtuple('Stresses', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n T_Strains = namedtuple('Strains', ['xx', 'yy', 'zz', 'yz', 'zx', 'xy'])\n # 
-----------------------------------------------------------------------------\n\n nSteps = 0 # Simulation step counter\n\n SimData = T_Simulation(list())\n\n with open(filename) as f:\n line = f.readline() # Read in the first line of the input file\n while True: # Loop over all lines of the input file\n # Read the nodes displacements\n #line = f.readline()\n #print(line)\n if line == 'DofManager output:\\n': # String starts a list of nodes displacement information\n nSteps += 1 # The above string starts a new simulation step\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Nodes = list() # Initialize/clear list of nodes\n\n while line != '\\n' and line != 'Element output:\\n': # Strings that finish the list\n #\t\t\t\tnNode = int(line.strip().split()[1]) # Node id\n line = f.readline()\n dim1 = float(line.strip().split()[3]) # Displacement dim1\n line = f.readline()\n dim2 = float(line.strip().split()[3]) # Displacement dim2\n Nodes.append(\n T_Displacement(dim1, dim2)) # Append displacements of the current node to the node list\n line = f.readline()\n\n\n if verbose:\n print('Step {}: Dofs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n # Read the stresses an strains at Gauss points\n elif line == 'Element output:\\n': # String starts a list elements, GPs, strains and stresses\n line = f.readline() # Cancel ---------- seperator\n line = f.readline()\n Elements = list() # Initialize/clear list of elements\n\n while line != '\\n' and line != '\\tR E A C T I O N S O U T P U T:\\n': # Strings that finish the list\n #\t\t\t\t\tnElement = line.strip().split()[2] # Element id\n line = f.readline()\n GPs = T_Element(list(), 0, 0, 0) # List of Gauss points\n\n while line != '\\n' and line.strip().split()[0] == 'GP': # String that starts a new GP\n #\t\t\t\t\t\tnGP = int(line.strip().split()[1].split('.')[1]) # GP id\n tmp = [float(i) for i in line.strip().split()[4:10]] # Read the strains\n strain = T_Strains(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n line = f.readline()\n tmp = [float(i) for i in line.strip().split()[1:7]] # Read the stresses\n stress = T_Stresses(tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])\n GPs.gp.append(\n T_GP(stress, strain)) # Append stresses and strains of the current GP to the GP list\n line = f.readline()\n\n\n Elements.append(GPs) # Append GP list of the current element to the element list\n\n if verbose:\n print('Step {}: GPs completed.\\n'.format(nSteps))\n print('---------------------------------\\n')\n\n SimData.step.append(T_Step(Elements, Nodes)) # Append element and node list of the current step to the step list\n #print('the file input ends')\n #print(nSteps)\n # only needed with a while loop\n # Jump over the lines until we reach the next time step (Caught by if-clause)\n try:\n line = f.readline() # Will generate an error if files end is reached\n if line == \"\":\n raise EOFError\n except:\n if verbose: print(\"End of file reached.\\n\")\n break # Break the 'while True' loop\n\n # -----------------------------------------------------------------------------\n\n\n print('averaging the stress')\n # Averaging of strains and stress of GPs of each element\n for istep in range(len(SimData.step)):\n\n for ielement in range(len(SimData.step[istep].element)):\n print(len)\n # Initialization before each element\n stresses = np.array([0., 0., 0., 0., 0., 0.])\n strains = np.array([0., 0., 0., 0., 0., 0.])\n\n for igp in range(len(SimData.step[istep].element[ielement])):\n print(igp)\n # Add up all 
data of all GPs\n #stresses[:] += SimData.step[istep].element[ielement].gp[igp].stress[:]\n strains[:] += SimData.step[istep].element[ielement].gp[igp].strain[:]\n\n # Divide GP sum by number of GPs\n stresses /= len(SimData.step[istep].element[ielement])\n strains /= len(SimData.step[istep].element[ielement])\n # Replace the field (initialized with 0) with new information\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstress=T_Stresses(stresses[0], stresses[1], stresses[2], stresses[3], stresses[4], stresses[5]))\n SimData.step[istep].element[ielement] = SimData.step[istep].element[ielement]._replace(\n avstrain=T_Strains(strains[0], strains[1], strains[2], strains[3], strains[4], strains[5]))\n print('Analysis finished')\n return SimData", "def split_inputs(self):\n\n lca = self.lca\n inputs = self.inputs\n\n inputs_dict = {} # Only store exchanges with uncertainty\n\n # Keep track of which tech_params and bio_params are already included to the analysis\n # Needed to avoid running sa indices computations twice for the same tech or bio params. \n # Initialize with parameterized exchanges\n if self.parameters != None and self.ParametersModel != None:\n indices_tech_all = self.parameters_dict['tech_params_where']\n indices_bio_all = self.parameters_dict['bio_params_where']\n else:\n indices_tech_all = np.array([], dtype=int)\n indices_bio_all = np.array([], dtype=int)\n\n for input_ in inputs:\n\n if input_ == 'biosphere':\n continue\n\n inputs_dict[input_] = {}\n\n indices_tech = np.array([], dtype=int)\n indices_bio = np.array([], dtype=int)\n\n if input_ == 'technosphere':\n indices_tech = np.where(lca.tech_params['uncertainty_type']!=0)[0]\n if 'biosphere' in inputs:\n indices_bio = np.where(lca.bio_params['uncertainty_type']!=0)[0]\n\n elif input_ == 'demand_exc':\n # Select all products that pertain to activities in the given demand vector\n for act_index in np.nonzero(lca.demand_array)[0]:\n mask_tech = np.all([lca.tech_params['uncertainty_type']!=0, lca.tech_params['col']==act_index], axis=0)\n indices_tech = np.concatenate([indices_tech, np.where(mask_tech)[0]])\n if 'biosphere' in inputs:\n mask_bio = np.all([lca.bio_params['uncertainty_type']!=0, lca.bio_params['col']==act_index], axis=0)\n indices_bio = np.concatenate([indices_bio, np.where(mask_bio)[0]])\n\n elif input_ in self.databases:\n # Select all products and flows that are linked to the given database\n # Indices corresponding to exchanges in the tech_params depending on the given database\n db_act_indices_tech = [val for key,val in lca.activity_dict.items() if key[0]==input_]\n if len(db_act_indices_tech) > 0:\n db_act_index_min_tech = db_act_indices_tech[0]\n db_act_index_max_tech = db_act_indices_tech[-1]\n mask = lambda i : np.all( [lca.tech_params['uncertainty_type']!=0, \n lca.tech_params['col']==i,\n lca.tech_params['amount']!=0], axis=0 )\n indices_tech = [ np.where( mask(i) ) [0] for i in range(db_act_index_min_tech, db_act_index_max_tech+1) ]\n indices_tech = np.concatenate(indices_tech)\n\n # Indices corresponding to flows in the biosphere params depending on the given database\n if 'biosphere' in inputs:\n mask = lambda j : np.all( [lca.bio_params['uncertainty_type']!=0, lca.bio_params['col']==j], axis=0 )\n indices_bio = [ np.where(mask(j))[0] for j in range(db_act_index_min_tech, db_act_index_max_tech+1) ]\n indices_bio = np.concatenate(indices_bio)\n\n indices_tech = np.sort(indices_tech)\n indices_bio = np.sort(indices_bio)\n\n # Do not add indices_tech that 
are already in the indices_tech_all\n indices_tech_same = np.intersect1d(indices_tech, indices_tech_all)\n pos_tech = np.array([ np.where(indices_tech==s)[0] for s in indices_tech_same ]).flatten()\n indices_tech = np.delete(indices_tech, pos_tech)\n np.append(indices_tech_all, indices_tech)\n\n # Do not add indices_bio that are already in the indices_bio_all\n indices_bio_same = np.intersect1d(indices_bio, indices_bio_all)\n pos_bio = np.array([ np.where(indices_bio==s)[0] for s in indices_bio_same ]).flatten()\n indices_bio = np.delete(indices_bio, pos_bio)\n np.append(indices_bio_all, indices_bio)\n \n inputs_dict[input_]['tech_params'] = lca.tech_params[indices_tech] #TODO maybe remove later, indices should be sufficient\n inputs_dict[input_]['tech_params_where'] = indices_tech\n inputs_dict[input_]['tech_n_params'] = len(indices_tech) #TODO remove later\n\n inputs_dict[input_]['bio_params'] = lca.bio_params[indices_bio] #TODO maybe remove later\n inputs_dict[input_]['bio_params_where'] = indices_bio\n inputs_dict[input_]['bio_n_params'] = len(indices_bio)\n\n\n self.indices_tech_all = indices_tech_all #TODO remove later\n self.indices_bio_all = indices_bio_all\n self.inputs_dict = inputs_dict", "def read_outputs(self, fname, out_props, refined_blocks=None):\r\n for prop in out_props:\r\n attr_name = prop[0].replace(\" \", \"\").strip()\r\n attr_title = prop[1]\r\n self.out_props[attr_title] = {}\r\n if not hasattr(self, 'times'):\r\n self.times = []\r\n build = False\r\n found = False\r\n\r\n print('Reading ' + attr_title + ' output')\r\n with open(fname, \"r\") as fp:\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n # Find current time step\r\n if item[0] == 'Time':\r\n time = item[2]\r\n continue\r\n attr = line.replace(\" \", \"\").strip()\r\n\r\n # Locate attribute name\r\n if attr == attr_name:\r\n build = True\r\n found = True\r\n layers = [[[] for j in range(self.size[1])] for i in range(self.size[2])]\r\n I = None\r\n J = None\r\n K = '1'\r\n continue\r\n\r\n if build:\r\n if item[0] == 'All':\r\n v = self._str_to_float(item[3])\r\n grid = np.full((self.size[2], self.size[1], self.size[0]), v)\r\n self.out_props[attr_title][time] = grid\r\n build = False\r\n continue\r\n\r\n if item[0] == 'Plane':\r\n K = item[3]\r\n if len(item) > 4:\r\n if item[4] == 'All':\r\n v = self._str_to_float(item[-1])\r\n k_layer = np.full((self.size[1], self.size[0]), v)\r\n layers[int(K)-1] = k_layer\r\n I = [self.size[0]]\r\n J = []\r\n JIdx = self.size[1]\r\n else:\r\n I = None\r\n continue\r\n\r\n # Detects line indices under which 'I' properties are printed\r\n if item[0] == 'I':\r\n J = None\r\n prop_i = []\r\n I = item[2:]\r\n prev_digit = False\r\n for i in range(len(line)):\r\n if line[i].isdigit():\r\n if not prev_digit:\r\n prop_i.append([i])\r\n else:\r\n prop_i[-1].append(i)\r\n prev_digit = True\r\n else:\r\n prev_digit = False\r\n continue\r\n\r\n # Check if there are any missing values in J line\r\n skip_i = []\r\n if item[0] == 'J=':\r\n JIdx = item[1]\r\n J = item[2:]\r\n for i in range(len(prop_i)):\r\n skip = True\r\n for j in prop_i[i]:\r\n if line[j] != ' ':\r\n skip = False\r\n if skip:\r\n skip_i.append(i)\r\n n_skip = 0\r\n for i in range(len(I)):\r\n if i in skip_i:\r\n # layers[int(K)-1][int(JIdx)-1].append('NULL')\r\n layers[int(K) - 1][int(JIdx) - 1].append(-1)\r\n n_skip += 1\r\n else:\r\n if J[i - n_skip][-1] == 'r':\r\n J[i - n_skip] = J[i - n_skip][:-1]\r\n\r\n v = self._str_to_float(J[i - n_skip])\r\n layers[int(K) - 1][int(JIdx) - 
1].append(v)\r\n # layers[int(K)-1][int(JIdx)-1].append(J[i - n_skip])\r\n continue\r\n\r\n # Put entire grid worth of property in dictionary for current time step\r\n if I is not None and J is not None:\r\n if int(I[-1]) == self.size[0] and int(JIdx) == self.size[1] and int(K) == self.size[2]:\r\n if build:\r\n self.out_props[attr_title][time] = layers\r\n build = False\r\n\r\n # Add local grid refinements\r\n if refined_blocks:\r\n refined = self.refine_outputs(fp, copy.deepcopy(refined_blocks))\r\n for c in list(refined.keys()):\r\n idx = c.split(',')\r\n self.out_props[attr_title][time][idx[2]][idx[1]][int(idx[0])-1] = refined[c]\r\n if not found:\r\n print(attr_title + ' was not found in output file!')\r\n del self.out_props[attr_title]\r\n else:\r\n if len(self.times) == 0:\r\n self.times = list(self.out_props[attr_title].keys())", "def read_edges(f=sys.stdin):\n edges = []\n k = ['first', 'last', 'capacity', 'flow', 'used']\n lines = f.readlines()\n for line in lines:\n v = [int(s) for s in line.split(\" \")] + [0, False]\n edges.append(dict(zip(k,v)))\n \n\n return edges", "def parse_ensembl_exons(lines):\n header = []\n for index, line in enumerate(lines):\n # File allways start with a header line\n if index == 0:\n header = line.rstrip().split(\"\\t\")\n continue\n\n exon_info = parse_ensembl_line(line, header)\n\n exon = {\n \"chrom\": str(exon_info[\"chrom\"]),\n \"gene\": exon_info[\"ensembl_gene_id\"],\n \"transcript\": exon_info[\"ensembl_transcript_id\"],\n \"ens_exon_id\": exon_info[\"ensembl_exon_id\"],\n \"exon_chrom_start\": exon_info[\"exon_start\"],\n \"exon_chrom_end\": exon_info[\"exon_end\"],\n \"strand\": exon_info[\"strand\"],\n \"rank\": exon_info[\"exon_rank\"],\n }\n try:\n exon[\"5_utr_start\"] = int(exon_info.get(\"utr_5_start\"))\n except (ValueError, TypeError):\n exon[\"5_utr_start\"] = None\n\n try:\n exon[\"5_utr_end\"] = int(exon_info.get(\"utr_5_end\"))\n except (ValueError, TypeError):\n exon[\"5_utr_end\"] = None\n\n try:\n exon[\"3_utr_start\"] = int(exon_info.get(\"utr_3_start\"))\n except (ValueError, TypeError):\n exon[\"3_utr_start\"] = None\n\n try:\n exon[\"3_utr_end\"] = int(exon_info.get(\"utr_3_end\"))\n except (ValueError, TypeError):\n exon[\"3_utr_end\"] = None\n\n # Recalculate start and stop (taking UTR regions into account for end exons)\n if exon[\"strand\"] == 1:\n # highest position: start of exon or end of 5' UTR\n # If no 5' UTR make sure exon_start is allways choosen\n start = max(exon[\"exon_chrom_start\"], exon[\"5_utr_end\"] or -1)\n # lowest position: end of exon or start of 3' UTR\n end = min(exon[\"exon_chrom_end\"], exon[\"3_utr_start\"] or float(\"inf\"))\n elif exon[\"strand\"] == -1:\n # highest position: start of exon or end of 3' UTR\n start = max(exon[\"exon_chrom_start\"], exon[\"3_utr_end\"] or -1)\n # lowest position: end of exon or start of 5' UTR\n end = min(exon[\"exon_chrom_end\"], exon[\"5_utr_start\"] or float(\"inf\"))\n\n exon[\"start\"] = start\n exon[\"end\"] = end\n exon_id = \"-\".join([str(exon[\"chrom\"]), str(start), str(end)])\n exon[\"exon_id\"] = exon_id\n\n if start > end:\n raise ValueError(\"ERROR: %s\" % exon_id)\n\n yield exon", "def read(f):\n \n if isinstance(f, basestring):\n # If the input is a string, treat as file name\n with open(f) as fh: # Ensure file is closed\n return read(fh) # Call again with file object\n \n # First line contains the date\n date = f.readline()\n if not date:\n raise IOError(\"Cannot read from input file \"+str(filename))\n \n # Second is description\n desc = 
f.readline()\n \n token = file_numbers(f)\n \n # Third contains number of mesh points\n try:\n npsi = int(token.next())\n ntheta = int(token.next())\n isym = int(token.next())\n except StopIteration:\n raise IOError(\"Unexpected end of file while reading grid size\")\n except ValueError:\n raise IOError(\"Third line should contain npsi, ntheta and isym\")\n \n # Check values\n if (isym < 0) or (isym > 1):\n raise IOError(\"isym must be either 0 or 1\")\n if (npsi < 1) or (ntheta < 1):\n raise IOError(\"Invalid npsi=\"+str(npsi)+\" or ntheta=\" + str(ntheta))\n \n # Read normalisation factors\n\n try:\n rcnt = float(token.next())\n xma = float(token.next())\n zma = float(token.next())\n btor = float(token.next())\n curtot = float(token.next())\n eaxe = float(token.next())\n dnorm = float(token.next())\n except:\n raise IOError(\"Couldn't read normalisation factors\")\n \n def read_array(n, name=\"Unknown\"):\n data = np.zeros([n])\n try:\n for i in np.arange(n):\n data[i] = float(token.next())\n except:\n raise IOError(\"Failed reading array '\"+name+\"' of size \", n)\n return data\n\n def read_2d(nx, ny, name=\"Unknown\"):\n data = np.zeros([nx, ny])\n for i in np.arange(nx):\n data[i,:] = read_array(ny, name+\"[\"+str(i)+\"]\")\n return data\n\n # Read 1D arrays\n psiflux = read_array(npsi, \"psiflux\")\n fnorm = read_array(npsi, \"fnorm\")\n ffpnorm = read_array(npsi, \"ffpnorm\")\n ponly = read_array(npsi, \"ponly\")\n pponly = read_array(npsi, \"pponly\")\n qsf = read_array(npsi, \"qsf\")\n d = read_array(npsi, \"d\")\n \n dpdz = read_array(ntheta, \"dpdz\")\n dpdr = read_array(ntheta, \"dpdr\")\n \n # 2D arrays\n \n xnorm = read_2d(ntheta, npsi, \"xnorm\")\n znorm = read_2d(ntheta, npsi, \"znorm\")\n \n # Try to read Br and Bz (may be present)\n try:\n Br = read_2d(ntheta, npsi, \"Br\")\n Bz = read_2d(ntheta, npsi, \"Bz\")\n except:\n Br = Bz = None\n \n ny = ntheta\n\n if isym == 1:\n # Fill in values for up-down symmetric case\n print(\"Grid is up-down symmetric. Reflecting grid about midplane\")\n ny = tsize = 2*(ntheta - 1) + 1\n \n def reflect(data, mapfunc = lambda x:x):\n \"\"\" Reflect a variable about midplane\n Optionally supply a mapping function\"\"\"\n data2 = np.zeros([tsize, npsi])\n # Copy the original data\n for i in np.arange(ntheta):\n data2[i,:] = data[i,:]\n # Now fill in the remainder\n for i in np.arange(ntheta, tsize):\n t0 = tsize - 1 - i\n data2[i,:] = mapfunc(data[t0,:])\n return data2\n \n xnorm = reflect(xnorm)\n znorm = reflect(znorm, lambda x: 2.*zma - x) # Reflect about zma\n if Br != None:\n Br = reflect(Br, lambda x:-x) # Br reverses\n if Bz != None:\n Bz = reflect(Bz) # Bz remains the same\n theta = tsize\n\n # Make sure we have Br, Bz and Bpol\n\n if (Br == None) or (Bz == None):\n # Calculate Bpol from psi then Br and Bz from Bpol\n # Use dpsi = R*Bp dx (for now)\n Bpol = np.zeros([ny, npsi])\n \n def deriv(f):\n n = np.size(f)\n dfdi = np.zeros(n)\n dfdi[1:-1] = (f[2:n] - f[0:-2])/2. 
# Central difference in the middle\n dfdi[0] = f[1] - f[0]\n dfdi[-1] = f[-1] - f[-2]\n return dfdi\n \n for i in np.arange(ntheta):\n drdi = deriv(xnorm[i, :])\n dzdi = deriv(znorm[i, :])\n dldi = sqrt(drdi**2 + dzdi**2) # Arc length\n dpsidi = deriv(psiflux)\n \n Bpol[i, :] = dpsidi / (dldi * xnorm[i,:])\n else:\n Bpol = np.sqrt(Br**2 + Bz**2)\n \n # Calculate toroidal field\n Btor = fnorm / xnorm\n \n #########################################\n # Create a dictionary of values to return\n # \n # Need to transpose 2D arrays to [psi, theta] \n # to be consistent with elite inputs\n \n var = {\"npsi\":npsi, \"npol\":ny, # Sizes\n \n \"psi\":psiflux,\n \"f(psi)\":fnorm,\n \"p\":ponly,\n \n \"R\": np.transpose(xnorm),\n \"Z\": np.transpose(znorm),\n\n \"Bp\":np.transpose(Bpol),\n \"Bt\":np.transpose(Btor),\n\n \"q\":qsf,\n\n \"ffprime\":ffpnorm,\n \"pprime\":pponly}\n\n if Br != None:\n var['Br'] = np.transpose(Br)\n if Bz != None:\n var['Bz'] = np.transpose(Bz)\n \n return var", "def read_data_set():\n # shapes of datasets -- [] means expanded form:\n # - X: J\n # - net.R: J [x J x 1]\n # - F_DIST: J x J x num_features\n # - F_DIST_w1: J x J x num_features\n # - w['except_first'][-1]: (last weights) J x num_features [x 1]\n # - w['except_first'][1:-1]: (second to last weights) J x J x num_features\n # - first weights **were** also J x J x num_features\n # - w['first_for_r']: J x 1 x num_features\n\n read_X()\n read_weights(read_FDIST())", "def read_1D_comsol_data(self):\n x=[]\n y=[]\n with open(self.file, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n x.append(row[0])\n y.append(row[1])\n x = np.asarray((x),dtype=float)\n y = np.asarray((y),dtype=float)\n return x,y", "def read():\n try:\n #Open and parse input files.\n nodeFile = open(sys.argv[1], 'r')\n edgeFile = open(sys.argv[2], 'r')\n\t\n parse_nodes(nodeFile)\n parse_edges(edgeFile)\n nodeFile.close()\n\tedgeFile.close()\n\treturn \n except:\n print 'problem parsing input'\n #Put here some more information - usage...", "def _read_input_file(self):\n file_type = 'np.array'\n with open(self._file_properties['file_name'], 'r') as in_file:\n for line in in_file.readlines():\n if line[0:5] == '$$SOE':\n file_type = 'Horizons'\n break\n\n if not isfile(self._file_properties['file_name']):\n msg = 'Horizons files {:} does not exists.'\n message = msg.format(self._file_properties['file_name'])\n raise FileExistsError(message)\n if file_type == 'Horizons':\n self._read_horizons_file()\n else:\n (time, x, y, z) = np.loadtxt(\n self._file_properties['file_name'],\n usecols=(0, 1, 2, 3), unpack=True)\n self._time = time\n if int(astropy_version[0]) >= 4:\n self._xyz = SkyCoord(x=x, y=y, z=z,\n representation_type='cartesian')\n else:\n self._xyz = SkyCoord(x=x, y=y, z=z,\n representation='cartesian')", "def read():\n # TODO", "def ReadSalome(self, filename, element_type=\"tri\", read_surface_info=False):\n\n if element_type == \"line\":\n el = \"102\"\n bel = \"\"\n elif element_type == \"tri\":\n el = \"203\"\n bel = \"102\"\n elif element_type == \"quad\":\n el = \"204\"\n bel = \"102\"\n elif element_type == \"tet\":\n el = \"304\"\n bel = \"203\"\n elif element_type == \"hex\":\n el = \"308\"\n bel = \"204\"\n\n if read_surface_info is True and element_type == \"line\":\n warn(\"No surface info for lines. 
I am going to ignore this\")\n read_surface_info = False\n\n\n with open(filename,'r') as f:\n lines = f.readlines()\n\n info = lines[0].rstrip().split()\n\n self.nnode = int(info[0])\n all_nelem = int(info[1])\n\n nodes = lines[1:self.nnode+1]\n\n points = []\n for line in nodes:\n points.append([float(i) for i in line.rstrip().split()[1:4]])\n self.points = np.array(points,copy=True)\n self.nnode = self.points.shape[0]\n\n edges, faces, elements = [], [], []\n for counter in range(self.nnode+1,len(lines)):\n line = lines[counter].rstrip().split()\n if read_surface_info:\n if bel == line[1]:\n faces.append([int(i) for i in line[2:]])\n if el == line[1]:\n elements.append([int(i) for i in line[2:]])\n\n self.element_type = element_type\n self.elements = np.array(elements,dtype=np.int64,copy=True) - 1\n self.nelem = self.elements.shape[0]\n if self.nelem == 0:\n raise ValueError(\"file does not contain {} elements\".format(element_type))\n\n ndim = self.InferSpatialDimension()\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetEdges()\n self.GetBoundaryEdges()\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetFaces()\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def read_record(self, file_, num_evo_entries,get_data,mode='dist',return_seq=True):\n # This method and the Switch Class were taken from the original ProteinNet repo.\n # arg is a open file, num_evo_entries is 20 by default\n #\n # Strip the dict and insert lists for each of the types of entries\n desc_dict = utils.load_obj('desc_dict.pkl')\n desc_dict_rev = {int(self.aa_dict[k]):v for (k,v) in desc_dict.items()}\n\n # this will be stripped soon.\n if get_data == 'all':\n get_data = ['id','primary','evolutionary','secondary','tertiary','mask']\n aa_dict = {'A': '0', 'C': '1', 'D': '2', 'E': '3', 'F': '4', 'G': '5', 'H': '6', 'I': '7', 'K': '8', 'L': '9', 'M': '10', 'N': '11', 'P': '12', 'Q': '13', 'R': '14', 'S': '15', 'T': '16', 'V': '17', 'W': '18', 'Y': '19'}\n _mask_dict = {'-': '0', '+': '1'}\n while True:\n next_line = file_.readline()\n for case in switch(next_line):\n if case('[ID]' + '\\n'):\n if 'id' in get_data:\n id_ = 
file_.readline()[:-1]\n \n elif case('[PRIMARY]' + '\\n'):\n if 'primary' in get_data: \n prim = file_.readline()[:-1]\n primary = self.letter_to_num(prim, self.aa_dict)\n \n \n elif case('[EVOLUTIONARY]' + '\\n'):\n if 'evo' in get_data:\n evolutionary = []\n for residue in range(num_evo_entries): evolutionary.append(np.asarray([float(step) for step in file_.readline().split()]))\n evolutionary = np.array(evolutionary)\n evolutionary = evolutionary.T # this will turn evo into an array of shape (-1, 20) Fuck yeah\n \n elif case('[TERTIARY]' + '\\n'):\n if 'tert' in get_data:\n tertiary = []\n for axis in range(3): tertiary.append([float(coord) for coord in file_.readline().split()])\n \n elif case('[MASK]' + '\\n'):\n if 'mask' in get_data:\n mask = file_.readline()[:-1]\n mask = self.letter_to_num(mask, _mask_dict)\n \n # ends reading a Single record\n elif case('\\n'):\n # perform preprocessing\n if 0 in mask:\n return -1\n if len(primary) > self.seq_cutoff:\n return -1\n prop = utils.make_prop_array(primary,desc_dict_rev)\n x = np.concatenate([tc(primary,num_classes=20),evolutionary,prop],axis=1)\n tertiary = dhc.get_backbone_coords(np.array(tertiary))\n if mode == 'dih':\n y = dhc.fix_array(dhc.get_phi_psi(tertiary))\n y = y.astype('float32')\n elif mode =='dist':\n y = self.pairwise_distance(tertiary)\n elif mode == 'zmat':\n tertiary = tertiary.reshape((1,-1,3,3))\n tertiary = tertiary[:,:,1,:].reshape((1,-1,1,3)).astype('float32')/100\n dist = dhc.calc_dist_vector(tertiary).numpy().reshape((1,-1,1))\n ang = np.radians(dhc.calc_angle_vector(tertiary).numpy().reshape((1,-1,1)))\n dih = dhc.calc_dihedral_vector(tertiary).numpy()\n y = np.concatenate([dist,ang,dih],axis=-1)\n elif mode == 'tert':\n #tertiary = tertiary.reshape((1,-1,3,3))\n tertiary = tertiary.reshape((-1,3,3))\n #tertiary = tertiary[:,:,1,:].reshape((1,-1,1,3)).astype('float32')/100\n #y = self.pairwise_distance(tertiary)\n return [x.astype('float32',copy=False), tertiary.astype('float32',copy=False), np.asarray(id_), primary]\n if return_seq:\n return [x.astype('float32',copy=False), y.astype('float32',copy=False), np.asarray(id_), tertiary.astype('float32',copy=False), primary]\n else:# if anything changes, i will be replacing this with a more pythonic way soon enough\n # if I really need tertiary structure at anytime, i just code it her\n return [x.astype('float32',copy=False), y.astype('float32',copy=False), np.asarray(id_)]\n \n elif case(''):\n return None", "def read(self, epsg=None):\n if self.data is None:\n self.process.compute()\n self.data = self.process.output.data\n out_data = self.data\n if epsg and self.get_epsg() != epsg:\n out_data = reproject(self.data, epsg)\n return out_data", "def main():\n args = get_args()\n\n entries = []\n\n noe_dim = \"h1\" if args.hch else \"c1\" # save the name of the noe dimension\n\n with open(args.sparkylist) as lines:\n\n lines = lines.readlines()\n # lines = set(lines) # remove duplicate lines\n peak = 1\n for idx, line in enumerate(lines):\n idx = idx + 1\n\n try:\n label, c1, c2, h2, intensity, *rest = line.split()\n\n c1 = float(c1) # convert these to floats\n c2 = float(c2)\n h2 = float(h2)\n intensity = float(intensity)\n\n label = f\"peak{peak}\"\n peak += 1\n\n except ValueError:\n print(f\"invalid NOE definition on line {idx}\")\n continue\n\n dic = {\"label\": label, noe_dim: c1,\n \"c2\": c2, \"h2\": h2, \"intensity\": intensity}\n\n entries.append(dic)\n\n # create dataframe and write out\n csv = pd.DataFrame(entries)\n order = [\"label\", noe_dim, \"c2\", 
\"h2\", \"intensity\"]\n csv.to_csv(args.output, columns=order, index=False)", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def _read_file(self) -> Tuple[np.ndarray, h5py.File]:\n assert os.path.exists(self.datafile)\n LOGGER.info(f\"Found datafile: {self.datafile}\")\n\n # get ELM indices from datafile\n hf = h5py.File(self.datafile, \"r\")\n LOGGER.info(f\"Number of ELM events in the datafile: {len(hf)}\")\n elm_index = np.array([int(key) for key in hf], dtype=np.int32)\n return elm_index, hf", "def read_2D_comsol_data(self):\n x=[]\n y=[]\n z=[]\n with open(self.file, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n x.append(row[0])\n y.append(row[1])\n z.append(row[2])\n x = np.asarray((x),dtype=float)\n y = np.asarray((y),dtype=float)\n z = np.asarray((z),dtype=float)\n return x,y,z", "def readWaveform(self):\n # prepare data holder\n y = [ 0 for j in range(4) ]\n # in case of previous errors\n self.flushInput()\n for ch in self.chs:\n # mostly for TDS\n self.setCh(ch)\n # calibration factor we will need soon\n (vmult, voff) = self.calibV()\n # read and calibrate data\n data = (numpy.array(self.readData()) - voff) * vmult\n # This is from the formula in TDS manual, without the\n # \"vzero\" in it---I couldn't figure out when that wouldn't\n # be exactly zero.\n y[ch-1]=data[:]\n\n (hstep, hoff) = self.calibH()\n # initialize time array\n t = numpy.array(range(len(y[0])))\n t = (t * hstep) + hoff\n\n # update the sequence number (... 
for isUpdated())\n self.seq = self.readSeq()\n\n return (t, y)", "def _get_econt_info(self, out_log):\n f = open_general(out_log)\n tmptxt = f.readlines()\n f.close()\n econt = {}\n itmp = search_string('[read_energy] number of energy points', tmptxt)\n if itmp>=0: econt['Nepts'] = int(tmptxt.pop(itmp).split()[-1])\n itmp = search_string('energies and weights are:', tmptxt)\n if itmp>=0:\n tmp = []\n for ie in range(econt['Nepts']):\n tmpline = tmptxt[itmp+4+ie].split()[1:]\n tmp.append([float(tmpline[0]), float(tmpline[1]), float(tmpline[2]), float(tmpline[3])])\n tmp = array(tmp)\n econt['epts'] = tmp[:,:2]\n econt['weights'] = tmp[:,2:]\n econt['emin'] = tmp[0,0]\n return econt", "def readPubTator(args):\n if not os.path.exists('/'.join(args.output_file.split('/')[:-1])):\n os.makedirs('/'.join(args.output_file.split('/')[:-1]))\n\n abstracts = OrderedDict()\n entities = OrderedDict()\n relations = OrderedDict()\n\n with open(args.input_file, 'r') as infile:\n for line in tqdm(infile):\n\n # text\n if len(line.rstrip().split('|')) == 3 and \\\n (line.strip().split('|')[1] == 't' or line.strip().split('|')[1] == 'a'):\n line = line.strip().split('|')\n\n pmid = line[0]\n text = line[2] # .replace('>', '\\n')\n\n # replace weird symbols and spaces\n text = replace2symbol(text)\n text = replace2space(text)\n\n if pmid not in abstracts:\n abstracts[pmid] = [TextStruct(pmid, text)]\n else:\n abstracts[pmid] += [TextStruct(pmid, text)]\n\n # entities\n elif len(line.rstrip().split('\\t')) == 6:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n\n # currently consider each possible ID as another entity\n for k in kb_id:\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [k], -1, [], [])]\n\n elif len(line.rstrip().split('\\t')) == 7:\n line = line.strip().split('\\t')\n pmid = line[0]\n offset1 = int(line[1])\n offset2 = int(line[2])\n ent_name = line[3]\n ent_type = line[4]\n kb_id = line[5].split('|')\n extra_ents = line[6].split('|')\n\n # replace weird symbols and spaces\n ent_name = replace2symbol(ent_name)\n ent_name = replace2space(ent_name)\n for i, e in enumerate(extra_ents):\n if pmid not in entities:\n entities[pmid] = [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n else:\n entities[pmid] += [EntStruct(pmid, ent_name, offset1, offset2, ent_type, [kb_id[i]], -1, [], [])]\n\n # relations\n elif len(line.rstrip().split('\\t')) == 4:\n line = line.strip().split('\\t')\n pmid = line[0]\n rel_type = line[1]\n arg1 = tuple((line[2].split('|')))\n arg2 = tuple((line[3].split('|')))\n\n if pmid not in relations:\n relations[pmid] = [RelStruct(pmid, rel_type, arg1, arg2)]\n else:\n relations[pmid] += [RelStruct(pmid, rel_type, arg1, arg2)]\n\n elif line == '\\n':\n continue\n\n return abstracts, entities, relations" ]
[ "0.59410346", "0.567626", "0.5638466", "0.55620134", "0.55526614", "0.5529649", "0.54932714", "0.54829437", "0.5475036", "0.5426845", "0.5419794", "0.5365769", "0.5346987", "0.5330866", "0.5302111", "0.52904403", "0.5289346", "0.527683", "0.5269207", "0.5269175", "0.52437896", "0.52366996", "0.5210274", "0.5207666", "0.51970017", "0.51861376", "0.51619816", "0.51554906", "0.51498806", "0.5148883", "0.514553", "0.5141779", "0.51336795", "0.51228446", "0.5119803", "0.511237", "0.5107044", "0.5102768", "0.50953084", "0.50946134", "0.50920856", "0.50803953", "0.5077382", "0.50719017", "0.50682896", "0.50659347", "0.5057592", "0.50512135", "0.5038282", "0.50350326", "0.5031053", "0.50220376", "0.5015904", "0.501049", "0.5009224", "0.499767", "0.49864975", "0.49799168", "0.49603194", "0.49585485", "0.4956867", "0.49548838", "0.49530014", "0.49419314", "0.49375027", "0.49354318", "0.49321705", "0.49295288", "0.492834", "0.49245706", "0.49207258", "0.49206236", "0.49152392", "0.4913053", "0.49120188", "0.49055758", "0.49030536", "0.49017242", "0.49006736", "0.48972288", "0.48913255", "0.48904252", "0.48825255", "0.4879674", "0.48667866", "0.48657885", "0.4858678", "0.48529387", "0.48478162", "0.48477572", "0.4844443", "0.48434407", "0.48346314", "0.4833389", "0.48307407", "0.48287186", "0.48283052", "0.4826438", "0.48232758", "0.48203254" ]
0.55652803
3
trace finds the line, the filename and error message and returns it to the user
def trace():
    import traceback
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # script name + line number
    line = tbinfo.split(", ")[1]
    # Get Python syntax error
    #
    synerror = traceback.format_exc().splitlines()[-1]
    return line, __file__, synerror
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror", "def __call__(self, line):\n marker = self.marker\n stripped_line = line.strip()\n if marker == stripped_line:\n assert not self.traceback_section\n self.traceback_section = True\n # print(\"XXX: TRACEBACK-START\")\n elif self.traceback_section:\n matched = self.file_pattern.match(line)\n if matched:\n # matched_range = matched.regs[1]\n filename = matched.groups()[0]\n new_filename = posixpath_normpath(filename)\n if new_filename != filename:\n # print(\"XXX: %r => %r\" % (filename, new_filename))\n line = line.replace(filename, new_filename)\n elif not stripped_line or line[0].isalpha():\n # -- DETECTED TRCAEBACK-END: exception-description\n # print(\"XXX: TRACEBACK-END\")\n self.traceback_section = False\n return line", "def traceback(self):", "def format_backtrace(trace):\n backtrace = []\n for filename, line, func, _ in traceback.extract_tb(trace):\n desc = {'file': filename,\n 'line': line,\n 'function': func,\n 'text': _}\n backtrace.append(desc)\n return backtrace", "def gettrace(): # real signature unknown; restored from __doc__\n pass", "def trace(context=1):\r\n return getinnerframes(sys.exc_info()[2], context)", "def trace(self, trace=...):\n ...", "def report(self, line: int, where: str, message: str):\n output = f'[line {line}] Error{where}: {message}'\n print(output, file=sys.stderr)\n self.had_error = True", "def handle_error():\n print \"An error occurred. Trace:\\n\"\n traceback.print_exc()", "def _exceptionStackTTB(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or Trace.severe().\n stackList = traceback.extract_stack()\n try:\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s]\" % (sourcefile,line,function)\n else:\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n #endIf\n else:\n if (not stack):\n # Leave out the newline for the bottom line on the stack\n stack = \"\\t%s(%s) [%s] - %s\" % (sourcefile,line,function,text)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endIf\n #endFor\n stack = \"\\tFrame stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting frame stack. 
Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n\n try:\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"\\t%s(%s) [%s]\\n%s\" % (sourcefile,line,function,stack)\n else:\n stack = \"\\t%s(%s) [%s] - %s\\n%s\" % (sourcefile,line,function,text,stack)\n #endIf\n #endFor\n stack = \"\\tException stack (most recent call first):\\n%s\" % stack\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"\\tException getting exception stack. Type: %s, Value: %s\\n%s\" % (exc_type,exc_value,stack)\n #endTry\n \n # At the very top - put the exception string\n stack = \"\\t%s\\n%s\" % (exc,stack)\n \n return stack", "def _parse_traceback(self, trace):\n p_traceback = [ \"%s:%d:in `%s'\" % (filename, lineno, funcname) \n for filename, lineno, funcname, _\n in traceback.extract_tb(trace) ]\n p_traceback.reverse()\n\n return p_traceback", "def extract_detail():\r\n tb = sys.exc_info()[-1]\r\n stk = traceback.extract_tb(tb, -1)[0]\r\n return \"{} in {} line num {} on line {} \".format(\r\n stk.name, stk.filename, stk.lineno, stk.line\r\n )", "def StandViz_ReportError( errorobj, args, Header = None ): # error reporting and traceback function\n (MyPath, MyFile) = os.path.split( args[0] ) # retrieve filename and path of running python script\n (MyBaseName, MyExt) = os.path.splitext( MyFile ) # separate basefilename from extension\n errorfilename = \"{}.txt\".format(MyBaseName) # create new error filename based on base of script filename\n ERRFILE = open( errorfilename, 'w' ) # open text file for writting\n if( Header != None ): ERRFILE.write( '%s\\n' % Header ) # if Header defined, write Header to file\n ERRFILE.write( \"Error running '{}'\\n\".format(MyFile) ) # write error message with filename\n MyTrace = errorobj[2] # retrieve error object\n while( MyTrace != None ): # loop through stack trace\n (line, file, name) = ( MyTrace.tb_lineno, MyTrace.tb_frame.f_code.co_filename, MyTrace.tb_frame.f_code.co_name ) # extract line, file, and error name\n F = open( file, 'r' ) # open source file of Python script\n L = F.readlines() # read scripot source into memory\n F.close() # close script file\n code = L[line-1].strip() # extract line of source code that caused error\n ERRFILE.write( \" File '{}', line {}, in {}\\n {}\\n\".format(file, line, name, code) ) # write filename, source code line, error name, and error code\n MyTrace = MyTrace.tb_next # step to next level of call stack trace\n ERRFILE.write( \"errorobj: {}\\n\".format(errorobj) ) # write error object and arguments for call\n ERRFILE.write( \"Calling Argument Vector: {}\\n\".format(args) ) # write calling arguments\n ERRFILE.close() # close text file with error stack trace\n os.system( \"notepad.exe {}\".format(errorfilename) ) # display error log file with notepad.exe", "def localTraceback(self, alwaysPrint = False):\n self.log( \"DEBUG TRACEBACK: \" )\n for line in traceback.format_stack():\n self.logPre( line, alwaysPrint )", "def lineno():\n return \"line \" + str(inspect.currentframe().f_back.f_lineno) + \": \"", "def _exceptionStackBTT(self,methodName,exc,depth=10):\n stack = \"\"\n # Reconstruct the call stack from where the trace of the exception was initiated by invoking \n # Trace.error() or 
Trace.severe().\n stackList = traceback.extract_stack()\n try:\n stack = \"\\tFrame stack (most recent call last):\\n\"\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n if (sourcefile.endswith(\"Trace.py\") and (function == \"error\" or function == \"severe\")): break\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else:\n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\n\\tException getting frame stack. Type: %s, Value: %s\" % (stack,exc_type,exc_value)\n #endTry\n \n try:\n stack = \"%s\\tException stack (most recent call last):\\n\" % stack\n tb = sys.exc_info()[2]\n stackList = traceback.extract_tb(tb,depth)\n for stackData in stackList:\n sourcefile,line,function,text = stackData\n sepIndex = sourcefile.rfind(os.sep)\n if (sepIndex >=0 and Trace.SourceFileStyle == Trace.NameOnly):\n sourcefile = sourcefile[sepIndex+1:]\n #endIf\n if (text == None):\n stack = \"%s\\t%s(%s) [%s]\\n\" % (stack,sourcefile,line,function)\n else: \n stack = \"%s\\t%s(%s) [%s] - %s\\n\" % (stack,sourcefile,line,function,text)\n #endIf\n #endFor\n except:\n # This shouldn't happen, but in case it does...\n exc_type,exc_value = sys.exc_info()[:2]\n stack = \"%s\\tException getting exception stack. Type: %s, Value: %s\\n\" % (stack,exc_type,exc_value)\n #endTry\n\n # At the very end - put the exception string\n stack = \"%s\\t%s\" % (stack,exc)\n \n return stack", "def lineno():\n return str(' - Principal - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def find_traceback_start(self):\n ### FILL IN ###", "def lineno():\n linenum = inspect.currentframe().f_back.f_lineno\n frameinfo = inspect.getframeinfo(inspect.currentframe())\n filename = frameinfo.filename\n return str(\"File: \" + str(filename) + \" Line: \" + str(linenum))", "def tidy_error(ex=None) -> str:\r\n from sys import exc_info\r\n from os.path import join, abspath, dirname\r\n from traceback import extract_tb, format_list, format_exception_only\r\n\r\n show = join(dirname(abspath(__file__)), '')\r\n\r\n def _check_file(name):\r\n return name and name.startswith(show)\r\n\r\n def _print(typ, value, tb): # If not debug, generator expression: filter trace to my files.\r\n show = extract_tb(tb) if DEBUG else (fs for fs in extract_tb(tb, limit=3) if _check_file(fs.filename))\r\n fmt = format_list(show) + format_exception_only(typ, value)\r\n return ''.join((f.strip('\"\\'').replace('\\\\n', '') for f in fmt))\r\n\r\n args = ex or exc_info()\r\n return _print(*args)", "def lineno():\n return str(' - SecurityGroupIngressPortRangeRule - caller: '+str(inspect.stack()[1][3])+' - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def tb():\n etype, value, tb = sys.exc_info()\n return \"%s: %s (%s@%s:%d)\" % (etype.__name__, value, tb.tb_frame.f_code.co_name, os.path.basename(tb.tb_frame.f_code.co_filename), tb.tb_lineno)", "def exceptionTraceback(self, alwaysPrint = False):\n self.logPre( traceback.format_exc(), alwaysPrint )", "def trace_function(frame, event, arg):\n co = frame.f_code\n func_name = co.co_name\n if func_name == 'write':\n # Ignore write() calls from print statements\n return\n filename = co.co_filename\n if event == 'call':\n # 
decend into the stack...\n return trace_function\n elif event == 'return':\n if isinstance(arg, basestring) and 'inputlocator' in filename.lower() and not func_name.startswith('_'):\n results_set.add((func_name, arg))\n # print('%s => %s' % (func_name, arg))\n return", "def debug(line):\n sys.stderr.write(line + \"\\n\")\n sys.stderr.flush()", "def trace(string):\n if trace_enabled:\n print(string)", "def lineno():\n return str(' - RDSInstanceMasterUserPasswordRule- caller: '+str(inspect.stack()[1][3])+' - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def error(*args, noContext: bool=True, showLineNumber: bool=True, **kwargs)->None:\n pass", "def _debug_calc_error(self, line):\n debug(\"RPN Calculator Error: %s\" % line)", "def _print_caller(self):\n import traceback\n print '\\n'.join(['%s:%d %s'%(f,l,c) for f,l,m,c in traceback.extract_stack()])", "def _log(self,methodName,eventType,msg,exc=None): \n stackDump = None\n threadId = (\"%x\" % thread.get_ident()).rjust(12).replace(\" \",\"0\")\n traceString = \"%s %s %s %s\" % (self._getTimeStamp(),threadId,eventType,self.entityName)\n traceString = \"%s(%s) %s\" % (traceString,self._sourceLineNumber(),methodName)\n \n if (exc):\n # Get stack dump now to keep it with msg text\n if (Trace.StackTraceStyle == Trace.TopToBottom):\n stackDump = self._exceptionStackTTB(methodName,exc)\n elif (Trace.StackTraceStyle == Trace.BottomToTop):\n stackDump = self._exceptionStackBTT(methodName,exc)\n else:\n raise TraceConfigurationException(\"'%s', is not a valid stack trace style. Expected one of %s\" % (Trace.StackTraceStyle,Trace.StackTraceStyles))\n #endIf\n #endIf\n \n # If a logFile was provided then send all trace to trace log\n if (Trace.traceFile):\n # file IO is not thread safe\n # The Jython 2.1 that comes with WAS doesn't support the \"with\" statement.\n try:\n Trace.traceFileLock.acquire()\n if (stackDump):\n Trace.traceFile.write(\"%s : %s\\n%s\\n\" % (traceString, msg, stackDump))\n else:\n Trace.traceFile.write(\"%s : %s\\n\" % (traceString, msg))\n #endIf\n Trace.traceFile.flush()\n finally:\n Trace.traceFileLock.release()\n #endTry\n #endIf\n \n if (not Trace.traceFile or eventType in [\"S\", \"E\", \"W\", \"I\"]):\n # Send severe, error, warning and info trace to stdout\n if (stackDump):\n print \"%s : %s\\n%s\" % (traceString, msg, stackDump)\n else:\n print \"%s : %s\" % (traceString, msg)\n #endIf\n #endIf", "def _errpos(self, fpos):\r\n filename, string = self._includestack[-1]\r\n return filename, srow(string, fpos), scol(string, fpos)", "def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. 
if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION", "def _get_debug_text(self, text):\n\n func = inspect.currentframe().f_back.f_back.f_code\n return \"{}: Function {} in {}:{}\".format(text, func.co_name, os.path.basename(func.co_filename), func.co_firstlineno)", "def get_line(cls, frame, sys_context=None):\n\t\tcode = cls._dispatch_frame(frame)\n\n\t\tif not code: \n\t\t\treturn ''\n\t\t\n\t\treturn code.splitlines()[frame.f_lineno]", "def trace(self, frame, event, arg):\n if event == \"call\":\n if frame.f_code.co_filename.startswith(\"<memory/\"):\n return self.tracerobot\n else:\n return None\n return trace", "def print_error(error: str) -> None:\n # We get the 2nd item in the call stack to find where this function is called from\n frame = inspect.stack()[1]\n # We get the module of the caller\n module = inspect.getmodule(frame[0])\n if module is None:\n filename = \"Unknown\"\n else:\n # We get the file of the module\n filepath = module.__file__\n # We get the relative file path of the module\n filename = os.path.relpath(filepath)\n # We print the info with some color\n rich.print(f\"[bold underline red]ERROR:[/] [blue]({filename})[/] : {error}\")", "def getframeinfo(frame, context=1):\r\n if istraceback(frame):\r\n lineno = frame.tb_lineno\r\n frame = frame.tb_frame\r\n else:\r\n lineno = frame.f_lineno\r\n if not isframe(frame):\r\n raise TypeError('{!r} is not a frame or traceback object'.format(frame))\r\n\r\n filename = getsourcefile(frame) or getfile(frame)\r\n if context > 0:\r\n start = lineno - 1 - context//2\r\n try:\r\n lines, lnum = findsource(frame)\r\n except IOError:\r\n lines = index = None\r\n else:\r\n start = max(start, 1)\r\n start = max(0, min(start, len(lines) - context))\r\n lines = lines[start:start+context]\r\n index = lineno - 1 - start\r\n else:\r\n lines = index = None\r\n\r\n return Traceback(filename, lineno, frame.f_code.co_name, lines, index)", "def logline(msg):\n print msg", "def lineno():\n return str(' - Statement - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def exception_handler(exctype, val, trace):\n logger.info(\n ''.join(traceback.format_exception(exctype, val, trace)))", "def log_trace(self, msg):\n self.log(msg, level=LOG_TRACE)", "def trace_line(self, frame, event, arg):\n\n self.active_frame = frame\n\n self.base_trace(frame, event, arg)", "def trace(self, module, message):\n if self.log_level <= consts.LOG_LEVEL_TRACE:\n print(\"TRACE : %s: %s\" % (module, message))", "def get_lineage_trace(self) -> str:\n # TODO why do we not want to store the results? 
The execution script will should stay the same\n # therefore we could cache the result.\n raise NotImplementedError", "def print_debug(e):\n print '1', e.__doc__\n print '2', sys.exc_info()\n print '3', sys.exc_info()[0]\n print '4', sys.exc_info()[1]\n print '5', traceback.tb_lineno(sys.exc_info()[2])\n _, _, tb = sys.exc_info()\n print '6', traceback.print_tb(tb)", "def _trace(self, message: str, level: Level = Level.DEBUG, exact: bool = False) -> None:\n if level > self._verbosity:\n return\n if exact:\n if level == self._verbosity:\n stderr(message)\n return\n if level <= Level.ERROR:\n stderr(\"error: %s\\n\" % message)\n elif level == Level.INFO:\n stderr(\"info: %s\\n\" % message)\n elif level >= Level.DEBUG:\n stderr(\"debug: %s\\n\" % message)", "def user_line(self, frame):\r\n if \"__exc_tuple__\" in frame.f_locals:\r\n del frame.f_locals['__exc_tuple__']\r\n\r\n if self._wait_for_mainpyfile:\r\n if (self.mainpyfile != self.canonic(frame.f_code.co_filename)\r\n or frame.f_lineno <= 0):\r\n return\r\n self._wait_for_mainpyfile = False\r\n self.bottom_frame = frame\r\n\r\n if self.get_break(self.canonic(frame.f_code.co_filename), frame.f_lineno):\r\n self.current_bp = (\r\n self.canonic(frame.f_code.co_filename), frame.f_lineno)\r\n else:\r\n self.current_bp = None\r\n self.ui.update_breakpoints()\r\n\r\n self.interaction(frame)", "def trace(fn):\n @functools.wraps(fn)\n def wrapped(*args, **kwds):\n global PREFIX\n reprs = [repr(e) for e in args]\n reprs += [repr(k) + '=' + repr(v) for k, v in kwds.items()]\n log('{0}({1})'.format(fn.__name__, ', '.join(reprs)) + ':')\n PREFIX += ' '\n try:\n result = fn(*args, **kwds)\n PREFIX = PREFIX[:-4]\n except Exception as e:\n log(fn.__name__ + ' exited via exception')\n PREFIX = PREFIX[:-4]\n raise\n # Here, print out the return value.\n log('{0}({1}) -> {2}'.format(fn.__name__, ', '.join(reprs), result))\n return result\n return wrapped", "def record_line(self, frame, event, arg): # pylint: disable=unused-argument\n if event == 'line':\n if self.prev_timestamp:\n runtime = time.time() - self.prev_timestamp\n self.lines.append([self.prev_path, self.prev_lineno, runtime])\n self.prev_lineno = frame.f_lineno\n self.prev_path = frame.f_code.co_filename\n self.prev_timestamp = time.time()\n return self.record_line", "def my_err_handler(traceback, exec_info):\n print \"Custom function invoked\"\n print \"Formatted exception\"\n print traceback.format_exc()\n print \"System exec info\"\n print exec_info\n exp_type, exp_value, exp_traceback = exec_info\n print \"String formatted exception\"\n print traceback.format_exception(exp_type, exp_value, exp_traceback)\n print \"End of custom function\"", "def configureTrace(self,traceString):\n configureTrace(traceString)", "def do_read_trace(self, arg):\n try:\n results = self.phil.read_trace()\n except KeyError as exc:\n print('Could not parse argument {}'.format(exc))\n except (TypeError, ValueError, SyntaxError) as exc:\n print(exc)\n else:\n if len(results) == 0:\n return\n headers = ['time', 'diff', 'source_diff', 'source', 'event']\n table_data = []\n diffs = []\n for event in results[\"data\"]:\n row_data = []\n for key_name in headers:\n if key_name == 'diff':\n diffs.append(event[key_name])\n row_data.append(event[key_name])\n table_data.append(row_data)\n print(tabulate(table_data, headers=headers, floatfmt=\".9f\"))\n\n try:\n if len(diffs) > 1:\n diffs = diffs[1:]\n print(\"\\nDifference Stats\")\n print(\" min: {:.9f}\".format(min(diffs)))\n print(\" max: {:.9f}\".format(max(diffs)))\n 
print(\" mean: {:.9f}\".format(sta.mean(diffs)))\n print(\" median: {:.9f}\".format(sta.median(diffs)))\n print(\" stdev: {:.9f}\".format(sta.stdev(diffs)))\n print(\"variance: {:.9f}\".format(sta.variance(diffs)))\n except ValueError:\n pass", "def trace(self, *args, **kwargs): # real signature unknown\n pass", "def get_trace_string(self):\n return (\"%s -> %s(0x%s) addr:0x%s\" %\n (self.instr_str, self.rd, self.rd_val, self.addr))", "def lineno():\n return str(' - IpAddr - line number: '+str(inspect.currentframe().f_back.f_lineno))", "def stack_trace(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:", "def showtraceback(self,exc_tuple = None):\n\n # Though this won't be called by syntax errors in the input line,\n # there may be SyntaxError cases whith imported code.\n if exc_tuple is None:\n type, value, tb = sys.exc_info()\n else:\n type, value, tb = exc_tuple\n if type is SyntaxError:\n self.showsyntaxerror()\n else:\n sys.last_type = type\n sys.last_value = value\n sys.last_traceback = tb\n self.InteractiveTB()\n if self.InteractiveTB.call_pdb and self.has_readline:\n # pdb mucks up readline, fix it back\n self.readline.set_completer(self.Completer.complete)", "def filename_line(skip: int = 2) -> Tuple[str, int]:\n stack = inspect.stack()\n start = skip\n parentframe = stack[start][0]\n\n filename = 'N/A'\n module = inspect.getmodule(parentframe)\n if module:\n filename = os.path.basename(os.path.realpath(module.__file__))\n\n return filename, parentframe.f_lineno", "def _get_caller_detail(n=2):\n if not _show_caller_details:\n return None\n s = inspect.stack()[:n + 1]\n try:\n frame = s[n]\n try:\n return frame[1]\n # WARNING(dhellmann): Using frame.lineno to include the\n # line number in the return value causes some sort of\n # memory or stack corruption that manifests in values not\n # being cleaned up in the cfgfilter tests.\n # return '%s:%s' % (frame[1], frame[2])\n finally:\n del frame\n finally:\n del s", "def with_traceback(self, tb): # real signature unknown; restored from __doc__\n pass", "def trace(msg):\n import datetime\n print('[{:%Y-%m-%d %H:%M:%S}]: '.format(datetime.datetime.now()) + msg)", "def instrument_fail(self, req, where):\n\n if where in req[\"file_details\"][\"backend_filename\"]:\n raise Exception(\"Instrumented Failure: %s\" % where)", "def _clean_onerror(func, path, excinfo):\n print(\"%s encountered error when processing %s: %s\" % (func, path, excinfo))", "def text(eparams, context=5):\n import os\n import types\n import time\n import traceback\n import linecache\n import inspect\n import pydoc\n\n etype, evalue, etb = eparams\n if isinstance(etype, types.ClassType):\n etype = etype.__name__\n pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable\n date = time.ctime(time.time())\n head = \"%s\\n%s\\n%s\\n\" % (str(etype), pyver, date) + '''\nA problem occurred in a Python script. 
Here is the sequence of\nfunction calls leading up to the error, in the order they occurred.\n'''\n\n frames = []\n records = inspect.getinnerframes(etb, context)\n for frame, file, lnum, func, lines, index in records:\n file = file and os.path.abspath(file) or '?'\n args, varargs, varkw, locals = inspect.getargvalues(frame)\n call = ''\n if func != '?':\n call = 'in ' + func + \\\n inspect.formatargvalues(args, varargs, varkw, locals,\n formatvalue=lambda value: '=' + pydoc.text.repr(value))\n\n highlight = {}\n\n def reader(lnum=[lnum]):\n highlight[lnum[0]] = 1\n try:\n return linecache.getline(file, lnum[0])\n finally:\n lnum[0] += 1\n vars = scanvars(reader, frame, locals)\n\n rows = [' %s %s' % (file, call)]\n if index is not None:\n i = lnum - index\n for line in lines:\n num = '%5d ' % i\n rows.append(num + line.rstrip())\n i += 1\n\n done, dump = {}, []\n for name, where, value in vars:\n if name in done:\n continue\n done[name] = 1\n if value is not __UNDEF__:\n if where == 'global':\n name = 'global ' + name\n elif where == 'local':\n name = name\n else:\n name = where + name.split('.')[-1]\n dump.append('%s = %s' % (name, pydoc.text.repr(value)))\n else:\n dump.append(name + ' undefined')\n\n rows.append('\\n'.join(dump))\n frames.append('\\n%s\\n' % '\\n'.join(rows))\n\n exception = ['%s: %s' % (str(etype), str(evalue))]\n if isinstance(evalue, types.InstanceType):\n for name in dir(evalue):\n value = pydoc.text.repr(getattr(evalue, name))\n exception.append('\\n%s%s = %s' % (\" \" * 4, name, value))\n\n return head + ''.join(frames) + ''.join(exception) + '''\n\nThe above is a description of an error in a Python program. Here is\nthe original traceback:\n\n%s\n''' % ''.join(traceback.format_exception(etype, evalue, etb))", "def parse_line_err(self, match):\n self.line = match.group(2)\n self.filename = match.group(1)\n self.message = match.group(4)\n self.keyword = match.group(3)\n\n self.fix_filename()\n self.fix_namespaces()\n self.fix_nonerrors()\n\n if self.message.strip() == \"ld returned 1 exit status\":\n self.error_type = \"Linking Error\"\n self.signal_eof = True\n return \"\"\n\n return_value = \"\"\n if self.filename is not None:\n return_value += \"{}:\".format(self.filename)\n if self.line is not None:\n return_value += \"{}: \".format(self.line)\n if self.keyword is not None:\n return_value += \"{}: \".format(self.keyword)\n if self.message is not None:\n return_value += self.message\n\n if return_value != \"\":\n return_value += \"\\n\"\n return return_value", "def _extract_thread_stack_trace(\n self, thread: str, lines: List[str]\n ) -> Optional[List[str]]:\n thread_str = f\"Thread {thread} \"\n i: int = 0\n while i < len(lines) and thread_str not in lines[i]:\n i += 1\n if i != len(lines) and thread_str in lines[i]:\n j: int = i\n while j < len(lines) and lines[j] != \"\\n\":\n j += 1\n start = i - 1\n end = j\n return lines[start:end]\n return None", "def find_backtrace(self):\n return [ft for ft in os.listdir(self.output_dir)\n if os.path.isfile(ft) and ft.startswith(\"Backtrace.\")]", "def getLineInformation(line):\n \n pass", "def trace(msg, minLevel=1):\n global verbose\n if verbose >= minLevel:\n tracePrint(msg)", "def dbtrace_show_output(trace_object, output_file):\n\n pass", "def handle_line(line):\n print line,\n\n line_parts = log_re.match(line).groupdict()\n action = line_parts['action']\n attacker = line_parts['attacker']\n\n (cmd, status, output) = interact(line_parts, method=opts.method)\n\n if status > 0:\n warnings.warn('IP %s (%s)' % 
(attacker, output), InteractionWarning)\n print '\\t%s FAILURE' % attacker\n else:\n print '\\t%s SUCCESS' % attacker\n \n print", "def print_error(self, msg, line_num=False, errorFunc=SystemError):\n if line_num is False: line_num = self.line_num\n bad_line_ind = self.line_nums[line_num]\n\n err_msg = \"\\n\\n\\n############ ERROR #############\\n\"\n err_msg += \"Error in input_file '%s'\\n\\n---\\n\" % self.inp_filename\n err_msg += msg.strip(\"\\n\")\n err_msg += \"\\n---\\n\\nline number: %i\\n\" % self.line_nums[line_num]\n err_msg += f\"line: '{self.file_ltxt_orig[bad_line_ind]}'\"\n err_msg += \"\\n\"\n err_msg += f\"err id: {self.E_str}\"\n err_msg += \"\\n#################################\\n\\n\"\n raise errorFunc(err_msg)", "def traceback(self):\r\n clean = self.raw_traceback\r\n lines = ['Traceback (most recent call last):\\n']\r\n lines += traceback.format_list(clean)\r\n msg = str(self.error)\r\n lines += traceback.format_exception_only(self.exc_info[0], msg)\r\n return ''.join(lines)[:-1]", "def degsOutput(err, globalNameSpace):\n lineNumber = err.lineNumber\n columnNumber = err.columnNumber\n err.msg = '\\n' + err.msg + '\\n'\n print(err.msg, file=sys.stderr)\n if not lineNumber == None:\n positionReference = [\"Error caused at line %(lineNumber)i\" % locals()]\n if not columnNumber == None:\n positionReference.append(\", column %(columnNumber)i\" % locals())\n positionReference.append(\":\\n\")\n positionReference.append(globalNameSpace['inputScript'].splitlines(True)[lineNumber-1])\n if not columnNumber == None:\n positionReference.append(\" \"*(columnNumber-1) + \"^~~ here.\")\n print(''.join(positionReference) + '\\n', file=sys.stderr)\n if err.element:\n print(\"In element: \" + err.element.userUnderstandableXPath(), file=sys.stderr)\n else:\n print(\"Unknown element. Please report this error to %s\" % globalNameSpace['bugReportAddress'], file=sys.stderr)", "def parse_stack_trace(self, it, line):\n events = []\n stack_traces = []\n\n while self.stack_trace_re.match(line):\n event = self.parse_stack_trace_line(line)\n if event:\n events.append(event)\n\n stack_traces.append(line)\n line = get_next(it)\n\n events.reverse()\n\n return stack_traces, events, line", "def log_line(self, line):\n print '%s%s' % (LOG_LINE_PREFIX, line)", "def stacktrace(self):\n stacktrace = self.StacktraceParser().Parse(\n self._raw_stacktrace,\n self._dependency_analyzer.regression_version_deps,\n signature=self.signature, top_n_frames=self._top_n_frames)\n if not stacktrace:\n logging.warning('Failed to parse the stacktrace %s',\n self._raw_stacktrace)\n return stacktrace", "def ProcessLine(fn, filename, file_extension, clean_lines, line,\n include_state, function_state, nesting_state, error,\n extra_check_functions=[]):\n fn(filename, file_extension, clean_lines, line,\n include_state, function_state, nesting_state,\n makeErrorFn(error, [], [r'(.*)should be indented \\+1 space inside(.*)']),\n extra_check_functions=[])", "def settrace(function): # real signature unknown; restored from __doc__\n pass", "def handle_awful_failure(fail_text):\r\n if g.debug:\r\n import sys\r\n s = sys.exc_info()\r\n # reraise the original error with the original stack trace\r\n raise s[1], None, s[2]\r\n try:\r\n # log the traceback, and flag the \"path\" as the error location\r\n import traceback\r\n g.log.error(\"FULLPATH: %s\" % fail_text)\r\n g.log.error(traceback.format_exc())\r\n return redditbroke % fail_text\r\n except:\r\n # we are doomed. 
Admit defeat\r\n return \"This is an error that should never occur. You win.\"", "def myHandleError(self, record):\n if raiseExceptions:\n ei = sys.exc_info()\n try:\n traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)\n except IOError:\n pass # see issue 5971\n finally:\n del ei\n raise", "def myHandleError(self, record):\n if raiseExceptions:\n ei = sys.exc_info()\n try:\n traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)\n except IOError:\n pass # see issue 5971\n finally:\n del ei\n raise", "def log_diagnostics(self, paths):\n\t\tpass", "def dispatch_line(self, frame):\n if self.stop_here(frame) or self.break_here(frame):\n self.user_line(frame)\n if self.quitting: raise BdbQuit\n return self.trace_dispatch", "def error(self, value='', line_before=False):\n self.errors += 1\n if line_before:\n print('\\n')\n print(Fore.RED + '!!! ' + value)", "def log_error(e):\n\tprint(e)", "def log_error(e):\n\tprint(e)", "def log_error(self, fmt, *args):\r\n pass\r\n # log_error\r", "def _show_err(self, msg, lineno, lexpos):\n # get the entire string we just tried to parse\n data = self.lexerObj.lexer.lexdata\n s = data.split('\\n')\n\n col = _find_column(data, lexpos)\n line = s[lineno-1]\n\n leader = 3*' '\n print \"-\"*72\n print \"cvx4py error on line %s:\" % lineno\n print leader, \"\"\">> %s \"\"\" % line.strip()\n print leader, \" \" + (\" \"*(col-1)) + \"^\"\n print\n print \"ERROR:\", msg\n print \"-\"*72", "def format_debug(e):\n _, _, tb = sys.exc_info()\n return '1: {doc} \\n2: {exec_info} \\n3: {exec_0} \\n 4: {exec_1} \\n5: {lineno} \\n6: {stack}'.format(\n doc=e.__doc__,\n exec_info=sys.exc_info(),\n exec_0=sys.exc_info()[0],\n exec_1=sys.exc_info()[1],\n lineno=traceback.tb_lineno(sys.exc_info()[2]),\n stack=traceback.print_tb(tb))", "def get_err_source_info(original_traceback=None) -> dict:\n try: # carefully try to get the actual place where the error happened\n if not original_traceback:\n original_traceback = sys.exc_info()[2] # class, exc, traceback\n first_call = traceback.extract_tb(original_traceback)[-1]\n return dict(\n src_module=first_call[0],\n src_linenr=first_call[1],\n src_func=first_call[2],\n src_code=first_call[3],\n )\n except Exception as e:\n current_app.warning(\n \"I was unable to retrieve error source information: %s.\" % str(e)\n )\n return dict(module=\"\", linenr=0, method=\"\", src_code=\"\")", "def format_h5_backtrace(self, backtrace=None): # reliably restored by inspect\n pass", "def show_error(self):\n logging.error('=> ', self.test_script_source.line)\n total_len = 0\n i = 0\n if self.test_script_source.current_pos <= self.test_script_source.total_num_seg:\n if i < self.test_script_source.current_pos - 1:\n total_len = total_len + len(self.test_script_source.line_segments[i])\n i += 1\n else:\n total_len = len(self.test_script_source.line)\n\n if self.test_script_source.current_pos > 1:\n logging.error('=> ', (' ' * (total_len + 1)) + '^')\n else:\n logging.error('=> ', (' ' * total_len) + '^')", "def get_caller_context(depth=None, **kwarg):\r\n if TIK_ERROR_MSG.api_source_info is not None:\r\n return TIK_ERROR_MSG.api_source_info\r\n if depth is None:\r\n raise RuntimeError(\"There are two reasons for the error:\\n\"\r\n \"If it is called by the user, please register source\"\r\n \" info before entering decorators;\\n\"\r\n \"If it is an internal call, please specify \"\r\n \"the stack depth;\")\r\n additional_stack = kwarg.get('stack_depth', 0)\r\n depth += additional_stack\r\n if ERROR_MSG_LEVEL.err_msg_level == 0:\r\n 
caller = stack(depth)\r\n else:\r\n caller = current_frame(depth)\r\n return caller", "def get_addr2line(traceback, binfile=\"\", search_dirs=[], shlib_db=None):\n tokens = traceback.split(\"+\")\n afile = tokens[0]\n thefile = \"\"\n if not afile:\n thefile = binfile\n else:\n if shlib_db and afile in shlib_db:\n thefile = shlib_db[afile]\n else:\n thefile = find_shlib(g_search_dirs, afile)\n verbose(\"decoding traceback: \" + traceback + \" file: \" + afile + \" => \" + str(thefile), LEVEL_1)\n if not thefile or not os.path.exists(thefile):\n verbose(\"Failed to decode because \" + afile + \" and \" + str(thefile) + \" do not exist!\", LEVEL_1)\n return traceback + \"\\n\"\n offset = tokens[1]\n elf_type = get_elf_type(thefile)\n verbose(thefile + \" elf_type: \" + elf_type, LEVEL_1)\n if elf_type == \"EXEC\":\n # Calculate absolute address for EXEC type binary, which is then fed to addr2line\n base_addr = get_elf_load_base_addr(thefile)\n verbose(\"The LOAD base address or the rounded down entry address is: \" + hex(base_addr), LEVEL_1)\n offset = hex(int(offset, 0) + base_addr)\n #print (\"the absolute address is: \" + offset)\n addr2line_prog = get_config_value(\"addr2line\")\n if not addr2line_prog:\n addr2line_prog = \"addr2line\"\n cmd = addr2line_prog + \" -f -i -e \" + cmd_quote(thefile) + \" \" + offset + \" || true\"\n verbose(\"The traceback decode cmd is: \" + cmd, LEVEL_1)\n output = subprocess.check_output(cmd, shell=True, universal_newlines=True, stderr=open(os.devnull, 'w'))\n return output", "def addExceptionMessage(self, q, inst, traceback):\n self.fail('FAIL: Exception raised: %s' % inst)\n self.addMessage('')\n for line in traceback.format_exc().split('\\n'):\n self.addMessage(line)", "def showDebugSource(self, fn, line):\n if not fn.startswith('<'):\n self.openSourceFile(fn, line)\n self.setFileLine(fn, line)", "def on_error(self, exception):\n traceback.print_exc()", "def report_next_line_error(\n self, context, column_number, line_number_delta=0, extra_error_information=None\n ):\n context.add_triggered_rule(\n context.scan_file,\n context.line_number + line_number_delta,\n column_number,\n self.get_details().plugin_id,\n self.get_details().plugin_name,\n self.get_details().plugin_description,\n extra_error_information,\n )" ]
[ "0.7392801", "0.7135186", "0.6529018", "0.63906014", "0.6296253", "0.6159555", "0.6074369", "0.6055162", "0.6053701", "0.60512197", "0.60511047", "0.60449284", "0.6025194", "0.5981327", "0.5965538", "0.592666", "0.5902943", "0.5831507", "0.580618", "0.5780158", "0.57784814", "0.57647014", "0.5753568", "0.5750067", "0.5749855", "0.5743602", "0.57275677", "0.57223684", "0.57033956", "0.57029104", "0.5697544", "0.5690092", "0.568533", "0.56794834", "0.56768554", "0.56722647", "0.56659317", "0.5658137", "0.56529206", "0.5645151", "0.5626171", "0.56129295", "0.56121814", "0.56075066", "0.5604556", "0.5601082", "0.55999154", "0.5582503", "0.5544587", "0.55318636", "0.552164", "0.55152524", "0.5492879", "0.54899037", "0.54861367", "0.54846215", "0.5483101", "0.5479823", "0.5477309", "0.54702896", "0.54634887", "0.54480475", "0.54472756", "0.54454714", "0.544275", "0.544184", "0.54409266", "0.5438819", "0.5426612", "0.54259", "0.5423951", "0.5410504", "0.54096586", "0.5409225", "0.5399472", "0.53978556", "0.5396943", "0.53968686", "0.5394926", "0.53930175", "0.53849334", "0.5378057", "0.5378057", "0.53736264", "0.53674847", "0.5367169", "0.5365625", "0.5365625", "0.53635585", "0.53543454", "0.5348614", "0.5345943", "0.5341363", "0.5340771", "0.5337111", "0.53295404", "0.53262603", "0.5325475", "0.53171486", "0.53160095" ]
0.7310826
1
Validates and ensures output workspace exists
def validate_workspace(wrksp): try: if wrksp.lower().endswith('.gdb') and \ os.path.isdir(wrksp) == False: return arcpy.CreateFileGDB_management(out_folder_path=os.path.dirname(wrksp), out_name=os.path.basename(wrksp))[0] elif wrksp.lower().endswith('.sde') and \ os.path.isfile(wrksp) == False: raise ValueError("SDE workspace must exist before using it.") elif os.path.isdir(wrksp) == False: os.makedirs(wrksp) return wrksp else: return wrksp except: line, filename, synerror = trace() raise FunctionError( { "function": "validate_workspace", "line": line, "filename": filename, "synerror": synerror, "arc" : str(arcpy.GetMessages(2)) } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_missing_output_workspace(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n config['output_workspaces'] = {}\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertFalse(results['is_valid'])\n self.assertEqual(len(results['errors']), 1)\n self.assertEqual(results['errors'][0]['name'], 'MISSING_WORKSPACE')", "def check_output(self):\n directory, file = split(self.target)\n if not exists(directory):\n mkdir(directory)\n if exists(self.target):\n unlink(self.target)", "def test_invalid_output_workspace(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n config['output_workspaces'] = {\n 'default': 'bad_name'\n }\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertFalse(results['is_valid'])\n self.assertEqual(len(results['errors']), 1)\n self.assertEqual(results['errors'][0]['name'], 'INVALID_WORKSPACE')", "def test_create_workspace():\n\n bambi = create_test_bambi()\n bambi.create_workspace(bambi.name)\n success = check_workspace_existance(bambi)\n\n assert success", "def make_sure_path_exists(out_path):\n try:\n os.makedirs(out_path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n print \"Errors in output folder path! 
please change the output path or analysis name\\n\"\n exit()", "def test_deprecated_output_workspace(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n config['output_workspaces'] = {\n 'default': 'inactive'\n }\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 1)\n self.assertEqual(results['warnings'][0]['name'], 'DEPRECATED_WORKSPACE')", "def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)", "def test_init_does_not_create(self):\n self.logger.info(\"STEP: Initialize the workspace library.\")\n self.workspace = Workspace(Mock())\n\n self.logger.info(\n \"STEP: Verify that the workspace library did not create a workspace \"\n \"directory.\"\n )\n self.assertFalse(Path.cwd().joinpath(\"workspace\").exists())", "def test_not_ready_if_insufficient_output_space(self):\n self.command.package = self.input_ovf\n\n self.command.ui.default_confirm_response = False\n # Make working directory requirements negligible but output huge\n with mock.patch.object(self.command,\n \"working_dir_disk_space_required\",\n return_value=0), \\\n mock.patch.object(self.command.vm,\n 'predicted_output_size',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertFalse(ready)\n self.assertRegex(reason, \"Insufficient disk space available\"\n \" to guarantee successful output\")\n\n # User can opt to continue anyway\n self.command.ui.default_confirm_response = True\n self.command._cached_disk_requirements.clear()\n with mock.patch.object(self.command,\n \"working_dir_disk_space_required\",\n return_value=0), \\\n mock.patch.object(self.command.vm,\n 'predicted_output_size',\n return_value=(1 << 60)):\n ready, reason = self.command.ready_to_run()\n\n self.assertTrue(ready)", "def check_workspace ():\n\n try:\n ex (\"cd $DOC_ROOT/ACE_TAO && git pull -p\")\n print (\"Successfully updated ACE/TAO working copy\")\n except:\n print (\"Unable to update ACE/TAO workspace at \" + doc_root)\n raise\n\n try:\n ex (\"cd $DOC_ROOT/MPC && git pull -p\")\n print (\"Successfully updated MPC working copy to revision \")\n except:\n print (\"Unable to update the MPC workspace at \" + doc_root + \"/ACE/MPC\")\n raise\n\n vprint (\"Repos root URL = \" + opts.repo_root + \"\\n\")\n vprint (\"Repos MPC root URL = \" + opts.mpc_root + \"\\n\")", "def __manage_output_folder(self):\n if not os.path.exists(self.output_folder):\n os.makedirs(self.output_folder)", "def test_output_exists():\n global out_dir, cor_dir\n assert(path.exists(path.join(out_dir, 'oshea_similarity.json')))", "def check_for_preexisting_output_file(output_file_path):\n if path.exists(f\"{output_file_path}\"):\n print(\"Output file at specified save location file path already exists!\")\n print(\"Aborting operation!\")\n sys.exit()", "def validate(cls, output_destination):\n # nothing to check :)\n pass", "def 
validate(cls, output_destination):\n # nothing to check :)\n pass", "def _check_save_directory_path(self):\n if self.save_directory_path is not None:\n if os.path.exists(self.save_directory_path):\n raise ValueError(\n 'You must provide non-existing save output directory, '\n '{} given.'.format(self.save_directory_path))\n else:\n os.makedirs(self.save_directory_path)", "def validate(self):\n self.__log('Validating whether all conditions are met.')\n if not self.config['OUT_FOLDER'] or not self.config['OUTPUT_FOLDER']:\n self.__log('The path to the output folder cannot be found.', 'error')\n raise FileNotFoundError\n\n try:\n if '.' in self.output_filename:\n self.__log('The output filename should not contain an extension.', 'error')\n raise ValueError\n except TypeError:\n pass\n\n if not self.output_filename:\n self.__log('The output filename has not been specified.', 'warning')\n self.output_filename = self.hash_time()\n i = 0\n while self.output_file_exists():\n self.__log('Adding a unique identifier to current filename.', 'warning')\n self.output_filename = self.output_filename + '-' + i\n i += 1\n self.__log(f'Continuing with file: \"{self.output_filename}\"', 'success')\n\n # Iterate over options to check for required parameters, as to not waste requests\n self.__log('Starting to check if all required parameters are set')\n for key, value in self.options.items():\n if key in self.config['REQUIRED_PARAMETERS'] and not value:\n self.__log(f'Missing a required parameter: {key}', 'error')\n raise MissingRequiredParameterError(key)\n\n self.__log('All validation successful.', 'success')", "def setup_molecule_output_check(exp_builder_db, mol_id, output_path):\n exp_builder_db._setup_molecules(mol_id)\n assert os.path.exists(output_path)\n assert os.path.getsize(output_path) > 0", "def env_check(self):\n b_status : bool = True\n str_error : str = \"no error\"\n if not os.path.exists(self.str_inputDir):\n b_status = False\n if self.toConsole():\n error.warn(self, 'inputDirFail', exitToOS = True, drawBox = True)\n str_error = 'error captured while accessing input directory'\n return {\n 'status' : b_status,\n 'error' : str_error\n }", "def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")", "def _validate_output_file_path(file_path: str):\n file_dir = os.path.dirname(file_path)\n if not os.path.isdir(file_dir):\n try:\n os.makedirs(file_dir)\n except Exception as e:\n utils.error(f\"Failed to create parent directory {file_dir} for file {file_path}. Reason: {e}\")\n if not os.access(file_dir, os.W_OK):\n utils.error(f\"Cannot write file: {file_path}. 
{file_dir} is not writeable.\")", "def check(self):\n super().check()\n\n # scratch directory\n if 'ORTHO' not in PATH:\n setattr(PATH, 'ORTHO', join(PATH.SCRATCH, 'ortho'))", "def init(self):\n\n self.checkDirectory(self.output_dir,\"output\")\n self.checkDirectory(self.working_dir,\"working\")", "def validate(self):\n variables = ['waterThickness', 'waterPressure']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/output.nc',\n filename2='restart_run/output.nc')", "def _check_before_run(self):\n if not osp.exists(self.root):\n raise RuntimeError(\"'{}' is not available\".format(self.root))\n if not osp.exists(self.train_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.train_name_path))\n if not osp.exists(self.test_name_path):\n raise RuntimeError(\"'{}' is not available\".format(self.test_name_path))\n if not osp.exists(self.track_train_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_train_info_path))\n if not osp.exists(self.track_test_info_path):\n raise RuntimeError(\"'{}' is not available\".format(self.track_test_info_path))\n if not osp.exists(self.query_IDX_path):\n raise RuntimeError(\"'{}' is not available\".format(self.query_IDX_path))", "def check_out_dir_exists(out_dir):\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.split_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.split_dir))", "def check_config(outconfig):\n self.log.info(\"Checking if all the necessary files exist.\")\n\n # Perform necessary checks\n\n log.info(\"All necessary files exist for {} configuration.\".format(outconfig[\"Flavor\"]))\n\n return", "def __check_exist_path(self):\n if 'path_out' not in self.params:\n raise ValueError('missing \"path_out\" among parameters')\n self.params['path_out'] = update_path(self.params.get('path_out'))\n list_names = [n for n in self.params if any(m in n.lower() for m in ['path', 'dir', 'file'])]\n for n in list_names:\n p = os.path.abspath(os.path.expanduser(self.params[n]))\n if not os.path.exists(p):\n raise FileNotFoundError('given path/file/dir \"%s\" does not exist!' 
% p)\n self.params[n] = p\n for n in [n for n in self.params if 'exec' in n]:\n # in case you define executable in your home\n if os.path.expanduser(self.params[n]) != self.params[n]:\n self.params[n] = os.path.expanduser(self.params[n])", "def ensure_dirs_exists(self):\n os.makedirs(os.path.join(self.location, \"batches\"), exist_ok=True)\n os.makedirs(os.path.join(self.location, \"results\"), exist_ok=True)", "def sanity_check(loadfile, queryfile):\n if not os.path.exists(loadfile):\n print(\"Error: The path for the load csv file does not exist\")\n return False\n\n if not os.path.exists(queryfile): \n print(\"Error: The path for the query csv file does not exist\")\n return False\n\n\n \"\"\"\n if not os.path.exists(\"/plots\"):\n os.mkdir(\"/plots\")\n \"\"\" \n return True", "def check_outpath(self, outpath):\n if not os.path.isdir(outpath+str(self.ar_no)):\n ar_outpath = os.path.join(outpath,str(self.ar_no))\n ar_outpath_video = os.path.join(outpath,str(self.ar_no)+'_video')\n os.makedirs(ar_outpath)\n os.makedirs(ar_outpath_video)\n print(\"Path does not exist, create: \")\n print(ar_outpath)\n print(ar_outpath_video)", "def check_filesystem():\n if FLAGS.continue_run:\n # start a new run, set flag to continue, so there is nothing\n # check if something there, if not, create, but don't delete\n if not tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if not tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)\n else:\n # delete checkpoints and event summaries because training restarted\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'train'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'validation'))\n tf.gfile.MakeDirs(os.path.join(FLAGS.summaries_dir, 'test'))\n if tf.gfile.Exists(FLAGS.checkpoint_dir):\n tf.gfile.DeleteRecursively(FLAGS.checkpoint_dir)\n tf.gfile.MakeDirs(FLAGS.checkpoint_dir)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.data_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.data_dir))\n if not osp.exists(self.raw_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.raw_mat_path))\n if not osp.exists(self.split_new_det_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_det_mat_path))\n if not osp.exists(self.split_new_lab_mat_path):\n raise RuntimeError(\"'{}' is not available\".format(self.split_new_lab_mat_path))", "def check_some_assertions():\n if FLAGS.input_width is None:\n FLAGS.input_width = FLAGS.input_height\n if FLAGS.output_width is None:\n FLAGS.output_width = FLAGS.output_height\n\n if not os.path.exists(FLAGS.checkpoint_dir):\n os.makedirs(FLAGS.checkpoint_dir)\n if not os.path.exists(FLAGS.sample_dir):\n os.makedirs(FLAGS.sample_dir)", "def check_some_assertions():\n if FLAGS.input_width is None:\n FLAGS.input_width = FLAGS.input_height\n if FLAGS.output_width is None:\n FLAGS.output_width = FLAGS.output_height\n\n if not os.path.exists(FLAGS.checkpoint_dir):\n os.makedirs(FLAGS.checkpoint_dir)\n if not os.path.exists(FLAGS.sample_dir):\n 
os.makedirs(FLAGS.sample_dir)", "def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True", "def flush_outputs():\n try:\n shutil.rmtree(ROOT_OUTPUT_DIR)\n print(\"Removed directory '{}'!\".format(ROOT_OUTPUT_DIR))\n return True\n except FileNotFoundError:\n print(\"Directory '{}' already removed!\".format(ROOT_OUTPUT_DIR))\n return False", "def test_set_output_invalid(self):\n # Nonexistent output location, regardless of package\n with self.assertRaises(InvalidInputError):\n self.command.output = \"/foo/bar/baz\"\n\n self.command.package = self.input_ovf\n # Nonexistent output location with package set\n with self.assertRaises(InvalidInputError):\n self.command.output = \"/foo/bar/baz.ova\"\n\n # Output to directory instead of file (currently unsupported)\n with self.assertRaises(InvalidInputError):\n self.command.output = self.temp_dir\n\n # Output to \"directory\" under a file\n with self.assertRaises(InvalidInputError):\n self.command.output = os.path.join(self.input_ovf, \"foo.ova\")", "def check_xshear_output(self):\n lens_nchunk=self['lens_conf']['nchunk']\n tilenames=scat.get_tilenames(self['source_conf']['scat_table'])\n\n ntile=len(tilenames)\n for lens_chunk in xrange(lens_nchunk):\n print(\" checking chunk: %d/%d\" % (lens_chunk+1, lens_nchunk))\n for i,tilename in enumerate(tilenames):\n # first check if this source catalog exists\n if self._scat_exists(tilename):\n job=XShearWQJob(self['run'],\n lens_chunk,\n tilename)\n info=job.get_info()\n if not os.path.exists(info['output_file']):\n print(\"missing output:\",info['output_file'])", "def sanity_check_step(self):\n custom_paths = {\n 'files': ['bin/bazel'],\n 'dirs': [],\n }\n super(EB_Bazel, self).sanity_check_step(custom_paths=custom_paths)", "def _validate_init_args(self):\n\n if self.outdir.exists() and not self.outdir.is_dir():\n raise Error(f\"path '{self.outdir}' already exists and it is not a directory\")\n\n # Ensure that results are compatible.\n rname, rver = self._refinfo[\"toolname\"], self._refinfo[\"toolver\"]\n for res in self.rsts:\n name, ver = res.info[\"toolname\"], res.info[\"toolver\"]\n if name != rname:\n raise Error(f\"the following test results are not compatible:\\n\"\n f\"1. {self._refres.dirpath}: created by '{rname}'\\n\"\n f\"2. {res.dirpath}: created by '{name}'\\n\"\n f\"Cannot put incompatible results to the same report\")\n if ver != rver:\n _LOG.warning(\"the following test results may be not compatible:\\n\"\n \"1. %s: created by '%s' version '%s'\\n\"\n \"2. 
%s: created by '%s' version '%s'\",\n self._refres.dirpath, rname, rver, res.dirpath, name, ver)\n\n # Ensure the report IDs are unique.\n reportids = set()\n for res in self.rsts:\n reportid = res.reportid\n if reportid in reportids:\n # Try to construct a unique report ID.\n for idx in range(1, 20):\n new_reportid = f\"{reportid}-{idx:02}\"\n if new_reportid not in reportids:\n _LOG.warning(\"duplicate reportid '%s', using '%s' instead\",\n reportid, new_reportid)\n res.reportid = new_reportid\n break\n else:\n raise Error(f\"too many duplicate report IDs, e.g., '{reportid}' is problematic\")\n\n reportids.add(res.reportid)\n\n if self.title_descr and Path(self.title_descr).is_file():\n try:\n with open(self.title_descr, \"r\") as fobj:\n self.title_descr = fobj.read()\n except OSError as err:\n raise Error(f\"failed to read the report description file {self.title_descr}: {err}\")\n\n for res in self.rsts:\n if res.dirpath.resolve() == self.outdir.resolve():\n # Don't create report in results directory, use 'html-report' subdirectory instead.\n self.outdir = self.outdir.joinpath(\"html-report\")", "def validate_run_results(input_file_parameters, dir_stack):\r\n prev_command_had_output_dir = True\r\n dir_stack_index = -1\r\n command_index = 0\r\n for current_command in input_file_parameters.commands:\r\n # Skip over SPLIT commands\r\n if current_command == 'SPLIT':\r\n continue\r\n\r\n command_index += 1\r\n\r\n if prev_command_had_output_dir:\r\n dir_stack_index += 1\r\n\r\n # Keep track of number of commands created in the current workflow step\r\n number_of_successful_commands = 0\r\n\r\n # Infer command type, parameters, input and output directories\r\n command_type, command_parameters = \\\r\n utils.parse_staplefile_command_line(current_command)\r\n in_dir = dir_stack[dir_stack_index]\r\n if command_type.require_output_dir:\r\n out_dir = dir_stack[dir_stack_index+1]\r\n prev_command_had_output_dir = True\r\n else:\r\n out_dir = in_dir\r\n prev_command_had_output_dir = False\r\n\r\n # Read files until command class finds no more valid input files\r\n number_of_potential_commands = 0\r\n while True:\r\n try:\r\n # The command instance is generated without exceptions if the\r\n # command execution has failed (i.e. expected output\r\n # file does not exist). Otherwise NewFileError is raised.\r\n current_command = command_type(command_parameters, in_dir, out_dir)\r\n except STAPLERerror.NewFileExists:\r\n number_of_successful_commands += 1\r\n number_of_potential_commands += 1\r\n continue\r\n except STAPLERerror.VirtualIOError:\r\n break\r\n number_of_potential_commands += 1\r\n\r\n # Print validation results\r\n if not number_of_successful_commands:\r\n print '{0} command (step number {1}) has not been run.' \\\r\n .format(command_type.name, command_index)\r\n continue\r\n if number_of_successful_commands == number_of_potential_commands:\r\n print '{0} command (step number {1}) has been run succesfully.' 
\\\r\n .format(command_type.name, command_index)\r\n else:\r\n print '{0} command (step number {1}) workflows have failed {2}/{3} times' \\\r\n .format(command_type.name, command_index,\r\n number_of_potential_commands - number_of_successful_commands,\r\n number_of_potential_commands)", "def ensure_out_dir(out_dir):\n if not os.path.isdir(out_dir):\n os.makedirs(out_dir)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.query_dir):\n raise RuntimeError(\"'{}' is not 
available\".format(self.query_dir))\n if not osp.exists(self.gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.gallery_dir))", "def __validate(self):\n self.report(self.name).receive_info_from_gui(self.tournament.get())\n msg.showinfo(title=None, message=self.message_path_to_folder)\n self.master.master.launch()\n self.master.destroy()", "def current_dir_ok() :\n\n current_ok = True\n\n current_dir = str(os.environ['PWD'])\n expected_dir = str(os.path.abspath(out_dir))\n\n if not current_dir == expected_dir :\n print \"ERROR current directory (%s) is not the output directory for the ntuples (%s)\"%(current_dir, expected_dir)\n current_ok = False\n\n return current_ok", "def _check_numpy_output(self, cwd):\n\n for ii, refname in enumerate(self.files):\n if self.forms[ii] == \"numpy\":\n try:\n ref_output = np.loadtxt(\n Path(cwd) / refname, usecols=self.usecol[ii]\n )\n except IOError:\n raise IOError(\n 'Please provide a reference properties output named \"{}\"'.format(\n refname\n )\n )\n except ValueError:\n raise ValueError(\n \"Please check ref_simulation.out in {}\".format(\n str((self.parent / cwd).absolute())\n )\n )\n\n fname = refname[4:]\n test_output = np.loadtxt(self.tmp_dir / fname, usecols=self.usecol[ii])\n\n try:\n np.testing.assert_allclose(\n test_output, ref_output, rtol=1.0e-7, atol=1.0e-15\n )\n # print(\"No anomaly during the regtest for {}\".format(refname))\n except AssertionError:\n raise AssertionError(\n \"ANOMALY: Disagreement between reference and {} in {}\".format(\n fname, str((self.parent / cwd).absolute())\n )\n )", "def test_outputs_not_created(self):\n one_process_workflow = \"\"\"file://B <- file://A\n echo A does not produce B\n \"\"\"\n process = run_first_process(one_process_workflow)\n assert process.success is False, process.error_message\n assert process.error_message.find(\"these resources should have been created\") >= 0, process.error_message\n assert process.error_message.find(\"* file://B\") >= 0, process.error_message", "def test_make_output_folder_blank_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=\"\"),\n \"Should get back an empty string for an output \"\n \"folder specified as ''\")", "def sanity_check_step(self):\n custom_paths = {\n 'files':[\"%s/%s\" % (self.bindir, x) for x in [\"convert\", \"cplex\", \"cplexamp\"]],\n 'dirs':[],\n }\n super(EB_CPLEX, self).sanity_check_step(custom_paths=custom_paths)", "def test_make_output_folder_undefined_path(self):\n test_object = Maic()\n expected_result = \"\"\n self.assertEqual(expected_result,\n test_object.make_output_folder(output_folder=None),\n \"Should get back an empty string for an undefined \"\n \"output folder\")", "def validate(ctx):\n handler = ValidateCommandHandler(ctx.obj['qa_dir'])\n exit(0 if handler.validate() else 1)", "def workspace_exists(client, workspace):\n data = {\"workspace\": workspace}\n return client._creoson_post(\"windchill\", \"workspace_exists\", data, \"exists\")", "def _check_before_run(self):\n\t\tif not osp.exists(self.dataset_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n\t\tif not osp.exists(self.train_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n\t\tif not osp.exists(self.query_dir):\n\t\t\traise RuntimeError(\"'{}' is not available\".format(self.query_dir))\n\t\tif not osp.exists(self.gallery_dir):\n\t\t\traise RuntimeError(\"'{}' is not 
available\".format(self.gallery_dir))", "def check_dest_root(self):\n dest_root = self.view.folder_line.text()\n if not os.path.isdir(dest_root):\n try:\n os.makedirs(dest_root)\n except (WindowsError, TypeError):\n self.view.message.setText('Please input a valid folder path.')\n return False\n return True", "def test_invalid_workspace(self):\n self.assertFalse(\n cifuzz.build_fuzzers(\n EXAMPLE_PROJECT,\n 'oss-fuzz',\n 'not/a/dir',\n commit_sha='0b95fe1039ed7c38fea1f97078316bfc1030c523',\n ))", "def test_failedCommandProvidesOutput(self):\n bookTeX = FilePath(self.mktemp() + \".tex\")\n builder = BookBuilder()\n inputState = bookTeX.parent().children()\n exc = self.assertRaises(\n CommandFailed,\n builder.buildPDF,\n bookTeX, self.howtoDir, FilePath(self.mktemp()))\n self.assertTrue(exc.output)\n newOutputState = set(bookTeX.parent().children()) - set(inputState)\n self.assertEqual(len(newOutputState), 1)\n workPath = newOutputState.pop()\n self.assertTrue(\n workPath.isdir(),\n \"Expected work path %r was not a directory.\" % (workPath.path,))", "def prepare_run(input_path: str, output_path: str, tmp: str) -> None:\n input_file_exists(input_path)\n if os.path.isdir(output_path) and len(os.listdir(output_path)) != 0:\n raise AssertionError(\"output folder must be empty or non-existent.\")\n set_tempdir(tmp)\n os.makedirs(output_path, exist_ok=True)", "def ensure_data_folder_existence() -> None:\n folder_name = params.DATA_FOLDER_NAME\n if not folder_name in os.listdir('.'):\n os.mkdir(folder_name)", "def __init_output_folder():\n try:\n os.makedirs(Result.__json_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e", "def _check_before_run(self):\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))", "def ensure_exists(output_dir):\n try:\n makedirs(output_dir)\n except OSError:\n if not isdir(output_dir):\n raise", "def santityCheckInitialization(self):\r\n\r\n for obj in self.config[\"repos\"]:\r\n if not isdir(obj[\"path\"]):\r\n print(\"ERROR : Initialization Failed missing {} at path {}\".format(obj[\"name\"], obj[\"path\"]))", "def check_working_directory():\n if not os.path.exists(CUCKOO_ROOT):\n raise CuckooStartupError(\"You specified a non-existing root directory: %s\" % CUCKOO_ROOT)\n\n cwd = os.path.join(os.getcwd(), \"cuckoo.py\")\n if not os.path.exists(cwd):\n raise CuckooStartupError(\"You are not running Cuckoo from it's root directory\")", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.list_query_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_query_path))\n if not osp.exists(self.list_gallery_path):\n raise RuntimeError(\"'{}' is not available\".format(self.list_gallery_path))", "def check_for_setup_error(self):\n if self.share2nms:\n for nfs_share in self.share2nms:\n nms = self.share2nms[nfs_share]\n volume_name, dataset = self._get_share_datasets(nfs_share)\n if not nms.volume.object_exists(volume_name):\n raise LookupError(_(\"Volume %s does not exist in Nexenta \"\n \"Store appliance\"), volume_name)\n folder = '%s/%s' % (volume_name, dataset)\n if not nms.folder.object_exists(folder):\n raise LookupError(_(\"Folder %s does not exist 
in Nexenta \"\n \"Store appliance\"), folder)\n if (folder not in nms.netstorsvc.get_shared_folders(\n 'svc:/network/nfs/server:default', '')):\n self._share_folder(nms, volume_name, dataset)\n self._get_capacity_info(nfs_share)", "def test_raises_when_assignment_tests_directory_is_non_empty(\n self, tmp_path_factory, platform_url, workdir, rtd_path\n ):\n # arrange\n existing_assignment_dir = rtd_path / ASSIGNMENT_NAMES[0]\n existing_assignment_dir.mkdir(parents=True)\n\n # act/assert\n result = run_generate_rtd(\n base_url=platform_url, rtd=rtd_path, workdir=workdir\n )\n\n assert result.status == plug.Status.ERROR\n assert existing_assignment_dir.name in result.msg\n assert \"delete\" in result.msg", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.test_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.test_dir))", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.probe_gallery_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.probe_gallery_dir))", "def _VerifySDKEnvironment(self):\n # If the environment wasn't set up, then the output directory wouldn't be\n # created after 'gn gen'.\n # TODO: Make this check actually look at the environment.\n if not os.path.exists(self.out_board_dir):\n raise AssertionError('%s not created!' % self.out_board_dir)\n # Log args.gn for debugging.\n logging.info('ARGS.GN=\\n%s',\n osutils.ReadFile(os.path.join(self.out_board_dir, 'args.gn')))", "def initialized(self):\n return self.workspace == find_enclosing_workspace(self.workspace)", "def check_working_dir(directory):\n filepath = os.path.join(directory, \"text.txt\")\n try:\n open(filepath, \"w\")\n except IOError:\n sys.exit('Unable to write to directory {0} \\n Exiting Drupdates'.format(directory))\n return False\n return True", "def cleanOutputDir(output):\n if os.path.exists(output) and os.path.isdir(output):\n shutil.rmtree(output)", "def test_output_directory_with_space(self):\n temp_out_dir = \"xxmotif test\"\n input_file = self.copy_and_mark_for_cleanup(\"Fasta/f002\")\n\n try:\n XXmotifCommandline(outdir=temp_out_dir, seqfile=input_file)\n except ValueError:\n pass\n else:\n self.fail(\"expected ValueError\")", "def validate_full_design(self, name=None, outputdir=None, ports=None):\n if name is None:\n name = self.design_name\n if outputdir is None:\n outputdir = self.project_path\n\n self.logger.glb.info(\"#### Design Validation Checks###\")\n #\n # Routine outputs to the validation info to a log file in the project directory and also\n # returns the validation info to be used to update properties.xml file\n\n validation_ok = True\n\n #\n # Write an overall validation log file with all output from all checks\n # The design validation inside HFSS outputs to a separate log file which we merge into this overall file\n #\n val_list = []\n all_validate = outputdir + \"\\\\all_validation.log\"\n with open(all_validate, \"w\") as validation:\n\n # Desktop Messages\n msg = \"Desktop Messages:\"\n validation.writelines(msg + \"\\n\")\n val_list.append(msg)\n msgs = self._desktop.GetMessages(name, \"HFSSDesign1\", 0)\n # need to 
check if design name is always this default name HFSSDesign1\n for msg in msgs:\n self.logger.glb.info(msg)\n # msg = msg.replace('\"','')\n msg = msg.rstrip(\"\\r\\n\")\n val_list.append(msg)\n validation.writelines(msg + \"\\n\")\n\n # Run Design Validation and write out the lines to the logger\n\n ret = self._odesign.ValidateCircuit()\n msg = \"Design Validation Messages:\"\n validation.writelines(msg + \"\\n\")\n val_list.append(msg)\n if ret == 0:\n msg = \"**** ERRORS Present - please check and confirm\"\n self.logger.glb.error(msg)\n else:\n msg = \"**** Validation Completed Correctly\"\n self.logger.glb.info(msg)\n\n # Find the Excitations and check or list them out\n msg = \"Excitation Messages:\"\n validation.writelines(msg + \"\\n\")\n val_list.append(msg)\n numportsdefined = int(len(self.get_excitations_name))\n if ports is not None and ports != numportsdefined:\n msg = \"**** Port Number Error! - Please check model\"\n self.logger.glb.error(msg)\n validation.writelines(msg + \"\\n\")\n val_list.append(msg)\n validation_ok = False\n # need to stop the simulation athis point\n else:\n msg1 = \"Ports Requested: \" + str(ports)\n msg2 = \"Ports Defined: \" + str(numportsdefined)\n self.logger.glb.info(msg1)\n validation.writelines(msg1 + \"\\n\")\n val_list.append(msg1)\n self.logger.glb.info(msg2)\n validation.writelines(msg2 + \"\\n\")\n val_list.append(msg2)\n\n excitation_names = self.get_excitations_name\n for excitation in excitation_names:\n msg = \"Excitation name: \" + str(excitation)\n self.logger.glb.info(msg)\n validation.writelines(msg + \"\\n\")\n val_list.append(msg)\n validation.close()\n return val_list, validation_ok # return all the info in a list for use later", "def test_local_validation(tmpdir):\n with tmpdir.as_cwd():\n with pytest.raises(FileNotFoundError):\n validate_nagl_model_path(\"test.pt\")\n\n with open(\"test.pt\", \"w\") as f:\n f.write(\"test\")\n model_path = validate_nagl_model_path(\"test.pt\")\n assert os.path.exists(model_path)", "def prepare_supplemental_output_directory():\n output_dir = workspace_path('%s/%s' % (scenario_filename(), \"Supplemental Output Files\")) # this does not have the .db suffix\n output_args = ['--output-dir', output_dir] # to be returned and passed to adsm_simulation.exe\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n return output_args", "def check_grid(gridname,modeldirs=None):\n\n chgriddir(gridname)\n \n # guess the model directories \n modeldirs = get_modeldirs(modeldirs)\n \n for modeldir in modeldirs:\n if not os.path.isfile(modeldir+\"/finished.out\"):\n print(\"Model \"+modeldir+\" failed:\")", "def check_output_dir(args, expected_items=0):\n if (\n os.path.exists(args.output_dir)\n and len(os.listdir(args.output_dir)) > expected_items\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n f\"Output directory ({args.output_dir}) already exists and \"\n f\"has {len(os.listdir(args.output_dir))} items in it (expected {expected_items} items). 
\"\n \"Use --overwrite_output_dir to overcome.\"\n )", "def test_empty_output_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['outputs'] = {}\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})", "def __checkDestination(self):\n return os.path.exists(self.__targetPath)", "def sense(self):\n\n partition_folder = self.getPartitionFolder()\n log_folder = os.path.join(partition_folder, 'var/log')\n log_name = 'slapgrid-%s-error.log' % self.getConfig('partition-id')\n slapgrid_error_log_file = os.path.join(partition_folder, '.%s' % log_name)\n link_file = os.path.join(log_folder, log_name)\n monitor_url = self.getConfig('monitor-url')\n message = ''\n if os.path.exists(slapgrid_error_log_file) and \\\n os.stat(slapgrid_error_log_file).st_size:\n message = 'Buildout failed to process %s.' % self.getConfig('partition-id')\n if monitor_url:\n message += '\\nSee %s/log/%s for more information.' % (monitor_url, log_name)\n if not os.path.exists(link_file):\n os.symlink(slapgrid_error_log_file, link_file)\n else:\n if os.path.exists(link_file):\n os.unlink(link_file)\n\n if message:\n self.logger.error(message)\n else:\n self.logger.info(\"buildout is OK\")", "def validate_input(self):\n inputs = self.inputs\n if 'remote_data' in inputs:\n input_ok = True\n else:\n input_ok = False\n return self.exit_codes.ERROR_NO_INPUT_REMOTE_DATA # pylint: disable=no-member\n input_remote = self.inputs.remote_data\n parents = input_remote.get_incoming(node_class=CalcJobNode)\n nparents = len(parents.all_link_labels())\n\n if nparents != 1:\n # extract parent workflow and get uuid of last calc from output node\n parent_workflow = input_remote.inputs.last_RemoteData\n if not isinstance(parent_workflow, WorkChainNode):\n return self.exit_codes.ERROR_INVALID_REMOTE_DATA_TPYE # pylint: disable=no-member\n\n parent_workflow_out = parent_workflow.outputs.output_kkr_scf_wc_ParameterResults\n uuid_last_calc = parent_workflow_out.get_dict().get('last_calc_nodeinfo').get('uuid')\n last_calc = load_node(uuid_last_calc)\n\n if not isinstance(last_calc, KkrCalculation) and not isinstance(last_calc, VoronoiCalculation):\n return self.exit_code.ERROR_INVALID_REMOTE_DATA_TPYE\n\n # overwrite remote_data node with extracted remote folder\n output_remote = last_calc.outputs.remote_folder\n\n self.inputs.remote_data = output_remote\n\n # extract structure\n struc_kkr, _ = VoronoiCalculation.find_parent_structure(self.inputs.remote_data)\n # save if structure is an alloy\n self.ctx.struc_is_alloy = struc_kkr.is_alloy\n\n # To validate for kpoints\n if 'kpoints' in inputs:\n self.ctx.BS_kpoints = inputs.kpoints\n input_ok = True\n self.ctx.structure_data = 'None (kpoints taken from input)'\n else:\n #create an auxiliary structure with unique kind_names, this leads to using the input structure in the seekpath method instead of finding the primitive one\n cell = np.array(struc_kkr.cell)\n if not struc_kkr.pbc[2]:\n # 2D structure, make sure the third bravais vector points along z\n cell[2] = np.cross(cell[0], cell[1])\n saux = StructureData(cell=cell)\n for isite, 
site in enumerate(struc_kkr.sites):\n kind = struc_kkr.get_kind(site.kind_name)\n saux.append_atom(\n name='atom' + str(isite) + ':' + site.kind_name,\n symbols=kind.symbols,\n weights=kind.weights,\n position=site.position\n )\n # use auxiliary structure inside k-point generator\n output = get_explicit_kpoints_path(saux)\n primitive_struc = output['primitive_structure']\n conventional_struc = output['conv_structure']\n kpoints_ok = True\n\n #check if primitive_structure and input structure are identical:\n maxdiff_cell = sum(abs(np.array(primitive_struc.cell) - np.array(saux.cell))).max()\n\n if maxdiff_cell > 3 * 10**-9:\n self.report(f'Error in cell : {maxdiff_cell}')\n self.report(\n 'WARNING : The structure data from the voronoi calc is not the primitive structure type and in come cases it is medatory'\n )\n self.report(f'prim: {primitive_struc.cell} {primitive_struc.sites}')\n self.report(f'conv: {conventional_struc.cell} {conventional_struc.sites}')\n self.ctx.structure_data = 'conventional_unit_cell '\n else:\n self.ctx.structure_data = 'primitive_unit_cell'\n\n if not kpoints_ok:\n return self.exit_codes.ERROR_INCORRECT_KPOINTS_EXTRACTED # pylint: disable=no-member\n else:\n kpts = output['explicit_kpoints']\n\n self.ctx.BS_kpoints = kpts\n if isinstance(KpointsData(), type(kpts)):\n input_ok = True\n else:\n input_ok = False\n return self.exit_codes.ERROR_NO_KPOINTS_EXTRACTED # pylint: disable=no-member\n\n # To validate for kkr\n if 'kkr' in inputs:\n try:\n test_and_get_codenode(inputs.kkr, 'kkr.kkr', use_exceptions=True)\n except ValueError:\n input_ok = False\n return self.exit_codes.ERROR_KKRCODE_NOT_CORRECT # pylint: disable=no-member\n\n # set self.ctx.input_params_KKR\n self.ctx.input_params_KKR = get_parent_paranode(self.inputs.remote_data)\n self.report(f'The validation input_ok {input_ok}')", "def check_training_package(folder, ruleset, quiet, werror):\n check_training_result_files(folder, ruleset, quiet, werror)", "def assert_sources_folder_exist(command: commands.FilesRelatedCommand,\n filesystem: infra.Filesystem,\n stdout: infra.STDOut) -> None:\n if filesystem.not_exists(command.sources_folder):\n stdout.red(f'Sources folder does not exist: {command.sources_folder}')\n sys.exit(1)", "def ValidateOutput(self, stdout, stderr, result):\n # Store .ref and .log files in a platform-specific subdirectory\n # (avoid possible clashes if several platforms are tested)\n if \"CMTCONFIG\" in os.environ:\n try: os.mkdir( os.environ['CMTCONFIG'] )\n except OSError: pass\n stdout_log_path=os.environ['CMTCONFIG']+os.sep\n else:\n stdout_log_path=''\n # Maybe some verbosity is needed here\n if not(self.stdout_tag==''):\n strlog='the tag is ' + self.stdout_tag\n logger.debug('ExecTestBase2:ValidateOutput: '+strlog)\n if not(self.stdout_tol==0):\n strlog='the tolerance is ' + repr(self.stdout_tol) \n logger.debug('ExecTestBase2:ValidateOutput: '+strlog)\n if not(self.stdout_ref==''):\n if not(self.stdout_ref_path==''):\n self.reference_file=self.stdout_ref_path+os.sep+self.stdout_ref \n else:\n self.reference_file=self.stdout_ref \n if os.path.abspath(self.reference_file) != os.path.abspath(stdout_log_path+str(self.stdout_ref)):\n shutil.copyfile(os.path.abspath(self.reference_file),\n stdout_log_path+str(self.stdout_ref))\n ref_file_stdout=''\n for l in fileinput.input(stdout_log_path+str(self.stdout_ref)): \n ref_file_stdout=ref_file_stdout+l.strip()+'\\n'\n if not(self.excluded_lines==''):\n strlog='the excluded lines are ' + self.excluded_lines \n 
logger.debug('ExecTestBase2:ValidateOutput: '+strlog) \n # Copy the log for later use as ref \n f_ouput=open(stdout_log_path+self.stdout_ref.rstrip('ref')+'log', 'w')\n f_ouput.write(stdout)\n f_ouput.close()\n # Check to see if the standard output matches.\n self.causes = []\n if not(self.stdout=='*'):\n if not(self.stdout_ref==''):\n # the reference output is described in a\n # external reference file \n if not self.__CompareText1(stdout, ref_file_stdout, result):\n self.causes.append(\"standard output\") \n result[\"ExecTest.expected_stdout\"] = result.Quote(self.stdout)\n else:\n # the reference output is described in the test-case\n if not self.__CompareText1(stdout, self.stdout,result):\n self.causes.append(\"standard output\") \n result[\"ExecTest.expected_stdout\"] = result.Quote(self.stdout)\n else:\n result[\"ExecTest.expected_stdout\"] = result.Quote(self.stdout)\n \n # Check to see if the standard error matches.\n if not(self.stderr=='*'):\n if not self.__CompareText(stderr, self.stderr):\n self.causes.append(\"standard error\")\n result[\"ExecTest.expected_stderr\"] = result.Quote(self.stderr)\n else: \n result[\"ExecTest.expected_stderr\"] = result.Quote(self.stderr)\n #\n return self.causes", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def checkGlobalConflicts(self):\n\t\tappionScript.AppionScript.checkGlobalConflicts(self)\n\n\t\tif self.params['runname'] is None:\n\t\t\tapDisplay.printError(\"please enter a runname, example: 'runname=run1'\")\n\t\tif self.params['runname'] == 'templates':\n\t\t\tapDisplay.printError(\"templates is a reserved runname, please use another runname\")\n\t\tif self.params['runname'] == 'models':\n\t\t\tapDisplay.printError(\"models is a reserved runname, please use another runname\")\n\t\tif self.params['rundir']is not None and self.params['runname'] != os.path.basename(self.params['rundir']):\n\t\t\tapDisplay.printError(\"runname and rundir basename are different: \"\n\t\t\t\t+self.params['runname']+\" vs. \"+os.path.basename(self.params['rundir']))\n\t\tif self.params['mrcnames'] and self.params['preset']:\n\t\t\tapDisplay.printError(\"preset can not be specified if particular images have been specified\")\n\t\tif self.params['sessionname'] is None and self.params['mrcnames'] is None:\n\t\t\tapDisplay.printError(\"please specify an mrc name or session\")\n\t\tif self.params['sessionname'] is not None and self.params['projectid'] is not None:\n\t\t\t### Check that project and tilt series are in sync\n\t\t\tseriesproject = apProject.getProjectIdFromSessionName(self.params['sessionname'])\n\t\t\tif seriesproject and seriesproject != self.params['projectid']:\n\t\t\t\tapDisplay.printError(\"project id and session do not correlate\")", "def check_settings(args):\n print('')\n print('-'*80)\n print('CHECK SETTINGS:\\n')\n\n pypath= os.path.abspath(os.path.split(oxbs_qc.__file__)[0])\n checkList= []\n sys.stdout.write('Check output directory \"%s\" is writable... ' %(args.outdir))\n passed= os.access(args.outdir, os.W_OK)\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('outdir', passed))\n\n ## samtools\n ## -----------\n tg= spawn.find_executable('samtools')\n\n if tg is None:\n passed= False\n else:\n passed= os.path.isfile(tg)\n\n sys.stdout.write('Check samtools \"%s\"... 
' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('samtools', passed))\n\n ## samtools\n ## -----------\n tg= spawn.find_executable('bedtools')\n\n if tg is None:\n passed= False\n else:\n passed= os.path.isfile(tg)\n\n sys.stdout.write('Check bedtools \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED (methyaltion can\\'t be called)\\n')\n checkList.append(('bedtools', passed))\n\n\n ## cutadapt\n ## trim_glore doesn't have a cutadapt path options. So it must be on the PATH\n ## -----------\n tg= spawn.find_executable('cutadapt')\n if tg is None:\n passed= False\n else:\n passed= os.path.isfile(tg)\n\n sys.stdout.write('Check cutadapt \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('cutadapt', passed))\n\n ## Trim galore\n ## -----------\n if args.trim_galore_path is None:\n tgpath= os.path.abspath(os.path.split(oxbs_qc.__file__)[0])\n else:\n tgpath= args.trim_galore_path \n tg= os.path.join(tgpath, 'trim_galore')\n passed= os.path.isfile(tg)\n\n sys.stdout.write('Check trim_galore \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('trim_galore', passed))\n\n ## bismark\n ## -------\n if args.bismark_path is None:\n tgpath= os.path.abspath(os.path.split(oxbs_qc.__file__)[0])\n else:\n tgpath= args.bismark_path \n tg= os.path.join(tgpath, 'bismark')\n passed= os.path.isfile(tg)\n\n sys.stdout.write('Check bismark \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('bismark', passed))\n\n ## clipOverlap\n ## -----------\n if args.clipoverlap_path == '':\n tg= spawn.find_executable('bam')\n else:\n tg= os.path.join(args.clipoverlap_path, 'bam')\n \n if tg is None:\n passed= False\n else:\n passed= os.path.isfile(tg)\n\n sys.stdout.write('Check bam clipOverlap \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('clipOverlap', passed))\n\n ## R\n ## -----------\n if args.rscript_path == '':\n tg= spawn.find_executable('Rscript')\n else:\n tg= os.path.join(args.rscript_path, 'Rscript')\n\n if tg is None:\n passed= False\n else:\n passed= os.path.isfile(tg)\n\n sys.stdout.write('Check R/Rscript \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('R/Rscript', passed))\n\n\n ## Custom scripts\n ## --------------\n tg= os.path.join(pypath, 'FastQC/fastqc')\n passed= os.path.isfile(tg)\n sys.stdout.write('Check fastqc \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('fastqc', passed))\n\n tg= os.path.join(pypath, 'ShortenFastq.jar')\n passed= os.path.isfile(tg)\n sys.stdout.write('Check ShortenFastq.jar \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('ShortenFastq.jar', passed))\n\n tg= os.path.join(pypath, 'MarkDuplicates.jar')\n passed= os.path.isfile(tg)\n sys.stdout.write('Check MarkDuplicates.jar \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('MarkDuplicates.jar', passed))\n\n tg= os.path.join(pypath, 'cleanReadNames.py')\n passed= os.path.isfile(tg)\n sys.stdout.write('Check cleanReadNames.py \"%s\"... 
' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('cleanReadNames.py.py', passed))\n\n #tg= os.path.join(pypath, 'mpileup2methylation.py')\n #passed= os.path.isfile(tg)\n #sys.stdout.write('Check mpileup2methylation.py \"%s\"... ' %(tg))\n #if passed:\n # sys.stdout.write('OK\\n')\n #else:\n # sys.stdout.write('FAILED\\n')\n #checkList.append(('mpileup2methylation.py', passed))\n \n tg= os.path.join(pypath, 'bam2methylation.py')\n passed= os.path.isfile(tg)\n sys.stdout.write('Check bam2methylation.py \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('bam2methylation.py', passed))\n \n tg= os.path.join(pypath, 'oxbs_report.R')\n passed= os.path.isfile(tg)\n sys.stdout.write('Check oxbs_report.R \"%s\"... ' %(tg))\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('oxbs_report.R', passed))\n\n ## Reference FASTA\n sys.stdout.write('Check reference FASTA \"%s\"... ' %(args.ref))\n if args.ref is None:\n tg= None\n passed= False\n sys.stdout.write('FAILED\\n')\n elif os.path.isfile(args.ref):\n sys.stdout.write('OK\\n')\n passed= True\n else:\n sys.stdout.write('FAILED\\n')\n passed= False\n checkList.append(('Ref. FASTA', passed))\n \n ## Reference TXT\n sys.stdout.write('Check reference TXT \"%s\"... ' %(args.ref))\n if args.ref is None:\n tg= None\n passed= False\n sys.stdout.write('FAILED\\n')\n else:\n txt= os.path.splitext(args.ref)[0] + '.txt'\n if os.path.isfile(txt):\n sys.stdout.write('OK\\n')\n passed= True\n else:\n sys.stdout.write('FAILED\\n')\n passed= False\n checkList.append(('Ref. TXT', passed))\n \n ## List of BED positions:\n if args.listpos is not None:\n sys.stdout.write('Check bed file of positions \"%s\"... ' %(args.listpos))\n if os.path.isfile(args.listpos):\n sys.stdout.write('OK\\n')\n passed= True\n else:\n sys.stdout.write('FAILED\\n')\n passed= False\n checkList.append(('Bed file of positions', passed))\n \n ## Check bowtie2 indexes\n ##refbt2= os.path.split(args.ref)[0] + 'Bisulfite_Genome'\n ## ...\n \n sys.stdout.write('Check prefix \"%s\"... 
' %(args.prefix))\n try:\n passed= validate_prefix(args.prefix)\n except ValidArgException:\n passed= False\n if passed:\n sys.stdout.write('OK\\n')\n else:\n sys.stdout.write('FAILED\\n')\n checkList.append(('prefix', passed))\n \n print('')\n print('-'*80)\n for x in checkList:\n if not x[1]:\n print(x[0] + ' ' + 'FAILED')\n print('')\n return(checkList)", "def _assert_build_info(self):\n if not self.path.exists():\n from zensols.pybuild import SetupUtil\n self.path.parent.mkdir(parents=True, exist_ok=True)\n if not self.rel_setup_path.exists():\n raise OSError('configuration file does not ' +\n f'exist: {self.rel_setup_path}')\n su = SetupUtil.source(rel_setup_path=self.rel_setup_path)\n logger.info(f'saving build info to {self.path}')\n with open(self.path, 'w') as f:\n su.to_json(writer=f)", "def validate(self):\n variables = ['bottomDepth', 'ssh', 'layerThickness', 'zMid',\n 'maxLevelCell', 'temperature', 'salinity']\n compare_variables(\n test_case=self, variables=variables,\n filename1='initial_state/initial_state.nc')\n\n variables = ['temperature', 'salinity', 'layerThickness',\n 'normalVelocity']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/output.nc',\n filename2='restart_run/output.nc')\n\n variables = ['ssh', 'landIcePressure', 'landIceDraft',\n 'landIceFraction',\n 'landIceMask', 'landIceFrictionVelocity', 'topDrag',\n 'topDragMagnitude', 'landIceFreshwaterFlux',\n 'landIceHeatFlux', 'heatFluxToLandIce',\n 'landIceBoundaryLayerTemperature',\n 'landIceBoundaryLayerSalinity',\n 'landIceHeatTransferVelocity',\n 'landIceSaltTransferVelocity',\n 'landIceInterfaceTemperature',\n 'landIceInterfaceSalinity', 'accumulatedLandIceMass',\n 'accumulatedLandIceHeat']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/land_ice_fluxes.nc',\n filename2='restart_run/land_ice_fluxes.nc')\n\n variables = ['accumulatedFrazilIceMass',\n 'accumulatedFrazilIceSalinity',\n 'seaIceEnergy', 'frazilLayerThicknessTendency',\n 'frazilTemperatureTendency', 'frazilSalinityTendency',\n 'frazilSurfacePressure',\n 'accumulatedLandIceFrazilMass']\n compare_variables(test_case=self, variables=variables,\n filename1='full_run/frazil.nc',\n filename2='restart_run/frazil.nc')", "def _checkIntegrity(self):\n return (\n os.path.isfile(os.path.join(self._root, 'processed/train.pkl'))\n and os.path.isfile(os.path.join(self._root, 'processed/test.pkl')))", "def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()" ]
[ "0.6972508", "0.65746725", "0.65183914", "0.6397043", "0.6383192", "0.6307421", "0.60515106", "0.60397726", "0.5996995", "0.5954663", "0.5945204", "0.59358025", "0.58788586", "0.58496416", "0.58496416", "0.577803", "0.572291", "0.56724334", "0.5628311", "0.56132805", "0.55874103", "0.5541713", "0.5536222", "0.5505645", "0.5494801", "0.54901916", "0.5467604", "0.54672295", "0.5461337", "0.5450424", "0.54499495", "0.543761", "0.54301184", "0.54046744", "0.537678", "0.537678", "0.53734046", "0.5360128", "0.53588945", "0.53519976", "0.5349346", "0.5342365", "0.5339571", "0.5337366", "0.5331806", "0.5331806", "0.5331806", "0.5331806", "0.5331806", "0.5331806", "0.5331806", "0.5327412", "0.53218377", "0.5315302", "0.5313363", "0.5310116", "0.5303679", "0.5302499", "0.5300942", "0.5298201", "0.5292504", "0.526105", "0.525253", "0.52461797", "0.52356625", "0.5226631", "0.5224625", "0.5224186", "0.5210588", "0.5209039", "0.5208011", "0.52011806", "0.5193056", "0.5191687", "0.51871824", "0.5182416", "0.5181366", "0.5161747", "0.5160764", "0.5160578", "0.5154812", "0.51371455", "0.5134891", "0.51318324", "0.51314414", "0.5131182", "0.5128631", "0.51260924", "0.5125647", "0.5125474", "0.5124305", "0.5123971", "0.51220125", "0.5121909", "0.5115323", "0.5115142", "0.51094776", "0.5104965", "0.5101601", "0.5100693" ]
0.6396854
4
Adds the required columns to the table and appends new records if given.
def extend_table(table, rows=None): try: if rows is None: rows = [] dtypes = np.dtype( [ ('_ID', np.int), ('MEAN_DEF_CNT', np.float64), ('MEDIAN_DEF_CNT', np.int32), ('MIN_DEF_CNT', np.int32), ('MAX_DEF_CNT', np.int32), #STandard deviation ('PRI_NUM_DEF', np.int32), ('SEC_NUM_DEF', np.int32), ('PER_PRI', np.float64), ('PER_SEC', np.float64), ("PRI_ATTR_DEF", '|S20'), # pri_attr ("SEC_ATTR_DEF", '|S20'), ('PRI_ATTR_DEF_PER', np.float64), ('SEC_ATTR_DEF_PER', np.float64), ('FEATURE_CNT', np.int32), ('PRI_ATTR_DEF_CNT', np.float64), ('SEC_ATTR_DEF_CNT', np.float64), ('LC_SCORE', np.int32) ] ) array = np.array(rows, dtypes) da.ExtendTable(table, "OID@", array, "_ID", False) return table except: line, filename, synerror = trace() raise FunctionError( { "function": "extend_table", "line": line, "filename": filename, "synerror": synerror, "arc" : str(arcpy.GetMessages(2)) } )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_cols(self, source) :\n\n cols = source.get_cols()\n types = source.get_types()\n\n new_cols = []\n new_types = []\n for i in range(len(cols)) :\n if cols[i] not in self.cols :\n new_cols.append(cols[i])\n new_types.append(types[i])\n self.cols.extend(new_cols)\n self.types.extend(new_types)\n\n self._alter_table(new_cols, new_types)\n\n row_ids = self.get_values('__ROWID')\n \n for col in new_cols :\n new_vals = source.get_values(col)\n if len(row_ids) == 0 :\n for val in new_vals :\n self._insert_internal(['__ROWID', col], [0, val])\n\n row_ids = self.get_values('__ROWID')\n\n else :\n binds = zip(new_vals, row_ids)\n q = self._quoter(col)\n sql_base = 'UPDATE \"%s\" SET \"%s\" = %s WHERE \"__ROWID\" = %%d' % (self.name, col, q)\n cur = self.con.cursor()\n for bind in binds :\n if bind[0] :\n update_sql = sql_base % (str(bind[0]), bind[1])\n cur.execute(update_sql)\n\n self.version += 1", "def add_columns(self, table, col_data, col_type):\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n for data, typ in zip(col_data, col_type):\n c.execute(\"ALTER TABLE {tn} ADD COLUMN {cn} {ct}\".\n format(tn=table, cn=data, ct=typ))\n conn.commit() \n conn.close()", "def add_data(self, rowdata):\n if not rowdata.keys():\n # No columns were specified\n return\n for colnam in rowdata.keys():\n # Check the the column is actually defined in\n # in the table\n try:\n self.list_columns().index(colnam)\n except ValueError:\n # The column name wasn't found\n raise ValueError(\n \"Column \" + str(colnam) + \" is not defined in the table\"\n )\n for icol in range(0, self.ncolumns()):\n # Look up whether the column has an\n # explicit value assigned\n colnam = self.table_column(icol).title()\n if colnam in rowdata:\n self.table_column(icol).append(rowdata[colnam])\n else:\n # Assign a null value\n self.table_column(icol).append(\"*\")", "def AddColumns(sqlite_file, table_name):\r\n columns = ['cf_direct_parent','cf_kingdom','cf_superclass',\\\r\n 'cf_class','cf_subclass','cf_intermediate_0','cf_intermediate_1',\\\r\n 'cf_intermediate_2','cf_intermediate_3','cf_intermediate_4',\\\r\n 'cf_intermediate_5','cf_molecular_framework','cf_alternative_parents',\\\r\n 'cf_substituents', 'cf_description']\r\n column_type = 'TEXT'\r\n # Connecting to the database file\r\n conn = sqlite3.connect(sqlite_file) # Connecting to the database\r\n c = conn.cursor() # Adding a cursor to interact with the database\r\n # Adding new column, if it does not exist yet, without a row value\r\n for new_column_name in columns:\r\n try:\r\n c.execute(\"ALTER TABLE {tn} ADD COLUMN '{cn}' {ct}\"\\\r\n .format(tn=table_name, cn=new_column_name, ct=column_type))\r\n print(\"Column created: {cn}\".format(cn=new_column_name))\r\n except sqlite3.OperationalError:\r\n print(\"Column already exists: {cn}\".format(cn=new_column_name))\r\n conn.commit()\r\n conn.close()\r\n return None", "def _addcolumns(self, columnname, columndata=\"\"):\n self[columnname] = columndata", "def add_col(self):\r\n reader = csv.reader(open(self.in_csvfile, newline=''))\r\n rows = list(reader)\r\n rows[0].append(self.col_name)\r\n for i in range(1, len(rows)):\r\n rows[i].append(self.cell_filler(rows[i]))\r\n writer = csv.writer(open(self.out_csvfile, 'w', newline=''))\r\n writer.writerows(rows)", "def __append_columns(self, new_dataframe):\n self.dataframe = pd.merge(self.dataframe, new_dataframe)", "def _add_necessary_columns(args, custom_columns):\n # we need to add the variant's chrom, start and gene if \n # not already there.\n if 
custom_columns.find(\"gene\") < 0:\n custom_columns += \", gene\"\n if custom_columns.find(\"start\") < 0:\n custom_columns += \", start\"\n \n return custom_columns", "def add_rows(self):\n for row in self.rows:\n self.table.add_row(row)", "def append_data(self, table_name, df):\n\t\tself.__check_colnames(table_name, df)\n\t\tif self.__dbfile is not None:\n\t\t\tdf.to_sql(table_name, self._conn, index=False, if_exists=\"append\")", "def add_columns(self, **columns):\n return self.as_dataframe(self.data.assign(**columns))", "def _addColumn(self, table, column, init_data):\n\t\tcommand = \"ALTER TABLE \" + table + \" ADD COLUMN \" + str(column) + \" \" + getSQLiteType(init_data)\n\t\ttry:\n\t\t\tself._run_command(command)\n\t\texcept sqlite3.OperationalError:\n\t\t\tprint(\"Column \" + str(column) + \" already exists!\")", "def append_table(self, table):\n if not table:\n return\n\n indexes = []\n for idx in table.index:\n index = self.size + idx\n indexes.append(index)\n\n self.set(indexes=indexes, columns=table.columns, values=table.data)", "def addEntryToTable(self):\n self.table_view.table_model.insertRows(0, 1)", "def test_append(self):\n self.table.append(['Tom', 26])", "def add(table, record):\n\n table.append(record)\n\n return table", "def dbcolumns(con,table,**kw):\n cur = con.cursor()\n cols = list(cur.execute(\"pragma table_info(\"+table+\")\"))\n colnames = [col[1] for col in cols]\n if colnames==[]:\n cmd = \"create table \"+table+\" (id integer primary key\"\n for k,v in kw.items():\n cmd += \", %s %s\"%(k,v)\n cmd += \")\"\n cur.execute(cmd)\n else:\n # table already exists; add any missing columns\n for k,v in kw.items():\n if not k in colnames:\n cmd = \"alter table \"+table+\" add column \"+k+\" \"+v\n cur.execute(cmd)\n con.commit()\n del cur", "def test_add_new_no_dupl_w_optional(self):\n new_df = pd.DataFrame(np.eye(3) * 2, index=range(3, 6),\n columns=self.req_cols + self.opt_cols)\n self.table.add_new(new=new_df)\n self.assertEqual(len(self.table.index), 6)", "def add(table):\n\n # your code\n\n \n \n\n return_inputs = ui.get_inputs(['Name', 'Year'],\"Please enter a new record.\")\n key = str(common.generate_random(table))\n table.append([key,return_inputs[FIRST_PROP] , str(return_inputs[SECOND_PROP])])\n data_manager.write_table_to_file('hr/persons.csv', table)\n\n\n return table", "def add_feature_columns(self, feature_columns: typing.List[str]):\n self.feature_columns += feature_columns", "def test_add_column(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n\n genre_column = Varchar()\n genre_column._meta.name = \"genre\"\n\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column, genre_column],\n )\n ]\n schema_snapshot: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\",\n tablename=\"band\",\n columns=[name_column],\n )\n ]\n\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n self.assertTrue(len(schema_differ.add_columns.statements) == 1)\n self.assertEqual(\n schema_differ.add_columns.statements[0],\n \"manager.add_column(table_class_name='Band', tablename='band', column_name='genre', db_column_name='genre', column_class_name='Varchar', column_class=Varchar, params={'length': 255, 'default': '', 'null': False, 'primary_key': False, 'unique': False, 'index': False, 'index_method': IndexMethod.btree, 'choices': None, 'db_column_name': None})\", # noqa\n )", "def __mag_table_append(self, table_new):\n for r in 
table_new[self.__mag_colnames]:\n self.__mags.add_row(r)\n self.__mags.sort(['ra','dec','MJD'])", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def add_record(self, data):\n if not self._validate_columns(data):\n raise ValueError('Invalid column names')\n formatted_data = [str(data[column]) for column in self.column_names]\n utils.write_line(','.join(formatted_data) + '\\n', self.filename, 'a')", "def add_blank_data_column(self):\n\n header_title, ok_pressed = QInputDialog.getText(self, \"Add Column\", \"Enter heading for the column:\",\n QLineEdit.Normal, \"\")\n if ok_pressed and header_title != '':\n # print(header_title)\n\n default_value, set_default_pressed = QInputDialog.getText(self, \"Set Default Value\",\n \"Enter default value to set for column if any:\",\n QLineEdit.Normal, \"\")\n\n row_count = self.csv_data_table.rowCount()\n last_column_count = self.csv_data_table.columnCount()\n self.csv_data_table.insertColumn(last_column_count)\n for empty_row in range(0, row_count):\n item = QTableWidgetItem(default_value)\n self.csv_data_table.setItem(empty_row, last_column_count, item)\n\n # TODO: fix untraced bug present in show/hide columns\n self.column_headers.append(header_title)\n self.column_headers_all.append(header_title)\n # print(self.column_headers)\n # print(self.column_headers_all)\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)", "def addTableColumn(self, tablename, columnname, columntype):\n\n # Check if the table exists\n if tablename in self.getTableNames():\n\n # Check that the column does not already exist\n if columnname not in self.getColumnNames(tablename):\n\n #Allow columnames with spaces\n columnname = '`'+columnname+'`'\n\n \"\"\"# Fit characters to the allowed format if necessary\n fmt = ''\n if (self.connector == 'mysql' and\n ('TEXT' in columntype or 'VARCHAR' in columntype) and\n not ('CHARACTER SET' in columntype or\n 'utf8mb4' in columntype)):\n\n # We enforze utf8mb4 for mysql\n fmt = ' CHARACTER SET utf8mb4'\n\n\n sqlcmd = ('ALTER TABLE ' + tablename + ' ADD COLUMN ' +\n columnname + ' ' + columntype + fmt)\"\"\"\n sqlcmd = ('ALTER TABLE ' + tablename + ' ADD COLUMN ' +\n columnname + ' ' + columntype) \n self._c.execute(sqlcmd)\n\n # Commit changes\n self._conn.commit()\n\n else:\n print((\"WARNING: Column {0} already exists in table {1}.\"\n ).format(columnname, tablename))\n\n else:\n print('Error adding column to table. 
Please, select a valid ' +\n 'table name from the list')\n print(self.getTableNames())\n\n return", "def add(table):\n\n # 2\n id = common.generate_random(table)\n addnew = ui.get_inputs(\n ['name: ', 'birth_year: '],\n 'Adding entry to hr')\n addnew.insert(0, id)\n table.append(addnew)\n data_manager.write_table_to_file('hr/persons.csv', table)\n return table", "def columns_setup(self):\n self.required = None\n self.addition = None\n self.deletion = None\n self.retention = None\n self.rename = None", "def addCommonExtraColumn(self, req, study_id, found_extra_table, column_name, data_type, description):\n debug = False\n common_extra_table_name = None\n min_column_count = None\n quoted_column_name = '\"{0}\"'.format(column_name.upper())\n \n if 'SAMPLE' in found_extra_table:\n common_extra_table_name = 'COMMON_EXTRA_SAMPLE'\n min_column_count = 2\n elif 'PREP' in found_extra_table:\n common_extra_table_name = 'COMMON_EXTRA_PREP'\n min_column_count = 3\n \n if common_extra_table_name == None:\n raise Exception('Error: Could not determine the common extra table name. The found extra table is: %s' % found_extra_table)\n \n # Set the database data type:\n database_data_type = ''\n if data_type == 'text' or database_data_type == 'range':\n database_data_type = 'varchar2(4000)'\n elif data_type == 'numeric':\n database_data_type = 'int'\n elif data_type == 'date':\n database_data_type = 'date'\n \n if database_data_type == '':\n raise Exception('Could not determine common extra column data type.')\n\n # Create the column if it doesn't already exist\n statement = \"\"\"\n select count(*) \n from all_tab_columns \n where column_name = '{0}' \n and table_name = '{1}'\n \"\"\".format(column_name.upper(), common_extra_table_name)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con = self.getMetadataDatabaseConnection()\n results = con.cursor().execute(statement).fetchone()\n if results[0] == 0:\n statement = 'alter table %s add %s %s' % (common_extra_table_name, quoted_column_name, database_data_type)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Copy the data found in the found extra_table\n if common_extra_table_name == 'COMMON_EXTRA_SAMPLE':\n statement = \"\"\"\n MERGE INTO common_extra_sample e\n USING (\n SELECT sample_id, {0}\n FROM {1}\n ) x\n ON (e.sample_id = x.sample_id)\n WHEN MATCHED THEN \n UPDATE SET e.{0} = x.{0}\n WHEN NOT MATCHED THEN \n INSERT (e.sample_id, e.{0})\n VALUES (x.sample_id, x.{0})\n \"\"\".format(quoted_column_name, found_extra_table)\n else:\n statement = \"\"\"\n MERGE INTO common_extra_prep e\n USING (\n SELECT sample_id, row_number, {0}\n FROM {1}\n ) x\n ON (e.sample_id = x.sample_id and e.row_number = x.row_number)\n WHEN MATCHED THEN \n UPDATE SET e.{0} = x.{0}\n WHEN NOT MATCHED THEN \n INSERT (e.sample_id, e.row_number, e.{0})\n VALUES (x.sample_id, x.row_number, x.{0})\n \"\"\".format(quoted_column_name, found_extra_table)\n \n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n statement = 'commit'\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Remove the column from the found extra table. 
If it's the last custom column in the table, remove the table\n statement = \"select count(*) from all_tab_columns where table_name = '%s'\" % (found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n results = con.cursor().execute(statement).fetchone()\n if results[0] <= min_column_count:\n statement = 'drop table %s' % (found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n else:\n statement = 'alter table %s drop column %s' % (found_extra_table, quoted_column_name)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n \n # Clean up references in study_actual_columns\n extra_table_study_id = found_extra_table.split('_')[2]\n\n statement = \"\"\"\n update study_actual_columns \n set table_name = '\"{0}\"' \n where study_id = {1} \n and table_name = '\"{2}\"'\n \"\"\".format(common_extra_table_name, extra_table_study_id, found_extra_table)\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)\n statement = 'commit'\n if debug:\n req.write('<pre>' + statement + '</pre><br/>')\n con.cursor().execute(statement)", "def add_row(self, dict_with_columns_key):\n for column in self.columns:\n val = dict_with_columns_key.get(column)\n if val is None:\n val = \"\"\n self.data[column].append(val)", "def add(table):\n id_storage = common.get_values_from_column(table, 0)\n id_ = common.generate_random(table)\n table = manage_data_from_user(table, id_storage, id_, False)\n\n return table", "def append_row(self, row_dict):\n \n for k,v in row_dict.items():\n \n if(k not in self.table):\n # Heading does not exist yet. Fill in blanks for past items\n self.table[k] = [\"\"] * self.rowcount\n \n # clean up value\n v = v.strip()\n # try converting string to a number\n try:\n v = ast.literal_eval(v)\n except:\n pass\n \n self.table[k].append(v)\n \n self.rowcount = self.rowcount + 1\n \n # Even out any columns in DB that were not filled\n for hdr in self.table:\n if(len(self.table[hdr]) < self.rowcount):\n self.table[hdr].append(None)", "def add(table, record):\n index_id = 0\n record.insert(index_id, common.generate_random(table))\n table.append(record)\n data_manager.write_table_to_file(\"model/sales/sales.csv\", table)\n\n return table", "def append_columns(cls, columns, grid=None, grid_url=None):\n grid_id = parse_grid_id_args(grid, grid_url)\n\n grid_ops.ensure_uploaded(grid_id)\n\n # Verify unique column names\n column_names = [c.name for c in columns]\n if grid:\n existing_column_names = [c.name for c in grid]\n column_names.extend(existing_column_names)\n duplicate_name = utils.get_first_duplicate(column_names)\n if duplicate_name:\n err = exceptions.NON_UNIQUE_COLUMN_MESSAGE.format(duplicate_name)\n raise exceptions.InputError(err)\n\n # This is sorta gross, we need to double-encode this.\n body = {\"cols\": _json.dumps(columns, cls=PlotlyJSONEncoder)}\n fid = grid_id\n response = v2.grids.col_create(fid, body)\n parsed_content = response.json()\n\n cls._fill_in_response_column_ids(columns, parsed_content[\"cols\"], fid)\n\n if grid:\n grid.extend(columns)", "def _with_columns(self, columns):\n table = Table()\n for label, column in zip(self.column_labels, columns):\n self._add_column_and_format(table, label, column)\n return table", "def create_table_load_records(self, tablename, records, has_header=True):\n # Column names from header, or make up.\n if has_header:\n header = records.pop(0)\n header = [utils.name_cleaned(n) for n in 
header]\n if len(header) != len(set(header)):\n raise ValueError(\"non-unique header column names\")\n else:\n header = [f\"column{i+1}\" for i in range(len(records[0]))]\n\n # Infer column types and constraints.\n schema = {\"name\": tablename}\n schema[\"columns\"] = [{\"name\": name} for name in header]\n try:\n for i, column in enumerate(schema[\"columns\"]):\n type = None\n column[\"notnull\"] = True\n\n # First attempt: integer\n for n, record in enumerate(records):\n value = record[i]\n if value is None:\n column[\"notnull\"] = False\n elif isinstance(value, int):\n pass\n elif isinstance(value, str):\n try:\n int(value)\n except (ValueError, TypeError):\n break\n else:\n break\n else:\n type = constants.INTEGER\n\n # Next attempt: float\n if type is None:\n for n, record in enumerate(records):\n value = record[i]\n if value is None:\n column[\"notnull\"] = False\n elif isinstance(value, (float, int)):\n pass\n elif isinstance(value, str):\n try:\n float(value)\n except (ValueError, TypeError):\n break\n else:\n break\n else:\n type = constants.REAL\n\n # Default: text\n if type is None:\n column[\"type\"] = constants.TEXT\n if column[\"notnull\"]:\n for record in records:\n value = record[i]\n if value is None:\n column[\"notnull\"] = False\n break\n else:\n column[\"type\"] = type\n except IndexError:\n raise ValueError(f\"record {i+1} has too few items\")\n\n # Create the table.\n self.add_table(schema)\n\n # Actually convert values in records.\n for i, column in enumerate(schema[\"columns\"]):\n type = column[\"type\"]\n if type == constants.INTEGER:\n for n, record in enumerate(records):\n value = record[i]\n if value is not None:\n record[i] = int(value)\n elif type == constants.REAL:\n for n, record in enumerate(records):\n value = record[i]\n if value is not None:\n record[i] = float(value)\n\n # Insert the data.\n sql = 'INSERT INTO \"%s\" (%s) VALUES (%s)' % (\n tablename,\n \",\".join(['\"%(name)s\"' % c for c in schema[\"columns\"]]),\n \",\".join(\"?\" * len(schema[\"columns\"])),\n )\n with self.dbcnx:\n self.dbcnx.executemany(sql, records)\n self.update_table(schema)", "def add(table):\n\n list_labels = [\"Name: \", \"Manufacturer: \", \"purchase_date: \", \"Durability: \"]\n data_input = ui.get_inputs(list_labels, \"Add new record\")\n\n id_ = common.generate_random(table)\n is_date_number = data_input[2].isdigit() and len(data_input) == 4\n is_durability_number = data_input[3].isdigit()\n\n if is_date_number is True and is_durability_number is True:\n data_input.insert(0, id_)\n table.append(data_input)\n\n elif is_date_number is False:\n ui.print_error_message(\"Wrong year format! Record add failed!\")\n\n elif is_durability_number is False:\n ui.print_error_message(\"Wrong durability format! 
Record add failed!\")\n\n return table", "def test_dummydb_add_data_to_table_wrong_column_name(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n result = db.select(\"new_table\", four=1)", "def _alter_table(self, names, types) :\n\n cur = self.con.cursor()\n for i in range(min(len(names), len(types))) :\n alter_sql = 'ALTER TABLE \"%s\" ADD COLUMN \"%s\" %s' % (self.name, names[i], types[i])\n cur.execute(alter_sql)", "def append_names(self, excludes=None, append=\", \"):\n\n if self.columns and self.q_str.startswith(\"INSERT\"):\n q_temp = []\n if excludes and isinstance(excludes, str):\n for key in self.columns.keys():\n if key != excludes:\n q_temp.append(key)\n elif excludes and isinstance(excludes, list):\n for key in self.columns.keys():\n if key not in excludes:\n q_temp.append(key)\n elif excludes is None:\n for key in self.columns.keys():\n q_temp.append(key)\n else:\n raise ValueError(\"append_names accepts lists, dict keys and pd.DataFrame indexes\")\n else:\n raise ValueError(\"append_names does not accept NULL init_objections\")\n\n q_temp = \", \".join(q_temp)\n self.q_str = append.join([self.q_str, q_temp])\n self.q_str = \"\".join([self.q_str, \") VALUES \"])", "def __refmag_table_append(self, table_new): \n if not \"mag_calib_unc\" in table_new.colnames:\n table_new[\"mag_calib_unc\"] = [None for i in range(len(table_new))]\n \n for r in table_new[self.__mag_colnames]:\n self.__ref_mags.add_row(r)\n self.__ref_mags.sort(['ra','dec','MJD'])", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def AppendCols(self, numCols=1): # real signature unknown; restored from __doc__\n return False", "def project(self, name, cols) :\n\n ct = [v \n for v in list(zip(self.get_cols(), self.get_types()))\n if v[0] in cols]\n\n base_row = dict(zip(cols, itertools.repeat(None)))\n \n def make_new_row(r) :\n values = {}\n values.update(base_row)\n values.update(r.as_dict())\n return values\n new_rows = [make_new_row(row) for row in self]\n\n new_table = self.factory.new_table(name, ct)\n new_table.add_rows(new_rows)\n return new_table", "def add_column(self, schema):\n self[schema.name] = schema.copy()", "def add_table_column(\n self, table: Table, name: Optional[str] = None, values: Any = None\n ):\n self._requires_table(table)\n table.append_column(name, values)", "def deferred_to_columns_cb(self, target, model, fields):\n table = model._meta.db_table\n if table not in target:\n target[table] = set()\n for field in fields:\n if not hasattr(field.column, \"columns\"):\n target[table].add(field.column)\n else:\n target[table].update(field.column.columns)", "def add_row(self, *column_data):\n row = \"<tr>\"\n row += \" \".join(f\"<td>{header}</td>\" for header in column_data)\n 
row += \"</tr>\\n\"\n self.result += row", "def AddColumn(self, column):\n self.columns.append(column)\n self.column_dict[column.column_id] = column", "def add_select(self, *column):\n if not column:\n column = []\n\n self.columns += column\n\n return self", "def create_table_columns(database, table, columns):\r\n in_tests.test_create_table_columns(database, table, columns)\r\n\r\n connection = sqlite3.connect(database)\r\n cursor = connection.cursor()\r\n\r\n for column in columns:\r\n query = f\"ALTER TABLE {table} ADD COLUMN {column}\"\r\n cursor.execute(query)\r\n connection.commit()\r\n cursor.close()\r\n connection.close()\r\n\r\n out_tests.test_create_table_columns(\r\n get_table_columns_names(database, table), columns)\r\n return ()", "def test_query_all_new_column(self):\n session = self.prepare(user_table=True)\n\n self._insert_data(session)\n\n assert_one(\n session,\n \"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'\",\n ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]\n )\n\n session.execute(\"ALTER TABLE users ADD first_name varchar;\")\n\n results = list(session.execute(\"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'\"))\n assert len(results) == 1\n assert hasattr(results[0], 'first_name'), 'Column \"first_name\" not found'\n assert_one(\n session,\n \"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'\",\n ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]\n )", "def append_row(self):\r\n values = []\r\n vals_to_insert = ''\r\n\r\n for key in Output.COLUMNS:\r\n values.append(str(self[key]))\r\n\r\n # Replace any Quotes in parsed record with double quotes\r\n for i in values:\r\n vals_to_insert += i.replace('\"', '\"\"') + '\",\"'\r\n\r\n vals_to_insert = '\"' + vals_to_insert[:-3] + '\"'\r\n insert_sqlite_db(vals_to_insert)", "def add_column_into_source(self, tap_type, table, new_column):\n run_query_method = getattr(self, f'run_query_tap_{tap_type}')\n run_query_method(\n f'ALTER TABLE {table} ADD {new_column[\"name\"]} int'\n )\n run_query_method(\n f'UPDATE {table} set {new_column[\"name\"]}={new_column[\"value\"]} where 1=1'\n )", "def add(table):\n # 2\n id = common.generate_random(table)\n addnew = ui.get_inputs(\n ['month: ',\n 'day: ',\n 'year: ',\n 'type (in=income, out= outflow): ',\n 'amount (of transaction in USD): '],\n 'Adding item to Accounting table')\n addnew.insert(0, id)\n table.append(addnew)\n data_manager.write_table_to_file('accounting/items.csv', table)\n\n return table", "def add_row(self, row):\n \n new_row = pd.DataFrame(data=[row], columns = self.table.columns) \n self.table = self.table.append(new_row, ignore_index=True)", "def add(table):\n\n new_list_to_add = []\n\n new_list_to_add.append(common.generate_random(table))\n new_list_to_add.extend(ui.get_inputs([\"Please add the Name: \"],\"\"))\n new_list_to_add.extend(ui.get_inputs([\"Please add the Manufacturer: \"],\"\"))\n new_list_to_add.extend(ui.get_inputs([\"Please add the Year of Purchase: \"],\"\"))\n new_list_to_add.extend(ui.get_inputs([\"Please add the Durability Time in Year/s: \"],\"\"))\n \n table.append(new_list_to_add) # hozzáadni a csv filehoz\n data_manager.write_table_to_file(\"inventory/inventory.csv\", table)\n\n return table", "def setAllColumns(self, newAllColumns):\n \n pass", "def add_numeric_cols(self):\n self.create_numeric_status()\n self.create_date_integer()", "def appendData(self, dataframe, tableName, truncate=False):\n if truncate:\n truncateSetting = 'replace'\n else:\n truncateSetting = 
'append'\n dataframe.to_sql(name=tableName, con=self.writeConn, if_exists=truncateSetting, index=False)", "def __update_feature_table_columns(self):\n self.__init_table()\n\n feature_dict_sorted_keys = feature_extractor_definition.keys()\n feature_dict_sorted_keys.sort()\n for key in feature_dict_sorted_keys:\n if not self.__has_feature_column(key):\n self.__add_feature_column(key, feature_extractor_definition[key])", "def add_row(self, *column_data):\n raise NotImplementedError", "def add_column(self, fieldname, column, align=..., valign=...):\n ...", "def add_header(self, *column_headers):\n raise NotImplementedError", "def add_rows(self, dict_with_columns_key):\n max_length = 0\n for vals in dict_with_columns_key.values():\n if len(vals) > max_length:\n max_length = len(vals)\n\n for column in self.columns:\n vals = dict_with_columns_key.get(column)\n vals.extend([\"\"] * (max_length - len(vals))) # Enforce all columns to have same length\n self.data[column].extend(vals)", "def populate_dyn(self, table):\n myrow = table.row\n myrow[\"sample_time\"] = int(time.time() - glob.base_time)\n myrow[\"available_bike_stands\"] = self.available_bike_stands\n myrow[\"available_bikes\"] = self.available_bikes\n myrow[\"last_update\"] = self.last_update\n myrow[\"status\"] = self.status\n myrow.append()\n table.flush()", "def add_all_lines(conn, table_values):\n\n column_list = table_values[0]\n column_row = \",\".join(column_list)\n qmark = \"?\"\n col_count = len(column_list)\n for cols in range(1, col_count):\n qmark += \", ?\"\n cols = cols\n\n cur = conn.cursor()\n cur.execute(\"DROP TABLE IF EXISTS ayasdi_table;\")\n cur.execute(\"CREATE TABLE ayasdi_table (\" + column_row + \");\")\n cur.executemany(\\\n \"INSERT INTO ayasdi_table (\" + column_row + \") VALUES (\" + qmark + \");\", \\\n table_values)", "def add_column(self, tap_column):\r\n self.__columns.append(tap_column)", "def append_table(self, table):\n\n self._db_manager.register_table(table)", "def setcolumns(self, columns):\n\n # Store the column titles (\"raw\" format)\n # This is a list of white-space separated strings\n self.__columns = columns\n # Create table_column objects\n for col in columns.split():\n self.addcolumn(col)\n # Attempt to populate the column objects\n if self.__data:\n self.__populate_columns()\n self.__nonzero = True", "def _bulk_add_rows(self, converted) :\n\n insert_sql = 'INSERT INTO \"%s\" VALUES (%s)' % (self.name, ','.join(['?'] * len(self.cols)))\n cur = self.con.cursor()\n cur.executemany(insert_sql, converted)", "def add_blank_data_row(self):\n last_row_count = self.csv_data_table.rowCount()\n column_count = self.csv_data_table.columnCount()\n self.csv_data_table.insertRow(last_row_count)\n for empty_col in range(0, column_count):\n item = QTableWidgetItem('')\n self.csv_data_table.setItem(last_row_count, empty_col, item)", "def select(self, keep_columns=None, additional_columns=None):\n if keep_columns is None:\n keep_columns = self.columns\n\n if additional_columns is None:\n additional_columns = {}\n\n result_table = Table(keep_columns + list(additional_columns.keys()))\n\n for row in self.rows:\n new_row = [row[column] for column in keep_columns]\n\n for column_name, calculation in additional_columns.items():\n new_row += [calculation(row)]\n\n result_table.insert(new_row)\n\n return result_table", "def visit_alter_table_append_command(element, compiler, **kw):\n if element.ignore_extra:\n fill_option = 'IGNOREEXTRA'\n elif element.fill_target:\n fill_option = 'FILLTARGET'\n else:\n fill_option = 
''\n\n query_text = \\\n 'ALTER TABLE {target} APPEND FROM {source} {fill_option}'.format(\n target=compiler.preparer.format_table(element.target),\n source=compiler.preparer.format_table(element.source),\n fill_option=fill_option,\n )\n return compiler.process(sa.text(query_text), **kw)", "def populate_table(database, table, data):\n\n for row in data:\n database.session.add(table(row))\n database.session.commit()", "def add_table_column(self, schema, column):\n if not column[\"name\"] or not constants.NAME_RX.match(column[\"name\"]):\n raise ValueError(\"invalid column name\")\n if utils.name_in_nocase(column[\"name\"], [c[\"name\"] for c in schema[\"columns\"]]):\n raise ValueError(\"non-unique column name\")\n if column[\"type\"] not in constants.COLUMN_TYPES:\n raise ValueError(\"invalid column type\")\n sql = (\n f'''ALTER TABLE \"{schema['name']}\"'''\n f\"\"\" ADD COLUMN \"{column['name']}\" {column['type']}\"\"\"\n )\n if column.get(\"notnull\"):\n notnull = [\"NOT NULL\"]\n if column[\"type\"] == constants.INTEGER:\n notnull.append(\"DEFAULT 0\")\n elif column[\"type\"] == constants.REAL:\n notnull.append(\"DEFAULT 0.0\")\n elif column[\"type\"] in (constants.TEXT, constants.BLOB):\n notnull.append(\"DEFAULT ''\")\n sql += \" \" + \" \".join(notnull)\n self.dbcnx.execute(sql)\n schema[\"columns\"].append(column)\n self.update_table(schema)", "def add_transaction(table, id, store_id, hr_id, crm_id, quantity):\n record = [id, store_id, hr_id, crm_id, quantity]\n table.append(record)\n\n return table", "def set_columns(self, columns):\n self.columns = columns", "def addcolumn(self, column):\n if column not in self.headersindex:\n database = managers.database_manager.get_database(self.owner_id, self.database_id)\n cur = database.get_connection().cursor()\n cur.execute(\"ALTER TABLE \\'%s\\' ADD COLUMN %s\" % (self.name, column.to_declaration()))", "def add_extra_column(self, prof_gas, retrieval_date, mod_data, **kwargs):\n pass", "def add_column(self, name, **kwargs):\n\n import sqlalchemy.orm.session\n from dbexceptions import NotFoundError\n\n s = sqlalchemy.orm.session.Session.object_session(self)\n\n assert s, \"Can't create column with this method unless the table has a session\"\n\n name = Column.mangle_name(name)\n\n if not kwargs.get('fast', False):\n try:\n row = self.column(name)\n except NotFoundError:\n row = None\n else:\n row = None\n\n if row:\n extant = True\n\n else:\n row = Column(self, name=name, **kwargs)\n extant = False\n\n if kwargs.get('data', False):\n row.data = dict(row.data.items() + kwargs['data'].items())\n\n for key, value in kwargs.items():\n\n excludes = ['d_id', 't_id', 'name', 'schema_type', 'data']\n\n # Proto is the name of the object.\n if key == 'proto' and isinstance(value, basestring):\n key = 'proto_vid'\n\n if extant:\n excludes.append('sequence_id')\n\n if key[0] != '_' and key not in excludes:\n try:\n setattr(row, key, value)\n except AttributeError:\n raise AttributeError(\n \"Column record has no attribute {}\".format(key))\n\n if isinstance(value, basestring) and len(value) == 0:\n if key == 'is_primary_key':\n value = False\n setattr(row, key, value)\n\n # If the id column has a description and the table does not, add it to\n # the table.\n if row.name == 'id' and row.is_primary_key and not self.description:\n self.description = row.description\n s.merge(self)\n\n if extant:\n row = s.merge(row)\n else:\n s.add(row)\n\n if kwargs.get('commit', True):\n s.commit()\n\n return row", "def add(table):\n\n # your code\n row = []\n 
row.append(common.generate_random(table))\n\n inputs = ui.get_inputs([\"TITLE: \", \"MANUFACTURER: \", \"PRICE: \", \"STOCK: \"], \"Fill the records below: \")\n for i in inputs:\n row.append(i)\n\n table.append(row)\n\n return table", "def _insert_column(self, column_name, column_type, table, params=None, overwrite=False, after_col=None, verbose=True):\n \n not_null = ''\n auto_increment = ''\n \n if params != None and 'not_null' in params:\n not_null = 'NOT NULL'\n \n \n if params != None and 'auto_increment' in params:\n auto_increment = \"AUTO_INCREMENT\"\n \n \n ADD_COLUMN_COMMAND = \"ALTER TABLE {0} ADD {1} {2} {3} {4}\".format(table, column_name, column_type, not_null, auto_increment)\n \n if (after_col != None and type(after_col) is str):\n ADD_COLUMN_COMMAND += \" AFTER {0} \".format(after_col)\n \n \n self.cursor.execute(ADD_COLUMN_COMMAND)\n \n if verbose: \n print(\"Adding the column '{0}' to the table '{1}'...\".format(column_name, table))\n print(\"\\t\" + ADD_COLUMN_COMMAND) \n \n \n if params != None and 'foreign_key' in params:\n \n if 'references' not in params:\n raise InvalidParameterError\n \n referenced_table = params['references'].split('(')[0]\n referenced_column = params['references'].split('(')[1][:-1] \n \n \n if (not self.check_table(referenced_table, verbose=False)):\n raise(TableNotFoundError)\n \n \n if (not self.check_column(referenced_column, referenced_table, verbose=False)):\n raise(ColumnNotFoundError)\n \n \n ADD_FOREIGN_KEY_COMMAND = \"ALTER TABLE {0} ADD FOREIGN KEY ({1}) REFERENCES {2}({3})\".format(table, column_name, referenced_table, referenced_column)\n \n \n if verbose: \n print(\"\\t\" + ADD_FOREIGN_KEY_COMMAND) \n \n self.cursor.execute(ADD_FOREIGN_KEY_COMMAND)", "def add_records(self):\n\n self.setup_progressbar(\"Loading {} records from table {}...\"\n .format(self.record_count, self.lyr.name()),\n self.record_count)\n\n provider = self.lyr.dataProvider()\n for i, row in enumerate(self.cur):\n feature = QgsFeature()\n feature.setGeometry(QgsGeometry())\n feature.setAttributes([flds for flds in row])\n provider.addFeatures([feature])\n self.update_progressbar(i)\n\n iface.messageBar().clearWidgets()\n iface.messageBar().pushMessage(\"Ready\", \"{} records added to {}\".format(str(self.record_count), self.lyr.name())\n , level=QgsMessageBar.INFO)", "def add_new_cols(cat, prefix=\"\", floatcols=None, boolcols=None):\n\t\n\tif floatcols != None:\n\t\tfor col in floatcols:\n\t\t\tcat.add_column(astropy.table.MaskedColumn(name=prefix+col, dtype=float, length=len(cat)))\n\t\t\tcat[prefix+col].mask = [True] * len(cat)\n\tif boolcols != None:\n\t\tfor col in boolcols:\n\t\t\tcat.add_column(astropy.table.MaskedColumn(name=prefix+col, dtype=bool, length=len(cat)))\n\t\t\tcat[prefix+col].mask = [True] * len(cat)", "def update_table_columns(self, table_name, table):\n table_definition = self._table_definitions[table_name]\n new_columns = table.columns.difference(table_definition.c.keys())\n new_column_types = {c: table.dtypes[c] for c in new_columns}\n\n allows_covariates = table_definition.name in [\"avgint\", \"data\"]\n\n good_prefixes = [\"c_\"]\n if allows_covariates:\n good_prefixes.append(\"x_\")\n bad_column_names = [c for c in new_columns if c[:2] not in good_prefixes]\n if bad_column_names:\n msg = f\"\"\"\n Table '{table_definition.name}' has these columns {list(table_definition.c.keys())}.\n It allows additional comment columns, which must start 'c_'.\"\"\"\n if allows_covariates:\n msg += \" In addition it allows covariate columns, which 
must start with 'x_'.\"\n msg += f\" You supplied columns that don't meet those requirements: {bad_column_names}\"\n\n raise ValueError(dedent(msg))\n\n add_columns_to_table(table_definition, new_column_types)", "def _add_fields(self, fields):\n for field in fields:\n self.add(field)", "def __limmag_table_append(self, table_new): \n for r in table_new[self.__limmag_colnames]:\n self.__lim_mags.add_row(r)\n self.__lim_mags.sort(['ra','dec','MJD'])", "def test_add_table(self):\n name_column = Varchar()\n name_column._meta.name = \"name\"\n schema: t.List[DiffableTable] = [\n DiffableTable(\n class_name=\"Band\", tablename=\"band\", columns=[name_column]\n )\n ]\n schema_snapshot: t.List[DiffableTable] = []\n schema_differ = SchemaDiffer(\n schema=schema, schema_snapshot=schema_snapshot, auto_input=\"y\"\n )\n\n create_tables = schema_differ.create_tables\n self.assertTrue(len(create_tables.statements) == 1)\n self.assertEqual(\n create_tables.statements[0],\n \"manager.add_table('Band', tablename='band')\",\n )\n\n new_table_columns = schema_differ.new_table_columns\n self.assertTrue(len(new_table_columns.statements) == 1)\n self.assertEqual(\n new_table_columns.statements[0],\n \"manager.add_column(table_class_name='Band', tablename='band', column_name='name', db_column_name='name', column_class_name='Varchar', column_class=Varchar, params={'length': 255, 'default': '', 'null': False, 'primary_key': False, 'unique': False, 'index': False, 'index_method': IndexMethod.btree, 'choices': None, 'db_column_name': None})\", # noqa\n )", "def append(self, other, ensureFilled = True):\n\t\tself.rows.extend(other.rows)\n\t\tself.headers.extend([x for x in other.headers if not x in self.headers])\n\t\tif(ensureFilled):\n\t\t\tself.ensureFilled()\n\t\treturn self", "def add_column_to_staging_table(cursor,table_schema,table_name,column_name):\n if not check_if_column_exists(cursor, table_schema, table_name, column_name):\n add_column = \"ALTER TABLE \" + table_schema + \".\" + table_name + \" ADD COLUMN \" + column_name + \" text;\"\n cursor.execute(add_column)", "def columns(self, *args):\n column_set = set(self._columns)\n for c in args:\n if c in column_set:\n continue\n else:\n self._columns.append(c)\n # column_set.add(c) # FIXME failing tests\n return self", "def _add_cols(df: pandas.DataFrame, scope = (globals(), locals())) -> None:\n command : str = input(\"\\nAdd a column:\\n\")\n if command.lower() in ['n', 'no', 'quit()', 'exit', 'return']:\n return\n\n col_name : str = command[ \\\n re.search(r'[\\w\\.\\(\\)]+', command).start(): \\\n re.search(r'[\\w\\.\\(\\)]+', command).end() \\\n ]\n # new column's name\n\n arg : str = command[re.search(r'[=,;]', command).end():]\n # the new column's \"function\"\n ref_cols = re.findall(r'(?<=\\{)\\w[\\w\\.\\(\\)]*(?=\\})', arg)\n # df column names that are referenced to create new columns\n\n for i in range(len(ref_cols)):\n arg = re.sub(\n f'{{{ref_cols[i]}}}',\n f'df[\\'{ref_cols[i]}\\']',\n arg\n )\n # substituting references\n\n scope[0].update(globals())\n scope[1].update(locals())\n\n col_arg = eval(arg, scope[0], scope[1])\n # pandas.Series for type checking\n df[col_name] = col_arg\n # creating column\n\n more : str = input(\"\\nWould you like to add more columns?\\n\")\n if more.lower() in ['y', 'yes', 'continue', 'true']:\n return _add_cols(df)\n return", "def test_dummydb_add_data_to_table(self):\n db = DummyDB()\n columns = {\n \"one\": int,\n \"two\": str,\n \"three\": bool,\n }\n db.create_table(\"new_table\", columns)\n 
db.insert(\"new_table\", one=1, two=\"haunted\", three=True)\n result = db.select(\"new_table\", one=1)\n self.assertEqual(result[0]['two'], \"haunted\")" ]
[ "0.6977524", "0.66601425", "0.6458155", "0.63593054", "0.6348672", "0.63241756", "0.63117015", "0.62366897", "0.62343997", "0.6218119", "0.61731595", "0.61588264", "0.60610455", "0.6052164", "0.6035236", "0.60302246", "0.5991479", "0.59869546", "0.5986548", "0.5966985", "0.58958644", "0.5880533", "0.5879068", "0.5862508", "0.58427334", "0.5828609", "0.58023477", "0.57889324", "0.57874537", "0.5785345", "0.57832515", "0.5767601", "0.57629925", "0.575807", "0.5742575", "0.5731693", "0.57316893", "0.5731494", "0.5723864", "0.57087386", "0.56923956", "0.5686829", "0.5686829", "0.5686829", "0.5686829", "0.5686829", "0.5686829", "0.5686829", "0.5686829", "0.5686829", "0.5684389", "0.5675704", "0.56706196", "0.5667847", "0.5666918", "0.56627643", "0.5660885", "0.5656983", "0.56354594", "0.56335616", "0.563104", "0.56303525", "0.5619057", "0.56130564", "0.5607931", "0.5602491", "0.56007135", "0.5579626", "0.55789834", "0.55769867", "0.55712295", "0.55686224", "0.55666274", "0.5550861", "0.5549117", "0.5507906", "0.5502468", "0.5499345", "0.5491019", "0.5490691", "0.5484255", "0.5479541", "0.5478231", "0.54618263", "0.5461425", "0.54568934", "0.5451791", "0.54502076", "0.54494864", "0.54466987", "0.5444382", "0.5444229", "0.5436978", "0.54364485", "0.5429888", "0.54257834", "0.5422738", "0.54225826", "0.5408795", "0.5407415", "0.5403843" ]
0.0
-1
main driver of program
def main(*argv): try: attr_features = argv[0] sql_clause = argv[1] polygon_grid = argv[2] error_field_count = str(argv[3]) #'NULL_COUNT'# error_field_def = str(argv[4]) #'NULL_COLUMNS'# output_fc = argv[5] out_fc_exists = arcpy.Exists(output_fc) # Local Variable # scratchFolder = env.scratchFolder scratchGDB = env.scratchGDB results = [] # Logic # if not out_fc_exists: output_gdb = validate_workspace(os.path.dirname(output_fc)) # Create the grid # out_grid = arcpy.CopyFeatures_management(polygon_grid, output_fc)[0] out_grid = extend_table(out_grid) where_clause=None else: arcpy.MakeFeatureLayer_management(output_fc, "lyr") arcpy.SelectLayerByLocation_management("lyr", "HAVE_THEIR_CENTER_IN", polygon_grid) oids = [row[0] for row in arcpy.da.SearchCursor("lyr", "OID@")] if len(oids) >1: oids_string = str(tuple(oids)) else: oids_string = str('('+ str(oids[0]) + ')') where_clause = 'OBJECTID IN ' + oids_string error_field = (error_field_def, error_field_count) # Process the Data # poly_desc = arcpy.Describe(output_fc) fc_desc = arcpy.Describe(attr_features) if poly_desc.extent.within(fc_desc.extent): temp_fc = 'in_memory/clip' arcpy.AddMessage('Clipping features to polygon') arcpy.Clip_analysis(attr_features, output_fc, temp_fc) arcpy.AddMessage('Created in_memory fc') #data_sdf = geomotion.SpatialDataFrame.from_featureclass(temp_fc, # fields=[value_field]) if sql_clause: attr_sdf = SpatialDataFrame.from_featureclass(temp_fc, fields=error_field, where_clause=sql_clause) else: attr_sdf = SpatialDataFrame.from_featureclass(temp_fc, fields=error_field) arcpy.AddMessage('features read into spatial dataframe after clipping') else: #data_sdf = geomotion.SpatialDataFrame.from_featureclass(, fields=[value_field]) arcpy.AddMessage('features read into spatial dataframe without clipping') if sql_clause: attr_sdf = SpatialDataFrame.from_featureclass(attr_features, fields=error_field, where_clause=sql_clause) else: attr_sdf = SpatialDataFrame.from_featureclass(attr_features, fields=error_field) grid_sdf = SpatialDataFrame.from_featureclass(filename=output_fc, where_clause=where_clause) index = attr_sdf.sindex for idx, row in enumerate(grid_sdf.iterrows()): errors = [] attrs = [] geom = row[1].SHAPE oid = row[1].OBJECTID print(str(oid)) ext = [geom.extent.lowerLeft.X, geom.extent.lowerLeft.Y, geom.extent.upperRight.X, geom.extent.upperRight.Y] row_oids = list(index.intersect(ext)) df_current = attr_sdf.loc[row_oids]#.copy() sq = df_current.geometry.disjoint(geom) == False fcount = len(df_current[sq]) # Total Count q2 = df_current[error_field_count] > 0 #& q2 df_current = df_current[sq].copy() # Get the # of features with deficiency_cnt > 0 #print("here") if fcount>0: #len(df_current) > 0: errors += df_current[error_field_count].tolist() arcpy.AddMessage(str(errors)) def process(x): print(x) return [va for va in x.replace(' ', '').split('|')[-1].split(',') if len(va) > 1] for e in df_current[error_field_def].apply(process).tolist(): attrs += e del e row = get_answers(oid=oid, err=errors, attr=attrs, feature_count=fcount) results.append(row) if len(results) > 250: extend_table(table=output_fc, rows=results) results = [] del idx del row del errors del attrs del geom del oid del ext del row_oids del df_current del sq del q2 if len(results) > 0: extend_table(table=output_fc, rows=results) del index del results del grid_sdf del attr_sdf except arcpy.ExecuteError: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with 
error message: %s" % synerror) arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2)) except FunctionError as f_e: messages = f_e.args[0] arcpy.AddError("error in function: %s" % messages["function"]) arcpy.AddError("error on line: %s" % messages["line"]) arcpy.AddError("error in file name: %s" % messages["filename"]) arcpy.AddError("with error message: %s" % messages["synerror"]) arcpy.AddError("ArcPy Error Message: %s" % messages["arc"]) except: line, filename, synerror = trace() arcpy.AddError("error on line: %s" % line) arcpy.AddError("error in file name: %s" % filename) arcpy.AddError("with error message: %s" % synerror)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n pass", "def main():\n run_program()", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():\n\tpass", "def main():\n return", "def main(self):\r\n pass", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main(self) -> None:\n pass", "def main(self):", "def run():\n main()", "def main():\n Main()", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n\n BASIC.run(PROGRAM)", "def main():\n\n pass", "def main():\n driver = Driver()\n driver.start()", "def main() -> None:\n return", "def main(args):", "def main(args):", "def main():\n ...", "def main(args=None):", "def main(args=None):", "def main():\n pass", "def main():\n\n pass\n\n return None", "def main():\n\n pass\n\n return None", "def main(self):\n pass", "def main(cls):\n raise NotImplementedError", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def main(self, params):\n pass", "def main():\n return 0", "def main():\n return 0", "def run_main():\n main(sys.argv)", "def main():\n\tcli = Cli()\n\tcli.run()", "def main():\n print(\"is Running!\")", "def main():\n tng.api.runner()", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def main_cli():\n pass", "def main(args=None):\n pass", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def main(self, params):\n raise NotImplementedError('main() must be implemented.')", "def run():\n # main(sys.argv[1:])\n main()", "def main():\r\n print(\"JoJo\")", "def main():\n pass\n\n if __name__ == \"__main)__\":\n main()", "def main(self, **kwargs) -> None:\n ..." ]
[ "0.8577987", "0.85386586", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.8478492", "0.838479", "0.8375622", "0.8302349", "0.8288314", "0.8288314", "0.8288314", "0.8288314", "0.8198463", "0.8155175", "0.8070857", "0.807074", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.8035722", "0.80341285", "0.803022", "0.79313624", "0.7917631", "0.78524107", "0.78524107", "0.78274816", "0.77894026", "0.77894026", "0.77853703", "0.7696014", "0.7696014", "0.76593953", "0.76228505", "0.76116407", "0.75854725", "0.75674856", "0.75674856", "0.75564826", "0.7547887", "0.7537844", "0.7523735", "0.74350876", "0.74350876", "0.74350876", "0.7414662", "0.7414662", "0.7414662", "0.7414662", "0.7414662", "0.7414662", "0.7414662", "0.7414662", "0.74079156", "0.7369287", "0.735195", "0.735195", "0.730511", "0.7289348", "0.72711736", "0.72628343", "0.7253919" ]
0.0
-1
The constructor of the class. Here you will need to create the attributes ("instance variables") that were described in the docstring. Note that some of the attributes are defined by parameters passed to this constructor method, but others are not.
def __init__(self, name, full_name, team, eye_color, hair_color, base):
    self.name = name
    self.full_name = full_name
    self.team = team
    self.eye_color = eye_color
    self.hair_color = hair_color
    self.base = base
    self.powers = []
    self.nemeses = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **kwds):\n raise NotImplementedError", "def __init__(self) -> None:\n # TODO: Provide the complete constructor for this object", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs) -> None:\n pass", "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "def __init__(self, **kwargs):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, **kwargs):\n pass", "def __init__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __init__ (self, *args, **kw):\n self.__args = args\n self.__kw = kw", "def __init__(self, *args, **kwargs):\n raise NotImplementedError()", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "def __init__(self, *args, **kwargs):\n self._args = args\n self._kwargs = kwargs", "def __init__(name, title=\"\", description=\"\"):", "def __init__ (self):\n pass", "def __init__(self, **kwargs):\n self.__kwargs = kwargs", "def __init__(self, name, price, publisher):\n\n\t\t# passes self which means itself. only defines name, price, and publisher\n\t\tself.name = name\n\t\tself.price = price\n\t\tself.publisher = publisher\n\t\t# any of these variables are available with the instances below.\n\t\t# called attributes", "def __init__(self, **kwargs):\n self.swagger_types = {\n 'key': 'str',\n 'display_name': 'str',\n 'description': 'str',\n 'catalog_id': 'str',\n 'uri': 'str',\n 'job_type': 'str',\n 'lifecycle_state': 'str',\n 'is_sample_data_extracted': 'bool',\n 'time_created': 'datetime'\n }\n\n self.attribute_map = {\n 'key': 'key',\n 'display_name': 'displayName',\n 'description': 'description',\n 'catalog_id': 'catalogId',\n 'uri': 'uri',\n 'job_type': 'jobType',\n 'lifecycle_state': 'lifecycleState',\n 'is_sample_data_extracted': 'isSampleDataExtracted',\n 'time_created': 'timeCreated'\n }\n\n self._key = None\n self._display_name = None\n self._description = None\n self._catalog_id = None\n self._uri = None\n self._job_type = None\n self._lifecycle_state = None\n self._is_sample_data_extracted = None\n self._time_created = None", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__args = args\n self.__kwargs = kwargs", "def __init__(self, *args):\n pass", "def __init__(self, **kwargs):\n _declarative_constructor(self, **kwargs)", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, 
*args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self, *args):\r\n pass", "def __init__(self,*args):\n pass", "def __init__(self, name, typing, reflection, year):#Taking in parameters\n self.n = name#Assigning variables\n self.t = typing\n self.r = reflection\n self.y = year", "def __init__(self, make, model, year): # 定义类的默认方法\n self.make = make # 传入变量赋值给可供类中其他方法调用或实例调用的变量\n self.model = model\n self.year = year", "def __init__(self, **kwargs):\n pass", "def __init__(self):\n raise NotImplementedError(\"This class cannot be instantiated!\")", "def __init__(self, name, age):\r\n self.name = name\r\n self.age = age", "def __init__(self, first_name, last_name, age):\n\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n self.name = name\n self.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age", "def __init__(self, a, b, c):\r\n self.a = a\r\n self.b = b\r\n self.c = c", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def __init__(self, *args):\n\n self.args = args", "def __init__(self, *args):\n\n self.args = args", "def __init__(self, **parameters):\n self.parameters = parameters", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'name': 'str',\n 'channel_id': 'str',\n 'channel_secret': 'str',\n 'switcher_secret': 'str',\n 'service_code': 'str',\n 'self_uri': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'channel_id': 'channelId',\n 'channel_secret': 'channelSecret',\n 'switcher_secret': 'switcherSecret',\n 'service_code': 'serviceCode',\n 'self_uri': 'selfUri'\n }\n\n self._id = None\n self._name = None\n self._channel_id = None\n self._channel_secret = None\n self._switcher_secret = None\n self._service_code = None\n self._self_uri = None", "def __init__(self, *args, **kwargs):", "def __init__(self, *args, **kwargs):", "def __init__(self, *args, **kwargs):", "def __init__(self, *args, **kwargs):", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age", "def __init__(self,\n id=None,\n name=None,\n mtype=None,\n 
usage_bytes=None,\n ):\n\n # Initialize members of the class\n self.id = id\n self.name = name\n self.mtype = mtype\n self.usage_bytes = usage_bytes", "def __init__(self,title,description):\n self.title = title\n self.description = description\n \n\t#concept = Concept('title','description) = calls constructor \"__init__\" & pass func\n\t#concept.title = 'title'\n\t#concept.description = description", "def __init__(self,\n height=None,\n length=None,\n weight=None,\n width=None):\n\n # Initialize members of the class\n self.height = height\n self.length = length\n self.weight = weight\n self.width = width", "def __init__ (self) :", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def init(self, *args, **kwds):\n pass", "def __init__(self, **kw_args):\n self._isoFmt = \"%Y%m%dT%H%M%S%z\"\n\n self._init_client_id(kw_args)\n self._init_shared_secret(kw_args)\n self._init_counter_from_time(kw_args)\n self._init_last_count(kw_args)\n self._init_last_count_update_time(kw_args)\n self._init_period(kw_args)\n self._init_password_length(kw_args)\n self._init_tags(kw_args)\n self._init_note(kw_args)", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass" ]
[ "0.795267", "0.78457487", "0.7731138", "0.7731138", "0.7643051", "0.76117766", "0.7513325", "0.7513325", "0.7513325", "0.7513325", "0.7501279", "0.7501279", "0.7501279", "0.7375516", "0.73525584", "0.7352199", "0.73369604", "0.7306995", "0.7306995", "0.7306995", "0.7300619", "0.7215287", "0.7207899", "0.7166026", "0.7156388", "0.7152725", "0.7147736", "0.7131461", "0.7119932", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71170795", "0.71053123", "0.7101743", "0.7096952", "0.70938784", "0.7075368", "0.707356", "0.7067816", "0.70607895", "0.70607895", "0.70607895", "0.70607895", "0.7060403", "0.7060403", "0.7060403", "0.7053699", "0.7051492", "0.70451486", "0.70451486", "0.7043424", "0.70339364", "0.701632", "0.701632", "0.701632", "0.701632", "0.7008673", "0.7008673", "0.7008673", "0.7008673", "0.7008673", "0.7008673", "0.7008673", "0.7008673", "0.7004986", "0.7004972", "0.6996012", "0.69900286", "0.69865", "0.69865", "0.69865", "0.69865", "0.6976645", "0.6968906", "0.69596195", "0.69596195", "0.69596195", "0.69596195", "0.69596195", "0.69596195", "0.69596195" ]
0.0
-1
This is the string method for the class. Whenever an instance of the class is passed to the str() or print() functions, the string returned from this method will be used. Fill in the instance attributes that are outlined by the characters in the variable.
def __str__(self):
    description = f"{self.name} is a member of the {self.team} and possesses the following powers:\n{self.powers}"
    return description
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n # print(self.get_string())\n return self.get_string()", "def __str__(self):\n # print(self.get_string())\n return self.get_string()", "def __str__(self):\n return self.printable()", "def __str__(self):\n if self.f_has_range():\n lenstr = \"len:%d\" % self.f_get_range_length()\n else:\n lenstr = \"\"\n\n if self.v_comment:\n commentstr = \"`%s`\" % self.v_comment\n else:\n commentstr = \"\"\n\n if commentstr or lenstr:\n if commentstr and lenstr:\n combined_str = \"%s, %s\" % (lenstr, commentstr)\n elif commentstr:\n combined_str = commentstr\n elif lenstr:\n combined_str = lenstr\n else:\n raise RuntimeError(\"You shall not pass!\")\n\n infostr = \" (%s)\" % combined_str\n\n else:\n infostr = \"\"\n\n return_string = \"%s %s%s\" % (self.f_get_class_name(), self.v_full_name, infostr)\n\n if not self.f_is_empty():\n return_string += \": \" + self.f_val_to_str()\n\n return return_string", "def __str__(self):\n raise NotImplementedError('Subclasses must define how to layout Character printing')", "def __str__(self):\n return self.string", "def __str__(self):\r\n return repr(self)", "def __str__(self):\n bold = \"*\" if self.bold else ''\n italic = \"/\" if self.italic else ''\n underline = \"_\" if self.underline else ''\n return bold + italic + underline + self.character", "def __str__(self):\r\n to_print = (\"Name: \" + self.name + \", Age: \" +\r\n str(self.age) + \", Hobbys: \" + str(self.hobbys))\r\n return to_print", "def __str__(self) -> str:", "def __str__(self) -> str:", "def __str__(self) -> str:", "def __str__(self) -> str:", "def __str__(self): # pragma: no cover\n return self.display()", "def __str__(self):\n return str(self.__s)", "def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string", "def __str__(self):\n return repr(self)", "def __str__(self):\r\n myname = None\r\n for i in dir():\r\n if isinstance(eval(i), School):\r\n myname = i\r\n# printstring = \"%s\\n\" % self.__class__.__name__\r\n #wanted a way of finding out the name of the instance of the class, but couldn't figure out a way to do it...\r\n if myname == \"self\":\r\n myname = self.name_full\r\n printstring = \"%s (%s)\" % (myname, self.__class__.__name__)\r\n vars = self.__dict__.keys()\r\n vars.sort()\r\n for x in vars:\r\n z = x.ljust(20)\r\n try:\r\n printable_var = self.__dict__[x].encode(\"ASCII\")\r\n #printstring = \"%s\\n\\t%s: %s\" % (printstring, z, self.__dict__[x])\r\n printstring = \"%s\\n\\t%s: %s\" % (printstring, z, printable_var)\r\n except:\r\n printstring = \"%s\\n\\t%s: \" % (printstring, z)\r\n #print \"x\", x\r\n #print \"self.__dict__[x]\", self.__dict__[x]\r\n for dodgy_character in str(self.__dict__[x]):\r\n try:\r\n printable_char = dodgy_character.encode(\"ASCII\")\r\n printstring = \"%s%s\" % (printstring, printable_char)\r\n except:\r\n printstring = \"%s%s\" % (printstring, \"?\")\r\n return \"%s\\n\" % printstring", "def __str__(self):\n return self.get_string()", "def __str__(self):\n print_info = f\"\\nStudent ID: {self._id}, Name: {self._name}, \" \\\n f\"Year: {self._year} \\nPhone: {str(self._phone)}, \" \\\n f\"Address: {str(self._address)} \" \\\n f\"\\nClasses: {str(self._classes)}\" \\\n f\"\\nBirth Date: {self._date}\"\n return print_info", "def __str__(self):\n datastr = self.f_val_to_str()\n return_string = \"%s %s\" % (self.f_get_class_name(), self.v_full_name)\n if self.v_comment:\n return_string += \" (`%s`)\" % self.v_comment\n if datastr:\n return_string += \": \" + datastr\n\n return 
return_string", "def __str__(self) -> str:\r\n return self.process(self.string)", "def __str__(self):\n\n # This appears at the end of the fed method line\n strme = \"{} {} {} {}\"\\\n .format(self.n_itr, self.i_beg, self.i_end, self.omega)\n\n return strme", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self) -> str:\n return str(self.getvalue())", "def __str__(self):\n return self.format()", "def __str__(self):\n string = super().__str__()\n string += \"\\n\" + str(self.get_dict())\n return string", "def __repr__(self) -> str:\n\t\treturn \"\"", "def __repr__(self):\n\n # Sometimes, to avoid messy concatenation, it's ok to have your string\n # go past the 80-charcter line limit\n return f'<Animal animal_id={self.animal_id} name={self.name}> animal_species={self.animal_species}'", "def __str__(self):\n return self.get_str()", "def __repr__(self) -> str:\n\t\treturn \"- {}\\n{}\\n\".format(self.name, self.__str__())", "def __str__(self):\n class_name_str = str(self.__class__.__name__) + \": (\"\n head_str = str(self.head_vertex) + \", \"\n tail_str = str(self.tail_vertex) + \", \"\n weight_str = str(self.weight) + \")\"\n attributes_str = head_str + tail_str + weight_str\n str_rep = class_name_str + attributes_str\n return str_rep", "def __str__(self):\n class_name_str = str(self.__class__.__name__) + \": (\"\n head_str = str(self.head_vertex) + \", \"\n tail_str = str(self.tail_vertex) + \", \"\n weight_str = str(self.weight) + \")\"\n attributes_str = head_str + tail_str + weight_str\n str_rep = class_name_str + attributes_str\n return str_rep", "def __str__(self):\n # Notice the use of self to get the class method\n return self.__repr__()", "def __repr__(self):\r\n return str(self)", "def __str__(self):\n raise NotImplementedError(\"__str__ not implemented for \"+str(type(self)))", "def __str__(self) -> str:\n pass", "def __str__(self):\n temp = super().__str__()\n return temp + '\\nB:\\n' + str(self.B)", "def __repr__(self):\n args = []\n if self.name != \"alpha\":\n args.append(repr(self.name))\n if self.propertiesstr:\n args.append(repr(self.propertiesstr))\n elif self.propertiesstr:\n args.append(\"attr=%r\" % self.propertiesstr)\n return \"%s(%s)\" % (type(self).__name__, \", \".join(args))", "def __str__(self):\r\n return self.__repr__()", "def __str__(self):\n return str(self.GetString())", "def __str__(self):\n astr = '[\\n name: [ ' + self.name + ' ]\\n'\n astr += ' variables: [ '\n for var, init in self.variables:\n astr += '(' + var + ' := ' + init + '), '\n astr = astr[:-2] + ' ]\\n assumptions: [ '\n for assumption in self.assumptions:\n astr += assumption + ', '\n astr = astr[:-2] + ' ]\\n guarantees: [ '\n for guarantee in self.guarantees:\n astr += guarantee + ', '\n return astr[:-2] + ' ]\\n]'", "def __repr__(self):\r\n\t\treturn str(self)", "def __str__(self):\n # newline-delimited values of all the attributes\n return \">%s\\n%s\" % (self.Label, self.Sequence)", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def __repr__(self) -> str:\n return self.__str__()", "def 
__str__(self) -> str:\n result = \"\"\n for attr in self.ATTRS:\n if len(result) != 0:\n result += \"\\n\"\n result += f\"{attr}: {getattr(self, attr)}\"\n\n return result", "def toString():", "def __str__(self):\n _str = indent(self.name, color = self.color)\n _str += indent(\"-\"*len(self.name))\n _str += indent(\"( \" + self.movemement + \" - \"+ self.symbole + \" )\")\n _str += indent(\"Awareness: \" + str(self.awareness))\n for _iel in self.abilities:\n _str += indent(_iel)\n if self.description is not None:\n _str += indent(\"-\"*45 + \"\\n\")\n _str += indent(self.description)\n _str += indent(\"-\"*45 + \"\\n\")\n _str += indent(BOLD_BLACK + \" \"*15 + \"Combat Stats\" + RESET)\n _str += indent(\"-\"*45 + \"\\n\")\n _str += indent(BLUE + \" Horror\" + RESET + \" Toughness\"\\\n + RED + \" Combat\" + RESET)\n _str += indent(BLUE + \"Rating Damage \"\\\n + RED + \"Rating Damage\")\n _str += indent(BLUE + \" \" + str(self.horror_rating)\\\n + \" \"*8 + str(self.horror_damage) + RESET\\\n + \" \"*8 + str(self.toughness) + RED\\\n + \" \"*8 + str(self.combat_rating)\\\n + \" \"*8 + str(self.combat_damage) + RESET)\n _str += indent(\"-\"*45 + \"\\n\")\n return _str", "def __repr__(self) -> str:\n return f\"{self.text}\"", "def __repr__(self) -> str:\n return str(self)", "def __repr__(self):\n return \"\\'{}\\'\".format(self.__str__())", "def __repr__(self) -> str:\n return f\"<Class[{self.name}](line:{self.line})>\"", "def __repr__(self):\n\t\treturn str(self)", "def __repr__(self):\n\t\treturn str(self)", "def __str__(self):\n print_string = 'key: {} | value: {}'.format(\n str(self.key), str(self.value)\n )\n return print_string", "def __str__(self):\n class_name_str = str(self.__class__.__name__) + \": (\"\n attributes_str = str(self.head_vertex) + \", \" + \\\n str(self.tail_vertex) + \")\"\n str_rep = class_name_str + attributes_str\n return str_rep", "def __repr__(self):\n str(self)", "def __str__(self):\n return self._str", "def __str__(self): # reliably restored by inspect\n pass", "def __repr__(self) -> str:\n return f\"{self.title} that starts at {self.start} and ends at {self.end} and is taught by {self.instructor}\"", "def __str__(self):\n # add a dagger symbol to the class name if needed\n temp = super().__str__()\n if self.dagger:\n temp += \".H\"\n return temp", "def __str__(self):\n if self.__description:\n return self.__description\n return repr(self)", "def basestr(cls: Any) -> str:\n return baserepr(cls)", "def __repr__(self):\r\n return self.__str__()", "def __str__(self) -> str:\n\t\treturn \"\"", "def __str__(self) :\n raise NotImplementedError", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __str__(self):\n return self.toString()", "def __str__(self):\n raise NotImplementedError(\"Implemented in a subclass\")", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __repr__(self):\n return str(self)", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return 
self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return self.__repr__()", "def __str__(self):\n return f'''\n {super().__str__()}\n Brand: {self._breand}\n Pover: {self._power} (W)\n Nozzle: {self._nozzle} (pieces)\n '''", "def __str__(self):\n raise NotImplementedError(\"Should be implemented by subclass\")", "def __repr__(self):\n string_representation = self.__str__().replace(\"{\", \"(\").replace(\n \"}\", \")\").replace(\":\", \"=\")\n return f\"{type(self).__name__}{string_representation}\"" ]
[ "0.77815866", "0.77815866", "0.7584849", "0.74873406", "0.7468666", "0.74566126", "0.7438483", "0.7398238", "0.73615706", "0.7347233", "0.7347233", "0.7347233", "0.7347233", "0.7336336", "0.7325533", "0.7299921", "0.7297643", "0.7283104", "0.72561014", "0.72357535", "0.7232867", "0.72177285", "0.7196232", "0.71950704", "0.71950704", "0.71950704", "0.71894294", "0.71894294", "0.71894294", "0.71689457", "0.71684647", "0.71235204", "0.7115328", "0.7101173", "0.70946443", "0.709321", "0.709321", "0.7079545", "0.7062991", "0.70546013", "0.7052219", "0.70382094", "0.70352745", "0.70332736", "0.70305324", "0.7030146", "0.7020428", "0.7016177", "0.70133907", "0.70133907", "0.70133907", "0.70133907", "0.70132935", "0.7010538", "0.7005002", "0.69987684", "0.69918585", "0.698798", "0.69864964", "0.6980585", "0.6980585", "0.69736636", "0.6973032", "0.69651556", "0.6962518", "0.69617313", "0.6960051", "0.69469965", "0.69440514", "0.6940224", "0.69296855", "0.69295424", "0.6919902", "0.6916447", "0.6916447", "0.6916447", "0.6916447", "0.6916447", "0.69087756", "0.69085747", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.69037735", "0.6880875", "0.6880875", "0.6880875", "0.6880875", "0.6880875", "0.6871453", "0.6869943", "0.6867495" ]
0.0
-1
This method will modify the attribute by appending the parameter to it.
def add_power(self, power):
    self.powers.append(power)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setattr__(self, name, value):\n if name in ['parameters', 'program_name']: # Allowed attributes\n self.__dict__[name] = value\n else:\n self.set_parameter(name, value) # treat as a parameter", "def put_param(self, attr_name, val):\n self._params[attr_name] = val", "def add_attribute(self, attr):\n self.add(attr)", "def __setattr__(self,attributeName,attributeValue):\n if (attributeName in StackParameterNames):\n StackParameters[attributeName] = attributeValue\n else:\n object.__setattr__(self, attributeName, attributeValue)\n #endIf", "def add_attribute(self, attr):\n self.attrs.add_attribute(attr)", "def __setitem__(self, name: str, value):\n super(Parameter, self).__setitem__(name, value)", "def append_attribute(myobj, attrib_k, val):\n vals = getattr(myobj, attrib_k, [])\n if val not in vals:\n vals.append(val)\n setattr(myobj, attrib_k, vals)", "def add_attribute(self, attr):\n self.attrs.add(attr)", "def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)", "def __setattr__(self, attr, value):\n super().__setattr__(attr, value)", "def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})", "def set_attribute(self, name, value):\n\n pass", "def __setitem__(self, attribute_name, value):\n pass # pragma: no cover", "def addAttr(self, *args):\n return _libsbml.XMLToken_addAttr(self, *args)", "def add_attribute(self, attr: ResourceAttributeDescriptor) -> None:\n self._attributes[assert_not_none(attr.name)] = attr.bind(self)", "def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)", "def setParameter(self, name, value):", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(node_proto, name, value):\n node_proto.attribute.extend([make_attribute(name, value)])", "def __setitem__(self, name, value):\n self.gattrs[name] = value", "def _setAttribute(self, attribute, value):\n\n # if multiple values found\n if hasattr(self, attribute):\n\n # make sure attribute is a list\n values = getattr(self, attribute)\n if not isinstance(values, list):\n setattr(self, attribute, [values])\n\n # append value to list\n getattr(self, attribute).append(value)\n\n # single value found\n else:\n setattr(self, attribute, value)", "def write_parameter(self, path, value, attr=None):\n if path.startswith('sample'):\n entry = self.entry.nxroot['entry']\n else:\n entry = self.entry\n if value is not None:\n if attr and path in entry:\n entry[path].attrs[attr] = value\n elif path in entry:\n if isinstance(entry[path], NXgroup):\n del entry[path]\n entry[path] = value\n else:\n entry[path].replace(value)\n elif attr is None:\n entry[path] = value", "def __setattr__(self, 
attribute, value):\n\t\tassert ltrace_func(TRACE_BASE)\n\n\t\tif attribute[0] == '_' or callable(value) \\\n\t\t\t\t\t\tor attribute in self.__class__._licorn_protected_attrs:\n\t\t\tdict.__setattr__(self, attribute, value)\n\n\t\telse:\n\t\t\tdict.__setitem__(self, attribute, value)", "def addattribute(self, uid, field, value):\n\n raise NotImplementedError", "def __setattr__(self, attr, value):\n self[attr] = value", "def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value", "def _add_argument(hparams, key, value, update=True):\n if hasattr(hparams, key):\n if update:\n setattr(hparams, key, value)\n else:\n hparams.add_hparam(key, value)", "def __setitem__(self, key, item):\n self.attrib[key] = item", "def __setitem__(self, key, item):\n self.attrib[key] = item", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def modify_argument(parser, arg_name, attribute, new_value):\n for action in parser._actions:\n if action.dest == arg_name:\n setattr(action, attribute, new_value)\n return", "def __setitem__(self, key, value):\n if key not in self.attribute_dict.keys():\n raise KeyError(\"Object does not have parameter: {}.\".format(key))\n self.attribute_dict[key] = value", "def addParameter(cTag, name, value): #@NoSelf", "def __setattr__(self, name, value):\n raise AttributeError(\"You cannot modify attributes on a %s\" % self.__class__.__name__)", "def add_attribute(self, key, value):\n self.attributes[key] = value", "def setAttributeValue(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def __setattr__ (self, attr, value):\n self.set_value (attr, value)", "def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))", "def __setattr__(self, attr, value):", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def add_param(element):\n nonlocal params\n name = element.attrib.get(\"name\", None)\n value = element.attrib.get(\"value\", \"\")\n if name:\n params[name] = value", "def _insert_parametric_attribute(self,attr_name,attr_def_str):\n self._check_for_definition_correctness(attr_name,attr_def_str)\n self._attrs_def.update([(attr_name,attr_def_str)])\n for attr_def in attr_def_str.split():\n if '$' in attr_def:\n self._params_in_attrs.setdefault(attr_def,set()).add(attr_name)", "def __setitem__(self, key, value) :\n attributeslist = getattr(self.request, \"_%s_attributes\" % self.name)\n for i in range(len(attributeslist)) :\n attribute = attributeslist[i]\n for j in range(len(attribute)) :\n (attrname, attrvalue) = attribute[j]\n if attrname == key :\n attribute[j][1].append(value)\n return\n attribute.append((key, [value]))", "def __setitem__(self, arg, value):\n setattr(self.args, arg, value)", "def nma_attribute(self, stmt, p_elem, pset=None):\n att = \"nma:\" + stmt.keyword\n if att not in p_elem.attr:\n p_elem.attr[att] = stmt.arg", "def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")", "def add_attribute(self, attr_type, name, components):\n self.attributes[attr_type] = {\"name\": name, \"components\": components}", "def __setattr__(self, name, value):\n if hasattr(self, name):\n super(JobSubmission, self).__setattr__(name, 
value)\n\n else:\n self.params[str(name)] = str(value) #TODO: resolve parameter cases", "def propagate_attribute(self, attr, val):\n self.activities.propagate_attribute(attr, val)", "def add_attr(self, attr, value, position=None, extra=None):\n # pylint: disable=eval-used\n if attr.startswith(\"*\"):\n attr = attr[1:]\n if attr not in self._attributes:\n self._attributes[attr] = []\n if len(self._attributes[attr]) != position:\n raise TypeError(\"AST Node lost in conversion!\")\n self._attributes[attr].append(value)\n elif extra is not None:\n self._attributes[attr] = eval(extra)\n else:\n self._attributes[attr] = value", "def set(self, attr, val):\r\n self.__dict__[attr] = val", "def set(self, attribute: str, value: Any):\n return setattr(self, attribute, value)", "def add_attribute(self, key, value):\n if key == \"process_step\":\n self.process_step = value\n elif key == \"source\":\n self.source = value\n else:\n self.attributes[key] = value", "def __setattr__(self,name,value):\n\n if name == '_dont_touch':\n super.__setattr__(self,name,value) \n elif name in self._dont_touch:\n raise ValueError(\"Parameter %s is protected, please don't touch!\"%name)\n else:\n super.__setattr__(self,name,value)\n self._dont_touch.append(name)", "def __setattr__(self,name,value):\n\n if name == '_dont_touch':\n super.__setattr__(self,name,value) \n elif name in self._dont_touch:\n raise ValueError(\"Parameter %s is protected, please don't touch!\"%name)\n else:\n super.__setattr__(self,name,value)\n self._dont_touch.append(name)", "def __setattr__(self,name,val):\n # use dir() not hasattr() because hasattr uses __getattribute__\n if name in dir(self):\n\n if name in self.params():\n self.set_parameter_value(name,val,self)\n else:\n object.__setattr__(self,name,val)\n\n elif name in dir(self._extraPO):\n\n if name in self._extraPO.params():\n self.set_parameter_value(name,val,self._extraPO)\n else:\n object.__setattr__(self._extraPO,name,val)\n\n else:\n\n # name not found, so set on this object\n object.__setattr__(self,name,val)", "def __setattr__(self, attr: str, _value: t.Any) -> t.NoReturn:\n raise AttributeError(attr)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def addParameter(self, *args):\n return _libsbml.Model_addParameter(self, *args)", "def setAttribute(self, username, attribute, value=''):\n if username in self.contents:\n self.contents[username][attribute] = value\n else:\n self.contents.__setitem__(username, {attribute: value})", "def __setattr__ (self, name, value):\n\t\ttry:\n\t\t\tself.__dict__[name] # Do not delete this line (it verifies the existence of an attribute)\n\t\t\t# Positioning of the existing attribute\n\t\t\tself.__dict__[name] = value\n\t\texcept KeyError:\n\t\t\t# The attribute does not exist is probably value of the structure\n\t\t\tself.__dict__[\"value\"][name] = value", "def add_param(self, param):\n self.params.append(param)\n return self", "def add_attribute(self, key: str, value: Union[str, int]) -> None:\n self.span_instance.add_attribute(key, value)", "def __setattr__(self, name, value):\n try:\n self[self.sig.argpos(name)] = value\n return value\n except:\n pass\n list.__setattr__(self, name, value)", "def regattr(self, attr):\n\n return super().regattr(attr=attr)", "def set_attr(self, name, value):\n setattr(self, name, value)", "def set_param(self, name, value, *, distrib=None, ref=None):\n raise NotImplementedError", "def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n 
setattr(obj, attribute, value)", "def __setattr__(self, attribute: str, value: Any) -> None:\n if hasattr(self, attribute) or self.contents is None:\n object.__setattr__(self, attribute, value)\n else:\n object.__setattr__(self.contents, attribute, value)", "def __setattr__(self, name: str, value: Any) -> None:\n super().__setattr__(name, value)", "def set(self, attribute, value):\n self.__setattr__(attribute, value)", "def addParameter(self, *args):\n return _libsbml.KineticLaw_addParameter(self, *args)", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_parameter(self, parameter):\n self._pkey += 1\n self.parameters[self._pkey] = parameter", "def __setattr__(self, name, value):\n self.set(**{name: value})", "def add_attrib(self, key, func, func_args):\n if key in self.aux_attrib:\n raise KeyError(\"Attribute '{0}' already exists, please use 'set_attrib'.\".format(key))\n else:\n self.set_attrib(key, func, func_args)", "def add_param(self, param):\n self._params.append(param)\n self.add_decompostion(param)", "def SetAttribute(self, attr, val):\n attrs = self.GetAttributes()\n attrs[attr] = val\n return self.SetAttributes(attr, attrs)", "def update(self, attribute: str, result: ProcessorResult) -> None:\n pass", "def setParameter(self,arg,value):\n self._params[arg] = value\n return self._params", "def add(self, param):\n self._data.add(param)", "def __setitem__(self, key, value):\n self.params[key].value = value", "def __setitem__(self, feature, value):\n setattr(self, feature, value)", "def add_attribute(obj, attr, val):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr, val)", "def __setattr__(self, key, value):\n resp = f'Attribute {key} can not be '\n if key in self.__dict__:\n resp += 'changed'\n else:\n resp += 'added'\n raise AttributeError(resp)", "def setattr(self, node, attr, value):\n node.set(attr, value)", "def __setattr__(self, name, value, overwrite=False):\n\n if \"attributes\" in dir(self):\n if name not in self.attributes:\n raise ImplementationError(name)\n elif \"_init_attrs\" in dir(self) and name in self._init_attrs and name != \"_init_attrs\" and not overwrite:\n message = f'\"{name}\" was used for two different argument names. 
Make sure each argument is unique.'\n message += f\"Current value is {getattr(self, name)}, proposed value is {value}\"\n raise ImplementationError(name, message)\n\n super().__setattr__(name, value)", "def add_attr(self, key: str, value):\n if key in self._attr_names():\n raise ValueError(\"Already have an attribute called '{}'\".format(key))\n self._attributes.append((key, value))", "def _paramUpdate(self):\n\n # Update the database attributes accordingly.\n dt.utilities.DB_attrs_save(self.Database, self.newParam)", "def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")", "def set_attrib(self, key, func, func_args):\n self.aux_attrib[key] = func\n self.aux_attrib_args[key] = func_args", "def addParam(self, var: IRVariable):\n self.params[var.name] = var", "def add_attribute(self, name, value, modify=False, sources=None, published_at=None):\n\n # Find attributes and remove if matches\n if modify:\n if not self.current_attributes:\n res = self.tq.get('{}/attributes'.format(self._get_api_endpoint()))\n self.current_attributes = res.get('data', [])\n\n for attr in self.current_attributes:\n if attr['name'] == name and attr['value'].lower() != value.lower():\n attribute_id = attr['id']\n self.tq.delete(\n '{}/attributes/{}'.format(self._get_api_endpoint(), attribute_id))\n # break\n\n data = {'name': name, 'value': value}\n if sources and isinstance(sources, str):\n data['sources'] = [{'name': sources}]\n elif sources and isinstance(sources, dict):\n data['sources'] = [sources]\n elif sources and isinstance(sources, list):\n data['sources'] = sources\n if published_at:\n data['published_at'] = published_at\n\n res = self.tq.post('{}/attributes'.format(self._get_api_endpoint()), data=data)\n\n # Add the newly added indicator to the cache\n if res.get('total', 0) > 0:\n self.current_attributes.append(res['data'][0])" ]
[ "0.6987088", "0.6885637", "0.6829756", "0.6731595", "0.67283607", "0.67017716", "0.66079473", "0.6570364", "0.6501009", "0.64624107", "0.6447641", "0.64455295", "0.64400303", "0.64285815", "0.6388802", "0.6383218", "0.637725", "0.63599354", "0.63599354", "0.63599354", "0.6348706", "0.6334282", "0.6324778", "0.63077027", "0.63015425", "0.6297457", "0.62894857", "0.62894225", "0.6288043", "0.62850267", "0.62850267", "0.6283991", "0.6283991", "0.6283991", "0.6283991", "0.6283991", "0.6267434", "0.62598014", "0.6241654", "0.62307155", "0.62281984", "0.6200556", "0.6194948", "0.6185698", "0.6171319", "0.6165554", "0.6161077", "0.61583555", "0.61568445", "0.6156378", "0.6144151", "0.6138369", "0.61282396", "0.6119409", "0.6116306", "0.6105505", "0.61039174", "0.60875696", "0.6080773", "0.6075971", "0.6075971", "0.6074163", "0.6074056", "0.6062446", "0.6061021", "0.6060032", "0.60585636", "0.6052972", "0.6050929", "0.6047986", "0.6036571", "0.6022522", "0.60196245", "0.5996791", "0.599197", "0.5986874", "0.59852153", "0.59834486", "0.59749854", "0.59749854", "0.59749854", "0.59730667", "0.59730536", "0.59728646", "0.5972033", "0.5961318", "0.5957416", "0.5957364", "0.5946024", "0.5942907", "0.5933817", "0.5932546", "0.59298784", "0.5922056", "0.592187", "0.59217393", "0.59162265", "0.5913795", "0.59058076", "0.5891436", "0.58882475" ]
0.0
-1
This method will modify the attribute by appending the parameter to it.
def add_nemesis(self, nemesis):
    self.nemeses.append(nemesis)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setattr__(self, name, value):\n if name in ['parameters', 'program_name']: # Allowed attributes\n self.__dict__[name] = value\n else:\n self.set_parameter(name, value) # treat as a parameter", "def put_param(self, attr_name, val):\n self._params[attr_name] = val", "def add_attribute(self, attr):\n self.add(attr)", "def __setattr__(self,attributeName,attributeValue):\n if (attributeName in StackParameterNames):\n StackParameters[attributeName] = attributeValue\n else:\n object.__setattr__(self, attributeName, attributeValue)\n #endIf", "def add_attribute(self, attr):\n self.attrs.add_attribute(attr)", "def __setitem__(self, name: str, value):\n super(Parameter, self).__setitem__(name, value)", "def append_attribute(myobj, attrib_k, val):\n vals = getattr(myobj, attrib_k, [])\n if val not in vals:\n vals.append(val)\n setattr(myobj, attrib_k, vals)", "def add_attribute(self, attr):\n self.attrs.add(attr)", "def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)", "def __setattr__(self, attr, value):\n super().__setattr__(attr, value)", "def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})", "def set_attribute(self, name, value):\n\n pass", "def __setitem__(self, attribute_name, value):\n pass # pragma: no cover", "def addAttr(self, *args):\n return _libsbml.XMLToken_addAttr(self, *args)", "def add_attribute(self, attr: ResourceAttributeDescriptor) -> None:\n self._attributes[assert_not_none(attr.name)] = attr.bind(self)", "def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)", "def setParameter(self, name, value):", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(node_proto, name, value):\n node_proto.attribute.extend([make_attribute(name, value)])", "def __setitem__(self, name, value):\n self.gattrs[name] = value", "def _setAttribute(self, attribute, value):\n\n # if multiple values found\n if hasattr(self, attribute):\n\n # make sure attribute is a list\n values = getattr(self, attribute)\n if not isinstance(values, list):\n setattr(self, attribute, [values])\n\n # append value to list\n getattr(self, attribute).append(value)\n\n # single value found\n else:\n setattr(self, attribute, value)", "def write_parameter(self, path, value, attr=None):\n if path.startswith('sample'):\n entry = self.entry.nxroot['entry']\n else:\n entry = self.entry\n if value is not None:\n if attr and path in entry:\n entry[path].attrs[attr] = value\n elif path in entry:\n if isinstance(entry[path], NXgroup):\n del entry[path]\n entry[path] = value\n else:\n entry[path].replace(value)\n elif attr is None:\n entry[path] = value", "def __setattr__(self, 
attribute, value):\n\t\tassert ltrace_func(TRACE_BASE)\n\n\t\tif attribute[0] == '_' or callable(value) \\\n\t\t\t\t\t\tor attribute in self.__class__._licorn_protected_attrs:\n\t\t\tdict.__setattr__(self, attribute, value)\n\n\t\telse:\n\t\t\tdict.__setitem__(self, attribute, value)", "def addattribute(self, uid, field, value):\n\n raise NotImplementedError", "def __setattr__(self, attr, value):\n self[attr] = value", "def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value", "def _add_argument(hparams, key, value, update=True):\n if hasattr(hparams, key):\n if update:\n setattr(hparams, key, value)\n else:\n hparams.add_hparam(key, value)", "def __setitem__(self, key, item):\n self.attrib[key] = item", "def __setitem__(self, key, item):\n self.attrib[key] = item", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def modify_argument(parser, arg_name, attribute, new_value):\n for action in parser._actions:\n if action.dest == arg_name:\n setattr(action, attribute, new_value)\n return", "def __setitem__(self, key, value):\n if key not in self.attribute_dict.keys():\n raise KeyError(\"Object does not have parameter: {}.\".format(key))\n self.attribute_dict[key] = value", "def addParameter(cTag, name, value): #@NoSelf", "def __setattr__(self, name, value):\n raise AttributeError(\"You cannot modify attributes on a %s\" % self.__class__.__name__)", "def add_attribute(self, key, value):\n self.attributes[key] = value", "def setAttributeValue(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def __setattr__ (self, attr, value):\n self.set_value (attr, value)", "def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))", "def __setattr__(self, attr, value):", "def add_attr(self, section_name: str, attr_name: str, value: str) -> None:\n pass", "def add_param(element):\n nonlocal params\n name = element.attrib.get(\"name\", None)\n value = element.attrib.get(\"value\", \"\")\n if name:\n params[name] = value", "def _insert_parametric_attribute(self,attr_name,attr_def_str):\n self._check_for_definition_correctness(attr_name,attr_def_str)\n self._attrs_def.update([(attr_name,attr_def_str)])\n for attr_def in attr_def_str.split():\n if '$' in attr_def:\n self._params_in_attrs.setdefault(attr_def,set()).add(attr_name)", "def __setitem__(self, key, value) :\n attributeslist = getattr(self.request, \"_%s_attributes\" % self.name)\n for i in range(len(attributeslist)) :\n attribute = attributeslist[i]\n for j in range(len(attribute)) :\n (attrname, attrvalue) = attribute[j]\n if attrname == key :\n attribute[j][1].append(value)\n return\n attribute.append((key, [value]))", "def __setitem__(self, arg, value):\n setattr(self.args, arg, value)", "def nma_attribute(self, stmt, p_elem, pset=None):\n att = \"nma:\" + stmt.keyword\n if att not in p_elem.attr:\n p_elem.attr[att] = stmt.arg", "def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")", "def add_attribute(self, attr_type, name, components):\n self.attributes[attr_type] = {\"name\": name, \"components\": components}", "def __setattr__(self, name, value):\n if hasattr(self, name):\n super(JobSubmission, self).__setattr__(name, 
value)\n\n else:\n self.params[str(name)] = str(value) #TODO: resolve parameter cases", "def propagate_attribute(self, attr, val):\n self.activities.propagate_attribute(attr, val)", "def add_attr(self, attr, value, position=None, extra=None):\n # pylint: disable=eval-used\n if attr.startswith(\"*\"):\n attr = attr[1:]\n if attr not in self._attributes:\n self._attributes[attr] = []\n if len(self._attributes[attr]) != position:\n raise TypeError(\"AST Node lost in conversion!\")\n self._attributes[attr].append(value)\n elif extra is not None:\n self._attributes[attr] = eval(extra)\n else:\n self._attributes[attr] = value", "def set(self, attr, val):\r\n self.__dict__[attr] = val", "def set(self, attribute: str, value: Any):\n return setattr(self, attribute, value)", "def add_attribute(self, key, value):\n if key == \"process_step\":\n self.process_step = value\n elif key == \"source\":\n self.source = value\n else:\n self.attributes[key] = value", "def __setattr__(self,name,value):\n\n if name == '_dont_touch':\n super.__setattr__(self,name,value) \n elif name in self._dont_touch:\n raise ValueError(\"Parameter %s is protected, please don't touch!\"%name)\n else:\n super.__setattr__(self,name,value)\n self._dont_touch.append(name)", "def __setattr__(self,name,value):\n\n if name == '_dont_touch':\n super.__setattr__(self,name,value) \n elif name in self._dont_touch:\n raise ValueError(\"Parameter %s is protected, please don't touch!\"%name)\n else:\n super.__setattr__(self,name,value)\n self._dont_touch.append(name)", "def __setattr__(self,name,val):\n # use dir() not hasattr() because hasattr uses __getattribute__\n if name in dir(self):\n\n if name in self.params():\n self.set_parameter_value(name,val,self)\n else:\n object.__setattr__(self,name,val)\n\n elif name in dir(self._extraPO):\n\n if name in self._extraPO.params():\n self.set_parameter_value(name,val,self._extraPO)\n else:\n object.__setattr__(self._extraPO,name,val)\n\n else:\n\n # name not found, so set on this object\n object.__setattr__(self,name,val)", "def __setattr__(self, attr: str, _value: t.Any) -> t.NoReturn:\n raise AttributeError(attr)", "def _wrap_attr(attr):\n return '={0},'.format(attr)", "def addParameter(self, *args):\n return _libsbml.Model_addParameter(self, *args)", "def setAttribute(self, username, attribute, value=''):\n if username in self.contents:\n self.contents[username][attribute] = value\n else:\n self.contents.__setitem__(username, {attribute: value})", "def __setattr__ (self, name, value):\n\t\ttry:\n\t\t\tself.__dict__[name] # Do not delete this line (it verifies the existence of an attribute)\n\t\t\t# Positioning of the existing attribute\n\t\t\tself.__dict__[name] = value\n\t\texcept KeyError:\n\t\t\t# The attribute does not exist is probably value of the structure\n\t\t\tself.__dict__[\"value\"][name] = value", "def add_param(self, param):\n self.params.append(param)\n return self", "def add_attribute(self, key: str, value: Union[str, int]) -> None:\n self.span_instance.add_attribute(key, value)", "def __setattr__(self, name, value):\n try:\n self[self.sig.argpos(name)] = value\n return value\n except:\n pass\n list.__setattr__(self, name, value)", "def regattr(self, attr):\n\n return super().regattr(attr=attr)", "def set_attr(self, name, value):\n setattr(self, name, value)", "def set_param(self, name, value, *, distrib=None, ref=None):\n raise NotImplementedError", "def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n 
setattr(obj, attribute, value)", "def __setattr__(self, attribute: str, value: Any) -> None:\n if hasattr(self, attribute) or self.contents is None:\n object.__setattr__(self, attribute, value)\n else:\n object.__setattr__(self.contents, attribute, value)", "def __setattr__(self, name: str, value: Any) -> None:\n super().__setattr__(name, value)", "def set(self, attribute, value):\n self.__setattr__(attribute, value)", "def addParameter(self, *args):\n return _libsbml.KineticLaw_addParameter(self, *args)", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_parameter(self, parameter):\n self._pkey += 1\n self.parameters[self._pkey] = parameter", "def __setattr__(self, name, value):\n self.set(**{name: value})", "def add_attrib(self, key, func, func_args):\n if key in self.aux_attrib:\n raise KeyError(\"Attribute '{0}' already exists, please use 'set_attrib'.\".format(key))\n else:\n self.set_attrib(key, func, func_args)", "def add_param(self, param):\n self._params.append(param)\n self.add_decompostion(param)", "def SetAttribute(self, attr, val):\n attrs = self.GetAttributes()\n attrs[attr] = val\n return self.SetAttributes(attr, attrs)", "def update(self, attribute: str, result: ProcessorResult) -> None:\n pass", "def setParameter(self,arg,value):\n self._params[arg] = value\n return self._params", "def add(self, param):\n self._data.add(param)", "def __setitem__(self, key, value):\n self.params[key].value = value", "def __setitem__(self, feature, value):\n setattr(self, feature, value)", "def add_attribute(obj, attr, val):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr, val)", "def __setattr__(self, key, value):\n resp = f'Attribute {key} can not be '\n if key in self.__dict__:\n resp += 'changed'\n else:\n resp += 'added'\n raise AttributeError(resp)", "def setattr(self, node, attr, value):\n node.set(attr, value)", "def __setattr__(self, name, value, overwrite=False):\n\n if \"attributes\" in dir(self):\n if name not in self.attributes:\n raise ImplementationError(name)\n elif \"_init_attrs\" in dir(self) and name in self._init_attrs and name != \"_init_attrs\" and not overwrite:\n message = f'\"{name}\" was used for two different argument names. 
Make sure each argument is unique.'\n message += f\"Current value is {getattr(self, name)}, proposed value is {value}\"\n raise ImplementationError(name, message)\n\n super().__setattr__(name, value)", "def add_attr(self, key: str, value):\n if key in self._attr_names():\n raise ValueError(\"Already have an attribute called '{}'\".format(key))\n self._attributes.append((key, value))", "def _paramUpdate(self):\n\n # Update the database attributes accordingly.\n dt.utilities.DB_attrs_save(self.Database, self.newParam)", "def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")", "def set_attrib(self, key, func, func_args):\n self.aux_attrib[key] = func\n self.aux_attrib_args[key] = func_args", "def addParam(self, var: IRVariable):\n self.params[var.name] = var", "def add_attribute(self, name, value, modify=False, sources=None, published_at=None):\n\n # Find attributes and remove if matches\n if modify:\n if not self.current_attributes:\n res = self.tq.get('{}/attributes'.format(self._get_api_endpoint()))\n self.current_attributes = res.get('data', [])\n\n for attr in self.current_attributes:\n if attr['name'] == name and attr['value'].lower() != value.lower():\n attribute_id = attr['id']\n self.tq.delete(\n '{}/attributes/{}'.format(self._get_api_endpoint(), attribute_id))\n # break\n\n data = {'name': name, 'value': value}\n if sources and isinstance(sources, str):\n data['sources'] = [{'name': sources}]\n elif sources and isinstance(sources, dict):\n data['sources'] = [sources]\n elif sources and isinstance(sources, list):\n data['sources'] = sources\n if published_at:\n data['published_at'] = published_at\n\n res = self.tq.post('{}/attributes'.format(self._get_api_endpoint()), data=data)\n\n # Add the newly added indicator to the cache\n if res.get('total', 0) > 0:\n self.current_attributes.append(res['data'][0])" ]
[ "0.6987088", "0.6885637", "0.6829756", "0.6731595", "0.67283607", "0.67017716", "0.66079473", "0.6570364", "0.6501009", "0.64624107", "0.6447641", "0.64455295", "0.64400303", "0.64285815", "0.6388802", "0.6383218", "0.637725", "0.63599354", "0.63599354", "0.63599354", "0.6348706", "0.6334282", "0.6324778", "0.63077027", "0.63015425", "0.6297457", "0.62894857", "0.62894225", "0.6288043", "0.62850267", "0.62850267", "0.6283991", "0.6283991", "0.6283991", "0.6283991", "0.6283991", "0.6267434", "0.62598014", "0.6241654", "0.62307155", "0.62281984", "0.6200556", "0.6194948", "0.6185698", "0.6171319", "0.6165554", "0.6161077", "0.61583555", "0.61568445", "0.6156378", "0.6144151", "0.6138369", "0.61282396", "0.6119409", "0.6116306", "0.6105505", "0.61039174", "0.60875696", "0.6080773", "0.6075971", "0.6075971", "0.6074163", "0.6074056", "0.6062446", "0.6061021", "0.6060032", "0.60585636", "0.6052972", "0.6050929", "0.6047986", "0.6036571", "0.6022522", "0.60196245", "0.5996791", "0.599197", "0.5986874", "0.59852153", "0.59834486", "0.59749854", "0.59749854", "0.59749854", "0.59730667", "0.59730536", "0.59728646", "0.5972033", "0.5961318", "0.5957416", "0.5957364", "0.5946024", "0.5942907", "0.5933817", "0.5932546", "0.59298784", "0.5922056", "0.592187", "0.59217393", "0.59162265", "0.5913795", "0.59058076", "0.5891436", "0.58882475" ]
0.0
-1
THIS SHOULD LOOK FAMILIAR, SO WE PROVIDED IT FOR YOU. This function reads a .csv file and parses it into a list of dictionaries, where each dictionary is formed from the data on one line of the .csv file. This function takes one argument , which is the path of the file to be read. You will need to use the "csv" module in this function.
def read_csv_file(input_filepath):
    out_list = []
    with open(input_filepath, 'r', encoding = 'utf-8') as f:
        reader = csv.reader(f)
        for i,row in enumerate(reader):
            if i == 0:
                labels = row
            else:
                new_dict = {}
                for j,value in enumerate(row):
                    new_dict[labels[j]] = value
                out_list.append(new_dict)
    return out_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_csv_as_dicts(csv_input_file_name):\n input_table = read_csv_as_table(csv_input_file_name, skip_first_line=False)\n\n # first line should contain headers\n header = input_table[0]\n # rest lines would contain actual data\n data = input_table[1:]\n\n output = []\n # process all lines with data\n for input_line in data:\n record = {}\n for i in range(len(header)):\n record[header[i]] = input_line[i]\n output.append(record)\n return output", "def read_from_csv(self, csv_file):\n data = []\n with codecs.open(csv_file, 'r', encoding='utf-8') as csvfile:\n header = None\n for i, line in enumerate(csvfile):\n line_split = [x.strip() for x in line.split(\"|\")]\n line_data = [x for x in line_split if len(x) > 0]\n if i == 0:\n header = line_data\n else:\n entry = {}\n for i,datum in enumerate(line_data):\n entry[header[i]] = datum\n data.append(entry)\n print \"Loaded %d entries from %s\" % (len(data), csv_file)\n return data", "def csv2dicts(csvfile, names=None):\n data = []\n for row_index, row in enumerate(csvfile):\n if row_index == 0:\n if names:\n keys = names\n else:\n keys = row\n print(keys)\n continue\n data.append({key: value for key, value in zip(keys, row)})\n return data", "def csv_dict_reader(file_path):\r\n with open(file_path, 'r') as file_obj:\r\n\r\n reader = csv.DictReader(file_obj, delimiter=',')\r\n for line in reader:\r\n #print(line[\"variable_name \"]),\r\n print(line[\"dataset\"])", "def read_file(file):\n \n dictionary = {}\n csv_fp = csv.reader(file)\n #L[46] = manufacturer, L[63] = year\n #L[4]= city mileage, L[34]=highway mileage\n for line in csv_fp:\n #Skip the headings and the year 2017\n if (not (line[46] == 'make')) and (not (line[63] == '2017')):\n if line[46] in dictionary:\n #Add the city and highway mileage if the year has been made\n if line[63] in dictionary[line[46]]:\n dictionary[line[46]][line[63]][0] += [int(line[4])]\n dictionary[line[46]][line[63]][1] += [int(line[34])]\n #Add the year and data if it was not made previously\n else:\n dictionary[line[46]][line[63]] = [[int(line[4])],\\\n [int(line[34])]]\n #Adds a new manufacturer\n else:\n dictionary[line[46]] = {line[63]:[[int(line[4])],\\\n [int(line[34])]]}\n return dictionary", "def _read_csv_to_dictionary_list(file_name):\n catalog_list = []\n with open(file_name) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n catalog_list.append(item)\n return catalog_list", "def parse_csv_input_file(input_file):\n with open(input_file) as csvfile:\n reader = csv.DictReader(csvfile)\n for item in reader:\n dict = {i: x for i, x in item.items()}\n yield(dict)", "def csv_to_dict(filename):\n data_list = []\n \n with open(filename, 'rb') as datafile:\n data_reader = csv.DictReader(datafile, delimiter = ',')\n for row in data_reader:\n data_list.append(row)\n\n return data_list", "def csv_dict_reader(file_obj):\n #import re\n #file = open(file_obj)\n\n # reader = csv.DictReader(file_obj)\n # for line in reader:\n # print(line[\"Name\"])", "def dictparse(csvfilename, keyfield):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True)\n for row in csvreader:\n table[row[keyfield]] = row\n return table", "def read_csv(path: str) -> list[dict[str, str]]:\n with open(path, 'r') as f:\n return list(csv.DictReader(f))", "def load_csv(file):\n with open(file) as csvfile:\n reader = csv.DictReader(csvfile)\n return [dict(row) for row in reader]", "def read_list():\n import glob\n import csv\n\n slist = {}\n csvfile 
= glob.glob(_path + '/*.csv').pop()\n if csvfile is None:\n return slist\n _logger.info(\"Found file '%s'\" % csvfile)\n\n hcsv = csv.reader(open(csvfile), delimiter=';')\n # Skip header\n next(hcsv)\n # Work on every line\n for line in hcsv:\n if len(line) < 2:\n continue\n # print(line)\n slist[line[0]] = {'name': line[2], 'surname': line[1]}\n\n # print(slist)\n # print(\"LEN\", len(slist))\n # exit(1)\n return slist", "def parse_csvfile(self, csvfile):\n\n logging.info(\"Parseing csvfile: %s\" % basename(csvfile))\n fields = []\n data = {}\n try:\n with open(csvfile) as f:\n for line in f:\n line = line.strip()\n # Skip empty or commented line\n if not line or line[0] == \"#\":\n continue\n if not fields:\n # The first valid line defines fields.\n fields = [x.strip() for x in line.split(\",\")]\n for f in self.REQUIRED_FIELDS:\n if f not in fields:\n logging.error(\"Failed to find %s field. \"\n \"Aborted.\" % f)\n sys.exit(1)\n else:\n # The rest lines are data\n values = [x.strip() for x in line.split(\",\")]\n record = {}\n for k, v in zip(fields, values):\n record[k] = v\n # Convert date time string to epoch seconds\n record[\"time_h\"] = self.parse_timestr(record[\"time_h\"])\n node = record[\"name\"]\n if data.get(node, None):\n data[node].append(record)\n else:\n data[node] = [record]\n except Exception as e:\n logging.exception(\"Failed to parsing the csvfile. \"\n \"See stack trace below:\")\n sys.exit(1)\n\n # While it didn't occur often, I observed that data in CSV files\n # generated by cbtool monextrac command were not in time order.\n # So sort them.\n logging.debug(\"Sorting the data\")\n for node in data.keys():\n data[node].sort(lambda x, y: cmp(int(x[\"time\"]), int(y[\"time\"])))\n\n return data, fields", "def _read_csv(file_name):\n with open(file_name) as boards:\n rows = csv.DictReader(boards, delimiter=',', quotechar='\"')\n formatted_data = []\n for row in rows:\n formatted_data.append(dict(row))\n return formatted_data", "def dictparse(csvfilename, keyfield, separator, quote, quotestrategy):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True,\n delimiter=separator,\n quotechar=quote,\n quoting=quotestrategy)\n for row in csvreader:\n table[row[keyfield]] = row\n return table, csvreader.fieldnames", "def dictparse(csvfilename, keyfield, separator, quote, quotestrategy):\n table = {}\n with open(csvfilename, \"rt\", newline='') as csvfile:\n csvreader = csv.DictReader(csvfile,\n skipinitialspace=True,\n delimiter=separator,\n quotechar=quote,\n quoting=quotestrategy)\n for row in csvreader:\n table[row[keyfield]] = row\n return table, csvreader.fieldnames", "def parse(raw_file):\n parsed_data = []\n with open(raw_file, 'r') as r:\n rows = csv.reader(r)\n fields = rows.next()\n counter = 0\n for r in rows:\n parsed_data.append(dict(zip(fields, r)))\n\n return parsed_data", "def read_data_from_csv(csv_file, header=None, **kwargs):\n if os.path.isabs(csv_file) == False:\n path_to_csv = os.path.join(csv_file)\n else:\n path_to_csv = csv_file\n row_list = []\n if \"field_sep\" not in kwargs.keys():\n field_sep = ','\n else:\n field_sep = kwargs.get(\"field_sep\")\n with open(path_to_csv, mode='r') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=field_sep, fieldnames=header)\n for record in csv_reader:\n if list(record.values())[0].startswith(\"#\") is not True:\n # IT'S A COMMENT IF IT STARTS WITH \"#\" \n # IF THIS IS YOUR HEADER ROW, SUPPLY A LIST OF COLUMN NAMES WHEN 
CALLING THE FUNCTION\n row_list.append(record)\n return row_list", "def csv_dict_reader(file_obj):\n reader = csv.DictReader(file_obj, delimiter=',')\n for line in reader:\n print(line[\"first_name\"]),\n print(line[\"last_name\"])", "def parse_csv_input(input_file): # {{{\n parsed_infile = []\n try:\n with open(input_file) as infile:\n for line in csv.reader(infile):\n parsed_infile.append(line)\n\n temp_object_storage = []\n\n for line_index, line in enumerate(parsed_infile[1:]):\n temp_object_storage.append({})\n for category_index, category in enumerate(parsed_infile[0]):\n if category_index == 0:\n category = category[3:]\n temp_object_storage[line_index][category] = line[category_index]\n\n return temp_object_storage\n except FileNotFoundError as excep:\n LOGGER.info(\"error parsing csv file: %s\", excep) # }}}", "def read_csv_rows(path: str) -> list[dict[str, str]]:\n file_handle = open(\"survey\", encoding=\"utf8\")\n csv_reader = DictReader(file_handle)\n rows: list[dict[str, str]] = []\n for row in csv_reader:\n rows.append(row)\n file_handle.close()\n return rows", "def parse(raw_file, delimiter):\n\t#open csv file\n\topened_file = open(raw_file)\n\t\n\t#read csv file\n\tcsv_data = csv.reader(opened_file,delimiter=delimiter)\n\t\n\t#build parsed data\n\tparsed_data = []\n\t\n\t#define headers\n\tfields = csv_data.next()\n\t\n\t#Iterate over each row of the csv file, zip together field->value pairs\n\tfor row in csv_data:\n\t\tparsed_data.append(dict(zip(fields, row)))\n\t\n\t#close csv file\n\topened_file.close()\n\t\n\treturn parsed_data", "def read_csv(path):\r\n data = []\r\n csv_file = open(path)\r\n for row in csv.DictReader(csv_file):\r\n data.append(row)\r\n csv_file.close() \r\n return data", "def read_strong_csv(strong_meta_csv_path):\n with open(strong_meta_csv_path, 'r') as fr:\n reader = csv.reader(fr, delimiter='\\t')\n lines = list(reader)\n \n meta_dict = {}\n for line in lines:\n [audio_name, begin_time, end_time, label] = line\n meta = {'begin_time': begin_time, 'end_time': end_time, 'label': label}\n if audio_name in meta_dict:\n meta_dict[audio_name].append(meta)\n else:\n meta_dict[audio_name] = [meta]\n \n return meta_dict", "def csv_to_dict_list(file_path, char_sep=\"|\"):\n with open(file_path, mode='r') as f:\n d = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True, delimiter=char_sep)]\n return d", "def load_csv(filename):\n # Open csvfile\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n\n # Put data in gloabal list\n for row in reader:\n # Get data of subject with either or both milk and peanut allergy\n if row[\"MILK_ALG_START\"] != \"NA\" or row[\"PEANUT_ALG_START\"] != \"NA\":\n sub_list = list()\n for key in DATA_KEYS:\n sub_list.append(row[key])\n\n # Add data of subject to all data \n data_list.append(sub_list)", "def read_risposte_from_csv(csv_risposte):\n import os\n try:\n csv_in = open(os.path.join(os.path.dirname(__file__), csv_risposte), 'rb')\n r_reader = csv.DictReader(csv_in, delimiter=',')\n except IOError:\n print \"It was impossible to open file %s\" % csv_risposte\n exit(1)\n\n risposte_partiti = {}\n for row in r_reader:\n partito_id = int(row['partito_id'])\n domanda_id = int(row['domanda_id'])\n risposta = int(row['risposta_int'])\n if not partito_id in risposte_partiti:\n risposte_partiti[partito_id] = {}\n risposte_partiti[partito_id][domanda_id] = risposta\n\n return risposte_partiti", "def parse_csv_file(file_path):\n\n complete_data_list = []\n\n try:\n import_file = 
open(file_path, \"rb\")\n\n except IOError:\n print 'An error occured trying to read the file.'\n\n else:\n reader_file = csv.DictReader(import_file)\n complete_data_list = get_file_data(reader_file)\n import_file.close()\n\n return complete_data_list", "def read_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n dictionaryoutput[item[0]] = item[1]\r\n return dictionaryoutput", "def read_csv(csv_file):\r\n with open(csv_file, \"r\") as files:\r\n data = csv.reader(files)\r\n return list(data)", "def get_dictionary_from_csv(file):\n csv_file = file[:-4] # avoid .txt extension\n csv_file += \"_dico.csv\"\n dic = pd.read_csv(csv_file, delimiter=',')\n return list(dic.columns)", "def _csv_to_dict(name):\n csv_path = _get_csv_path(name)\n result = []\n with open(csv_path) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n result.append(row)\n return result", "def load_from_file_csv(cls):\n fields = []\n rows = []\n new_dict = {}\n new_list = []\n key = \"\"\n filename = cls.__name__ + \".csv\"\n with open(filename) as fp:\n reader = csv.reader(fp)\n fields = next(reader)\n for row in reader:\n rows.append(row)\n for row in rows:\n i = 0\n new_dict = new_dict.fromkeys(fields)\n for attr in fields:\n key = fields[i]\n value = row[i]\n new_dict[key] = value\n i += 1\n new_list.append(cls.create(**new_dict))\n return new_list", "def read_csv_rows(filename: str) -> list[dict[str, str]]:\n result: list[dict[str, str]] = []\n \n \"\"\"Open a handle to the data file.\"\"\"\n file_handle = open(filename, \"r\", encoding=\"utf8\")\n\n \"\"\"Prepare to read the data in the file as CSV instead of just strings.\"\"\"\n csv_reader = DictReader(file_handle)\n\n \"\"\"Read each row of the CSV line by line.\"\"\"\n for row in csv_reader:\n result.append(row)\n\n \"\"\"Close file.\"\"\"\n file_handle.close()\n\n return result", "def csvToDict(filepath):\n data = []\n with open(getcwd() + filepath, 'r') as dataset:\n assert csv.Sniffer().has_header(dataset.read(9999)), 'No headers'\n dataset.seek(0)\n dialect = csv.Sniffer().sniff(dataset.read(99999))\n dataset.seek(0)\n reader = csv.DictReader(dataset, dialect=dialect)\n headers = reader.fieldnames\n for row in reader:\n data.append(row)\n\n data = assert_data_format(data)[0]\n\n return data, headers", "def parse_trick_ascii(csv_file):\n data_file = csv.DictReader(open(csv_file))\n single_run_data_dict = {'altitude' : [0.0],\n 'latitude' : [0.0],\n 'longitude' : [0.0]}\n # Your code here\n # ...\n # return the dict\n return single_run_data_dict", "def read_csv(filename, delimiter=','):\n data = []\n try:\n with open(filename, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=delimiter)\n try:\n keys = reader.fieldnames\n for row in reader:\n data.append(row)\n except csv.Error as e:\n sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e))\n except IOError as e:\n sys.exit('%s does not exist' % e) \n return data", "def main3():\r\n #open the file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #use DictReader method from csv module\r\n csv_reader = csv.DictReader(csvfile1)\r\n #read the lines\r\n for line in csv_reader:\r\n print(line['email'])", "def read_csv(csv_file):\r\n reader = csv.DictReader(csv_file)\r\n if 'PERSON_ID' in reader.fieldnames:\r\n key = 'PERSON_ID'\r\n elif 'LogonName' in reader.fieldnames:\r\n key = 'LogonName'\r\n else:\r\n raise 'no username field specified in the file'\r\n user_list = []\r\n for item in 
reader:\r\n try:\r\n uid = item[key]\r\n assert uid\r\n user_list.append(uid)\r\n except:\r\n warning('empty username')\r\n if not user_list:\r\n raise 'empty list of users'\r\n return user_list", "def load_data(filepath):\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n yield row", "def load_data_from_csv(csv_file, users_to_i = {}, items_to_i = {}):\n raw_data = []\n with open(csv_file) as f:\n csvreader = csv.reader(f)\n # skipping first row (header)\n next(csvreader)\n for user, item in csvreader:\n raw_data.append((user, item))\n return load_data_from_array(raw_data, users_to_i, items_to_i)", "def get_csv_data(file_name: str) -> Iterator[list]:\n with open(file_name) as f:\n # creating a reader instance that can iterate over rows of file.\n reader = DictReader(f)\n\n # iterating over rows:\n for row in reader:\n yield dict(row) # returning the dicts for each row in the dataset.", "def creating_dict_from_csv(self) -> dict:\n dictionary = {}\n for row in self.__read_csv():\n if dictionary.get(row[0]):\n dictionary[row[0]].append((row[1], row[2]))\n else:\n dictionary[row[0]] = [(row[1], row[2])]\n\n for key, value in dictionary.items():\n dictionary[key] = sorted(value, key=lambda x: x[1], reverse=True)\n\n return dictionary", "def save_csv_into_dictionary(csv_file):\n\n dictionary = OrderedDict()\n with open(csv_file, newline='') as file:\n reader = csv.reader(file)\n for row in reader:\n dictionary[row[0]] = row[1]\n return dictionary", "def csv_to_dict(filename):\n\twith open(filename, 'r') as in_hndl:\n\t\tindict = [i for i in csv.DictReader(in_hndl)]\n\treturn indict[0]", "def load_csv(input):\n with open(input['csv'], 'r', encoding=input['encoding']) as f:\n invoice_dict = dict()\n reader = csv.reader(f, delimiter=';')\n\n for row in reader:\n invoice_id = row[0]\n\n if invoice_id in invoice_dict:\n invoice_dict[invoice_id].add_entry(row[1:])\n else:\n invoice_dict[invoice_id] = Invoice(row)\n\n return invoice_dict", "def read_csv():", "def readcsv(csvfile):\n logger = log.getLogger('obslog.readcsv')\n\n if not os.path.exists(csvfile):\n logger.error('Cannot access %s', csvfile)\n raise SystemExit\n\n data = {}\n with open(csvfile, mode='r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n data[row['FITSFILE']] = row\n logger.debug('Data: %s', data)\n return data", "def read_file(filename):\n with open(filename) as fp:\n reader = csv.DictReader(fp)\n return list(reader)", "def csv_to_dict(csvfile, delimiter=\",\", quotechar='\"'):\n reader = csv.DictReader(csvfile, delimiter=delimiter, quotechar=quotechar)\n\n data = {each: [] for each in reader.fieldnames}\n for i, row in enumerate(reader):\n for key, value in row.items():\n data[key].append(value)\n return data", "def read_name_map( name_map_path) :\n with open( name_map_path, newline=\"\") as csvfile:\n table = { }\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) < 2:\n continue\n if row[key_col] == key_header:\n continue\n key = row[key_col]\n val = row[val_col]\n table[key] = val\n return table", "def parse(raw_file, delimiter):\n\n opened_file = open(raw_file, 'rU')\n csv_data = csv.reader(opened_file, delimiter=delimiter)\n\n parsed_data = []\n\n fields = csv_data.next()\n\n for row in csv_data:\n parsed_data.append(dict(zip(fields,row)))\n\n opened_file.close()\n\n return parsed_data", "def csv2dict(filename):\n dis_dict = {}\n with open(filename) as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n el_a = row[\"Element 
Name\"]\n dis_dict[el_a] = {}\n for entry in row:\n if entry != \"Element Name\":\n dis_dict[el_a][entry] = float(row[entry])\n csvfile.close()\n return dis_dict", "def get_rows(csv):\n\n labels = csv[0][2:].split(',')\n\n # Convert each row into a hash of label: value\n return [dict(zip(labels, row.split(','))) for row in csv[1:]]", "def parse(file, delimiter):\n\n\t# Open CSV\n\topened_file = open(file)\n\n\t# Read CSV\n\tdata = csv.reader(opened_file, delimiter=delimiter)\n\n\t# Build data structure\n\tparsed_data = []\n\tfields = next(data)\n\tfor row in data:\n\t\tparsed_data.append(dict(zip(fields, row)))\n\n\t# Close CSV\n\topened_file.close()\n\n\treturn parsed_data", "def read_file(filename):\n\n data = {}\n with open(filename, encoding=\"utf8\") as file:\n reader = csv.DictReader(file)\n for line in reader:\n data[line['id']] = line\n return data", "def read_csv(csv_file_path):\n res = [] #list\n # f = open(csv_file_path) #read file\n with open(csv_file_path,\"r\") as f:", "def _process_csv_data(csv_file, user_data_map):\n with open(csv_file, 'r') as csvfile:\n rows = csv.reader(csvfile)\n for row in rows:\n if len(row) < 2:\n print('The CSV file is not in expected format.')\n raise Exception\n user_data_map[row[1].lower()] = row[0]", "def make_json(csvfile):\n\n data = []\n with open(csvfile, encoding='utf-8') as csvf:\n csvreader = csv.DictReader(csvf)\n for rows in csvreader:\n data.append(rows)\n\n return data", "def parse(csvfilename):\n table = []\n with open(csvfilename, \"r\") as csvfile:\n for line in csvfile:\n line = line.rstrip()\n columns = line.split(',')\n table.append(columns)\n return table", "def read_names_into_dict():\n d = dict()\n with open(\"SP_500_firms.csv\") as csvfile:\n input_file = csv.DictReader(csvfile)\n for row in input_file:\n #print(row)\n d[row['Symbol']] = [row['Name'],row['Sector']]\n return d", "def read_from_csvfile(fname, types, header=True):\n num_columns = len(types)\n parsed = []\n with open(fname) as f:\n reader = csv.reader(f)\n if header:\n columns = next(reader)\n else:\n columns = ['col '+str(i) for i in range(num_columns)]\n\n for row in reader:\n parsed.append([types[i](row[i]) if row[i] else None for i in range(num_columns)])\n\n return [{k:v for k,v in zip(columns, row)} for row in parsed]", "def read_csv_file(in_file):\n out_list = []\n with open(in_file, 'r', newline='') as p_file:\n file_list = csv.DictReader(p_file, delimiter=',')\n for row in file_list:\n out_list.append(row)\n return out_list\n #thequeue.put(out_list)", "def load_data(filename='KSI.csv'):\r\n d = []\r\n with open(filename) as csv_file:\r\n # csv_reader = csv.reader(csv_file, delimiter=',')\r\n csv_reader = csv.DictReader(csv_file, delimiter=',')\r\n for line_count, row in enumerate(csv_reader):\r\n if line_count == 0:\r\n print(f'Column names are \\n{\", \".join(row)}')\r\n # column_names = row\r\n else:\r\n d.append(row)\r\n # print(f'Processed {line_count} lines.')\r\n return d", "def parse_csv_from_file(file):\n csvFileReader = None\n employee_data = []\n\n # if FileStorage object (which has a save() method)\n if hasattr(file, 'save'):\n csvFileReader = csv.reader(codecs.iterdecode(file, 'utf-8'))\n # else if File object (which does not have a save() method)\n else:\n csvFileReader = csv.reader(file)\n\n for row in csvFileReader:\n employee_data.append({\n 'first_name': row[0],\n 'last_name': row[1],\n 'annual_salary': int(row[2]),\n 'super_rate': convert_to_float(row[3]),\n 'payment_period': row[4]\n })\n\n return employee_data", "def 
csv_dict_reader(file_obj, data = [], cost = []):\n reader = csv.DictReader(file_obj, delimiter=',')\n for line in reader:\n data.append(line[\"Дата\"]),\n cost.append(line[\"Расход\"])", "def read_tags(csv_path):\n tags = {}\n with open(csv_path, 'r') as csv_file:\n reader = csv.DictReader(csv_file)\n for row in reader:\n tags[row['image_name']] = row['tags'].split()\n return tags", "def load_data_from_csv(csv_file):\n list=[]\n\n with open(csv_file) as csv_1:\n csv_out = csv.reader(csv_1) \n next(csv_out)\n for rows in csv_out: \n if len(rows) != 0:\n list.append([rows[0],int(rows[1]),int(rows[2])])\n \n return (list)", "def get_data_from_csv(csv_file):\n # create customer list form csv file\n with open(csv_file, encoding='utf-8', errors='ignore') as people:\n customer_reader = csv.reader(people)\n customers = [row for row in customer_reader]\n\n return customers", "def read_csv(path):\r\n output = []\r\n for row in csv.DictReader(open(path)):\r\n output.append(row)\r\n return output", "def load_csv(path: Path) -> Any:\n with open(path, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n items = list(reader)\n return items", "def read_csv(path):\n output = []\n for row in csv.DictReader(open(path)):\n output.append(row)\n return output", "def read_csv_file(csv_file_path):\n file_names = []\n file_labels = []\n with open(csv_file_path, 'r') as files_path:\n path_list = csv.DictReader(files_path)\n fieldnames = path_list.fieldnames\n for path in path_list:\n file_names.append(path[fieldnames[0]])\n file_labels.append(path[fieldnames[1]])\n return file_names, file_labels", "def process_csv(csv_file=None):\n if csv_file:\n _process_csv_data(csv_file, USER_DATA_MAP)\n else:\n csv_files_list = [os.path.join(DATA_DIR, f) for f in os.listdir(DATA_DIR) if f.endswith('.csv')]\n for fl in csv_files_list:\n _process_csv_data(fl, USER_DATA_MAP)\n return USER_DATA_MAP", "def dictionary_formation():\r\n sales_data = {}\r\n with open('beer_data.csv', \"r\") as data_file:\r\n file_contents = csv.reader(data_file, delimiter=',')\r\n #Value of lines_read used as key value for each dictionary\r\n #in sales_data\r\n lines_read = 1\r\n for line in file_contents:\r\n if lines_read == 1:\r\n lines_read = lines_read + 1\r\n else:\r\n #Stores each column in row as key value in dictionary\r\n sales_data[str(lines_read)] = {\r\n \"invoice_number\": line[0],\r\n \"customer\": line[1],\r\n \"date_required\": line[2],\r\n \"recipe\": line[3],\r\n \"gyle_number\": line[4],\r\n \"quantity_ordered\": int(line[5])\r\n }\r\n lines_read = lines_read + 1\r\n data_file.close()\r\n return sales_data", "def load(filename):\n with open(filename,'r') as fd:\n csv_in = csv.reader(fd, delimiter=',', quotechar='\"')\n keys = csv_in.next()\n data = {k:[] for k in keys}\n for row in csv_in:\n for k,v in zip(keys,row):\n data[k].append(float(v))\n return data", "def parse_prefeaturized_csv_data(features_file_path):\n objects = []\n with open(features_file_path) as f:\n # First line contains column titles\n keys = f.readline().strip().split(',')\n for line in f:\n vals = line.strip().split(\",\")\n if len(vals) != len(keys):\n continue\n else:\n objects.append({})\n for i in range(len(keys)):\n objects[-1][keys[i]] = vals[i]\n return objects", "def read_sailor_data(filename):\n\td=OrderedDict()\n\twith open(filename) as csvfile:\n\t\trdr = csv.reader(csvfile)\t\n\t\tfor i in rdr:\n\t\t\t#This except is so that if the line trying to be inputted into the dictionary is a string\n\t\t\t#It will ignore it and go to the next 
line\n\t\t\ttry: d[i[0]]=(float(i[1]),float(i[2]))\n\t\t\texcept: None\n\treturn d", "def load_csv(filepath):\n log.debug('Loading csv')\n with open(filepath) as csvfile:\n reader = csv.DictReader(csvfile)\n return reader.fieldnames, list(reader)", "def load_csv_to_dict(filename):\n row_len = list()\n result = dict()\n with open(filename, 'r') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n key = row[0].strip()\n values = [v.strip() for v in row[1:]]\n result[key] = values\n row_len.append(len(values))\n return result, max(row_len)", "def load_csv(filename):\n results = defaultdict(list)\n with open(filename, 'r') as f:\n reader = csv.DictReader(f)\n for line in reader:\n results[line['sid']].append(line)\n return results", "def readCSV(filename, separator):\n \n filetoread = open(filename, \"r\")\n lines = []\n for line in filetoread:\n line = line.replace(\"\\n\", \"\").split(separator)\n lines.append(line)\n keys, values = lines[0], lines[1]\n dictionary = {}\n for i in range(0,len(keys)):\n try:\n dictionary[keys[i]] = int(values[i])\n except:\n dictionary[keys[i]] = values[i]\n return dictionary", "def read_csv(filename):\n with open(filename, 'rb') as f:\n reader = unicodecsv.DictReader(f)\n return list(reader)", "def populate_table_from_csv(csv_file, csv_encoding='iso-8859-15'):\n try:\n with open(file=csv_file, mode='r', encoding=csv_encoding) as input_file:\n # Could find a good place to add iterators/generators/comprehensions elsewhere, so made a new function\n # Also, yet another pylint false positive. The below line isn't supposed to be assigned to anything.\n [add_customer(*l.split(',')) for l in input_file if 'Id,Name,Last_name,' not in l] # pylint: disable=W0106\n except Exception as e:\n logger.error(\"Failed to load records from csv file %s into database %s: %s\", csv_file, customer_db.database, e)", "def read_data(options):\n reader = csv.reader(open(options.datafile, \"U\"))\n raw_rows = list(reader)\n fieldnames = raw_rows[0]\n rows = []\n for raw_row in raw_rows[1:]:\n row = {}\n try:\n for i in range(len(fieldnames)):\n row[fieldnames[i]] = raw_row[i]\n rows.append(row)\n except IndexError:\n print \"SHORT ROW:\", raw_row\n print \"Read %d rows\" % len(rows)\n return rows", "def parse_csv(csv_file):\n if os.path.isfile(csv_file) == True:\n num_lines = sum(1 for line in open(csv_file))\n if num_lines > 1:\n try:\n data = pd.read_csv(csv_file, index_col=False)\n data.insert(0, 'id', range(1, 1 + len(data)))\n return(data)\n except pd.parser.CParserError, err:\n message = \"Can't parse REDCap data. 
Check CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(3)\n else:\n message = \"CSV file does not contain data: \" + csv_file\n print(message)\n logging.warning(message)\n return(None)\n else:\n message = \"Can't read CSV file: \" + csv_file\n print(message)\n logging.critical(message)\n exit(4)", "def load_csv(file):\n import csv\n reader = csv.reader(open(file, 'r'))\n columns = reader.next()\n c2i = dict((columns[i], i) for i in range(len(columns)))\n data = {}\n excluded = set([REP_CSV_HED_TIM, REP_CSV_HED_HER])\n for row in reader:\n \n # get relevant info from the line\n time = float(row[c2i[REP_CSV_HED_TIM]])\n hero = row[c2i[REP_CSV_HED_HER]]\n other = dict((c, REP_CSV_HANDLERS.get(c, REP_CSV_DEFHANDLER)(row[c2i[c]])) for c in columns if c not in excluded)\n \n # add to the data dictionary\n if hero not in data: data[hero] = []\n data[hero].append([time] + [other])\n \n return data", "def read_csv(filename):\n\twith open(filename, newline = '') as filehandle:\n\t\t\n\t\t# create DictReader objects for inputs and read into memory\n\t\treader = csv.DictReader(filehandle, delimiter = '\\t')\n\t\tdata = []\n\t\tfor row in reader:\n\t\t\tdata.append(row)\n\t\t\t\n\treturn data", "def _convert_csv_column_to_dict(csv_data, column):\n results = dict()\n\n for row in csv_data:\n key = row[0]\n data = row[1:][column]\n\n if data:\n if key not in results:\n results[key] = data.strip() if data else \"\"\n else:\n # append multiple choice questions\n results[key] += \"|{0}\".format(data.strip())\n\n return results", "def csvfileUsage(self):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.DictReader(file_obj, delimiter=',') # CSV DictReader object\n \"\"\" reader.fieldnames returns header , slicing intial 'Month' and\n 'Year' header from list\n \"\"\"\n for com_names in reader.fieldnames[2:]:\n self.company_data[com_names] = {}\n # iterating each row\n for row in reader:\n month, year = self.parse_my(row) # parsing the year and month from row\n # pop the `Month` and `Year` Key to minimize iteration below\n row.pop('Month'), row.pop('Year')\n \"\"\" saving and updating the data at same point of time\n each iteration time, checking the max value and updating \n `Month` `Year` and `Value`\n \"\"\"\n self.prepare_company_data(month, year, row, self.company_data)\n file_obj.close() # close file\n return self.company_data", "def read_file(filepath: str) -> dict:\n if not filepath.endswith(\".csv\"):\n raise RuntimeError(\"File extension must be .csv\")\n\n people = {}\n with open(filepath) as csv:\n for line in csv:\n email, person = Parser.parse_line(line.rstrip(\"\\n\"))\n if email not in people:\n people[email] = person\n else:\n print(\"Ignoring person with duplicate email {}\".format(email))\n return people", "def get_field_list(filename):\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n field_mapping = {}\n for row in reader:\n field_mapping[row[0]] = row[1]\n return field_mapping", "def read(path):\n \n file = open(path, 'r', encoding = 'utf-8')\n reader = csv.reader(file, delimiter = '\\t', quotechar = '', quoting = csv.QUOTE_NONE)\n result = []\n header = reader.__next__()\n for values in reader:\n entry = {}\n for i in range(len(header)):\n entry[header[i]] = values[i]\n result.append(entry)\n file.close()\n return result", "def read_csv_file(file, delimiter=None):\n if delimiter is None: # try delimiters ; and ,\n delimiter = [';', ',']\n else:\n delimiter = [delimiter]\n\n for delim in delimiter:\n 
with open(file, encoding=\"utf8\") as fh:\n rd = csv.DictReader(fh, delimiter=delim)\n runs = [dict(row) for row in rd]\n if len(runs[0].keys()) > 1:\n logging.debug('Read csv data:')\n logging.debug(runs)\n return runs", "def load_data(filename):\n data = dict()\n with open(filename) as f:\n reader = csv.DictReader(f)\n for row in reader:\n name = row[\"name\"]\n data[name] = {\n \"name\": name,\n \"mother\": row[\"mother\"] or None,\n \"father\": row[\"father\"] or None,\n \"trait\": (True if row[\"trait\"] == \"1\" else\n False if row[\"trait\"] == \"0\" else None)\n }\n return data", "def load_csv(csvpath):\n with open(csvpath, \"r\") as csvfile:\n data = []\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n # Skip the CSV Header\n next(csvreader)\n\n # Read the CSV data\n for row in csvreader:\n data.append(row)\n return data", "def read_movies_from_csv(filename, header):\n try:\n with open(filename, \"r\") as csvfile:\n movies = []\n reader = csv.DictReader(csvfile, header)\n for row in reader:\n movie = media.Movie(row[\"title\"],\n row[\"storyline\"],\n row[\"poster_image\"],\n row[\"trailer_youtube\"])\n movies.append(movie)\n except IOError:\n print(\"\\\"{}\\\" not found. Exiting\".format(filename))\n exit()\n else:\n if not csvfile.closed:\n csvfile.close()\n return movies", "def csv_dict_reader(file_obj):\r\n with open('heatmap_data_10_200_out.csv','wb') as file:\r\n\t reader = csv.DictReader(file_obj, delimiter=',')\r\n\t for line in reader:\r\n\t \t# data = \"{location: new google.maps.LatLng(\" + str(line[\"latitude\"]) + \", \" + str(line[\"longitude\"]) + \") , weight: \" + str(float(line[\"predicted_price\"])/1000) + \" }, \"\r\n\t \tdata = line[\"predicted_price\"] + \";\" + str(line[\"latitude\"]) + \"; \" + str(line[\"longitude\"]) \r\n\t \t# data = \"new google.maps.LatLng(\" + str(line[\"latitude\"]) + \", \" + str(line[\"longitude\"]) + \"),\"\r\n\t \tprint data\r\n\t # print(line[\"latitude\"]),\r\n\t # print(line[\"longitude\"])\r\n\r\n\t \r\n\t \tfile.write(data)\r\n\t \tfile.write('\\n')", "def parse_csv(csv_path):\n song_list = []\n\n try:\n with open(csv_path, encoding='utf-8') as playlist:\n print(\"Parsing \" + csv_path)\n reader = csv.reader(playlist, delimiter=',')\n next(reader) # skip csv header\n for row in reader:\n song_list.append(row[2] + \" - \" + row[1])\n # todo: parse CSV, then check to see which songs already exist in current dir\n # move non-existent results to new list and return that\n except IndexError as error:\n # consider validating playlists when parsing\n # from API on web server instead\n print(str(error))\n \n return song_list" ]
[ "0.7621891", "0.7575346", "0.74887645", "0.7393947", "0.7317759", "0.73076624", "0.7262524", "0.72616434", "0.71987087", "0.71802855", "0.71753365", "0.7114956", "0.7059726", "0.70039433", "0.6988675", "0.6970228", "0.6970228", "0.6946911", "0.694326", "0.6926613", "0.690651", "0.689248", "0.6891468", "0.68752813", "0.68582946", "0.6845266", "0.682375", "0.680928", "0.68076706", "0.6799992", "0.6798913", "0.6792443", "0.67892015", "0.67817336", "0.67814535", "0.67610663", "0.6760703", "0.6754578", "0.6748893", "0.6739559", "0.6733602", "0.6727984", "0.67212546", "0.67172897", "0.6715123", "0.67090803", "0.6677984", "0.6676487", "0.6668265", "0.66385156", "0.66217774", "0.66193056", "0.6617197", "0.6610454", "0.6607548", "0.6586532", "0.6567929", "0.65585387", "0.6542254", "0.65390015", "0.6530461", "0.6529214", "0.6516734", "0.6508187", "0.65004575", "0.64837337", "0.6477936", "0.6477437", "0.647474", "0.64746004", "0.6468874", "0.64616925", "0.6459871", "0.6440052", "0.64377576", "0.64360845", "0.64360154", "0.6420435", "0.6412988", "0.63936245", "0.63892347", "0.6389114", "0.63858503", "0.638074", "0.6377633", "0.6368581", "0.63598144", "0.6359184", "0.635599", "0.6355695", "0.6348888", "0.6347076", "0.6326281", "0.6316668", "0.630983", "0.63057953", "0.63043934", "0.62968916", "0.6295421", "0.62950504" ]
0.67380226
40
This function takes any , converts it into a string via the str() function (if possible), and then writes it to a file located at . Note that this function is a general writing function. It should not use the .csv module, but rather the python write() function. This is because we want this function to write ANYTHING we give it as the parameter (in the case of this assignment, you will actually use it to write string representations of the class instances you create).
def write_to_file(filepath, data):
    with open(filepath, 'w') as f:
        f.write(str(data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def writeToCSV(self, filepath):\r\n\t\twith open(filepath, 'w') as outputFile:\r\n\t\t\toutputFile.write(str(self))", "def export_to_file(data, filename='class_data.txt', mode='a'):\n with open (filename, mode) as f:\n if mode == \"w\":\n for record in data:\n line = \",\".join(record)\n f.write(line + \"\\n\")\n elif mode == \"a\":\n line = \",\".join(data)\n f.write(line + \"\\n\")\n else:\n raise ValueError('Wrong write mode')", "def writeToFile(self, output):\n f = open(output, 'w')\n data = [self.columns, self.chars, self.pwdLength, self.func.__name__]\n data = [str(x) for x in data]\n f.write(\" \".join(data))\n f.write(\"\\n\")\n f.write(repr(self))\n f.close()", "def CSVWriter (iterable, outLoc, header=\"\", ):\n if not iterable:\n print (\"nothing to write\")\n return 0\n\n out = open(outLoc, 'w')\n\n if header:\n out.write(header+'\\n')\n\n #Only works if iterable is a nested list\n for member in iterable:\n for item in member:\n out.write(str(item)+',')\n out.write('\\n')\n\n print(\"write to \"+outLoc+\" successful.\")\n return 1", "def create_writer():\n # python 2 and 3 handle writing files differently\n if sys.version_info[0] <= 2:\n output = io.BytesIO()\n else:\n output = io.StringIO()\n writer = csv.writer(output)\n return writer, output", "def write(self, str: str, /) -> None:", "def writeCSV(filename, separator, data):\n \n filetowrite = open(filename, \"w\")\n values = []\n i = 0 #Count the number of objects already written\n for item in data:\n filetowrite.write(item)\n i += 1\n if i < len(data.keys()):\n filetowrite.write(separator)\n values.append(data[item])\n filetowrite.write(\"\\n\")\n i = 0\n for value in values:\n filetowrite.write(str(value))\n i += 1\n if i < len(values):\n filetowrite.write(separator)\n \n filetowrite.close()", "def save_data(self, data, weight_class, output_path=None):\n if not output_path:\n output_path = self.output_path\n if not os.path.exists(output_path):\n os.makedirs(output_path)\n \n print(\"saving data for \" + weight_class)\n data.to_csv(output_path + weight_class + \".txt\", sep=\"|\", index=False)", "def write_to_file(self, filename: str) -> None:", "def _simple_write(filename, obj, fmt=None):\n string = str(obj) if fmt is None else ('{' + fmt + '}').format(obj)\n with open(filename, 'w+') as file:\n file.write(string)", "def test_to_file(self):\n with TemporaryDirectory() as tmp:\n df_test = make_simple_dataframe()\n Base = BaseDataClass.from_object(df_test)\n fp_save = os.path.join(tmp, \"test_save.csv\")\n Base.to_file(fp_save)\n assert os.path.exists(fp_save)", "def export_to_file(self, fp, *args, **kwargs):\n with open(fp, 'w') as fh:\n self._to_str(fh)", "def save(self, path, separator=\",\", encoder=lambda j,v: v):\n # csv.writer will handle str, int, float and bool:\n s = StringIO()\n w = csv.writer(s, delimiter=separator)\n w.writerows([[encode_utf8(encoder(j,v)) for j,v in enumerate(row)] for row in self])\n f = open(path, \"wb\")\n f.write(BOM_UTF8)\n f.write(s.getvalue())\n f.close()", "def csv_writer(data, path):\n\n with open(path, \"a\") as csv_file:\n\n writer = csv.writer(csv_file,delimiter=',')\n\n \n\n writer.writerow(data)", "def csvWriter(data, out_file):\n print '[+] Writing CSV output.'\n logging.info('Writing CSV to ' + out_file + '.')\n headers = ['ID', 'Name', 'Path', 'Session ID', 'Count', 'Last Used Date (UTC)', 'Focus Time (ms)', 'Focus Count']\n\n with open(out_file, 'wb') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=headers, extrasaction='ignore')\n # Writes the header from list 
supplied to fieldnames keyword argument\n writer.writeheader()\n\n for i, dictionary in enumerate(data):\n # Insert the 'ID' value to each dictionary in the list. Add 1 to start ID at 1 instead of 0.\n dictionary['ID'] = i + 1\n # Convert the FILETIME object in the fourth index to human readable value\n dictionary['Last Used Date (UTC)'] = fileTime(dictionary['Last Used Date (UTC)'])\n writer.writerow(dictionary)\n\n csvfile.flush()\n csvfile.close()\n msg = 'Completed writing CSV file. Program exiting successfully.'\n print '[*]', msg\n logging.info(msg)", "def write_to_file(data, method, delimiter):\r\n output_file = 'data.csv'\r\n with open(output_file, method, newline='', encoding='utf-8') as file:\r\n writer = csv.writer(file, delimiter=delimiter)\r\n writer.writerows([data])", "def save_data(csv_text, file_name=None, station_id=None, period=None,\n debug=False):\n if file_name is None and station_id is not None:\n if period is None:\n period = \"000000-000000\"\n file_name = f\"jma-{station_id}-{period}.csv\"\n elif file_name is None:\n raise ValueError(\n \"ERROR: both file_name and station_id are not specified.\")\n if debug:\n print(\"saved:\", file_name)\n with open(file_name, \"w\") as fd:\n fd.write(csv_text)", "def writeFile( str_, *args ):\n filePath = path.join( *args )\n with open( filePath, 'w' ) as fd:\n fd.write(str_)", "def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")", "def write(self, outputFile):\n \n try: \n f = open(outputFile + '.py', 'w')\n for trail in self.trails: \n f.write(\"[\")\n for index in trail:\n f.write(\"({0}, {1}), \".format(*index)) \n f.write(\"]\\n\")\n \n except IOError, e:\n msg = \"Exception encountered when attempting \" + \\\n \"to write data to file: {0}.\" + \\\n \"\\n\\t -- Exception was: {1}\" + \\\n \"\\n\\t For help use --help\".format(outputFile, e)\n raise Usage(e)", "def to_csv_file_obj(self, rows):\n output = StringIO.StringIO()\n writer = csv.writer(output)\n writer.writerows(rows)\n return output", "def csv_writer(data, path):\n\twith open(path, \"wb\") as csv_file:\n\t\twriter= csv.writer(csv_file, delimiter=',')\n\t\twriter.writerows(data)", "def save_to_file_csv(cls, list_objs):\n list_dictionaries = []\n if list_objs is None or list_objs == []:\n string_dictionary = \"[]\"\n else:\n for _obj_dict in list_objs:\n list_dictionaries.append(_obj_dict.to_dictionary())\n string_dictionary = Base.to_json_string(list_dictionaries)\n with open(cls.__name__ + \".csv\", \"w\") as _file:\n _file.write(string_dictionary)\n _file.close()", "def write_csv(data, arg2, arg3):\n\n with open(\"data.csv\", newline='') as csvfile:\n # csv writer object\n writer = csv.writer(csvfile)\n\n # to write to file: writer.writerow([thing1, thing2, ...])", "def save_data_to_file(file_name, list_of_product_objects):\r\n try:\r\n objF = open(file_name, \"w\")\r\n for row in list_of_product_objects:\r\n objF.write(str(row[0]) + \",\" + str(row[1]) + \"\\n\")\r\n objF.close()\r\n except IOError:\r\n print(\"Unable to locate file\")", "def write(self, args):\n\t\tnewcsvfile = self.filename[:len(self.filename)-4] + \"NEW.csv\" #clever naming MIGHT NEED TO CHANGE THIS LATER/OVERWRITE OLD FILE?\n\t\twith open(newcsvfile, 'wb') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(self.all_likes)", "def save_to_file_csv(cls, list_objs):\n with open(cls.__name__ + \".csv\", \"w\", newline='') as f:\n if cls.__name__ == \"Rectangle\":\n fieldnames = ['id', 'width', 'height', 'x', 'y']\n elif cls.__name__ == \"Square\":\n fieldnames = 
['id', 'size', 'x', 'y']\n writer = csv.DictWriter(f, fieldnames=fieldnames)\n writer.writeheader()\n if list_objs is not None:\n for model in list_objs:\n writer.writerow(model.to_dictionary())", "def save_csv(data): \n bank_data = data\n\n #Creating headers for the csv file\n header = [\"Lender\", \"Max Loan Amount\", \"Max LTV\", \"Max DTI\", \"Max Credit Score\", \"Interest Rate\"]\n\n #Creating output path of the CSV file\n csvpath = Path(\"save_file.csv\")\n\n #Opening the csv file in csvpath by using the open() method\n with open(csvpath, \"w\", newline='') as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n csvwriter.writerow(header)\n for row in bank_data:\n csvwriter.writerow(row)\n\n return data", "def write(obj, filename):\n with open(filename, 'w') as f:\n print(*obj, sep='\\n', file=f)", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def save_class_list():\r\n try:\r\n classStringList.clear() #clear the classString List\r\n for i in range(0,len(classes)):\r\n classStringList.append(classes[i].csvRow()) #enter classes to the classStringList from the classes\r\n f = open(\"mySchedule.csv\", 'w', newline ='')\r\n csv.writer(f).writerow([\"Day\", \"Class\", \"Start Time\", \"End Time\"])\r\n for classCSVString in classStringList:\r\n csv.writer(f).writerow(classCSVString)\r\n f.close()\r\n except Exception as e:\r\n print(\"Exception found:\" + e)", "def _toFile(self):\n pass", "def write(self, file: IO) -> None:\n serializer = self.serializer_class(self.get_queryset(), many=True)\n\n writer = csv.DictWriter(file, self.serializer_class.Meta.fields)\n writer.writeheader()\n\n # Write serializer data and replace None/'' with 'NA'\n writer.writerows(\n OrderedDict(\n (\n field_name,\n \"NA\" if (field_value is None or field_value == \"\") else field_value,\n )\n for field_name, field_value in row.items()\n )\n for row in serializer.data\n )\n\n file.seek(0)", "def write_to_file(info: List[str]) -> None:\n return", "def write_CLASS_txtfile(input_file_name, data):\n output_file = open(input_file_name, 'w')\n output_file.write('Human Metabolome CLASS database')\n output_file.write('\\n\\n')\n\n for line in data:\n output_file.write(str(line) +'\\n')", "def write(self, values, file_obj, format=None):\n pass", "def save_to_file_csv(cls, list_objs):\n f_name = cls.__name__ + \".csv\"\n with open(f_name, 'w', newline='') as f:\n if list_objs is None or list_objs == []:\n f.write(\"[]\")\n\n else:\n if cls.__name__ == 'Rectangle':\n h = ['id', 'width', 'height', 'x', 'y']\n else:\n h = ['id', 'size', 'x', 'y']\n ncsv = csv.DictWriter(f, fieldnames=h)\n for obj in list_objs:\n ncsv.writerow(obj.to_dictionary())", "def write_to_files(section, csv_path, srt_path):\n write_to_csv(section, csv_path)\n write_to_srt(section, srt_path)", "def write(data):", "def save(self,file):\n\n with open(file,\"w\") as f:\n f.write(self.to_string())", "def csv_writer(data, path):\r\n with open(path, \"w\") as csv_file:\r\n writer = csv.writer(csv_file, delimiter=',')\r\n for line in data:\r\n writer.writerow(line)", "def write_out(message, fp):\n with open(fp, 'wb') as csvfile:\n writer = 
csv.writer(csvfile, delimiter=',', quotechar='\"')\n writer.writerow([message])", "def csv_writer(data, path):\n with open(path, \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)", "def csv_writer(data, path):\n with open(path, \"w\", newline='') as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)", "def write(file, string, codec='utf8'):\n if isinstance(string, str):\n file.write(string)\n elif isinstance(string, unicode):\n file.write(string.encode(codec))", "def write(self): \n # Open csv file\n with open(self.file_name, 'w', newline='') as file:\n self._writer = csv.writer(file)\n \n # Write header rows\n# self.write_sim_header_data(self.trace.sim.get_data())\n \n # Write trace table\n self._writer.writerow(['Record #', 'Rep', 'Time',\n 'Priority', 'Record Type', 'Name'])\n for trace_record in self.trace._record_list:\n self._writer.writerow(trace_record.get_row())\n file.close()", "def write_data_files(self):\n # build our strings\n header_string = \"\"\n data_string = \"\"\n for value in self.data.values():\n header_string += value[2] + \",\"\n if value[0] != None:\n data_string += value[1].format(value[0])\n else:\n data_string += \",\"\n # remove the extra comma and replace with a newline\n header_string = header_string[:-1]\n header_string += \"\\n\"\n data_string = data_string[:-1]\n data_string += \"\\n\"\n \n # show what we built\n #print(header_string)\n #print(data_string)\n \n # open a temp file\n with open(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"w\") as temp_file:\n #temp_file.write(header_string)\n temp_file.write(data_string)\n \n # move to the input file\n filetools.mv(\"{:s}\\\\VWSInput\\\\temp_data.csv\".format(self.path), \"{:s}\\\\VWSInput\\\\data.csv\".format(self.path))\n \n return", "def write(self, s):\n ...", "def csv_writer(data, path):\n with open(path, \"wb\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n for line in data:\n writer.writerow(line)", "def csvWriter(asin, price, name):\n # NOT USED\n date = arrow.now().format('YYYY/MM/DD')\n headers = ['Date', 'ASIN', 'Price', 'Name']\n with open('CSVs/' + asin + '.csv', 'w') as newWrite:\n writer = csv.writer(newWrite)", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def write(self, file_variable):\n file_variable.write(\",\".join(map(str, [self.code, self.title, self.credit, self.term])))", "def save(self, data, outpath):\n data.to_csv(outpath)", "def csv_writer(filepath, seqs):\n with open(filepath, 'w') as f:\n f.write('\\n'.join([','.join(\n ['\"{}\"'.format(r) \n if (' ' in r) or (',' in r) else r\n for r in s])\n for s in seqs]))", "def save_csv(net, wires, net_id, chip_id, chip):\n with open('output/output.csv', 'w') as file:\n # Write first line\n output = csv.writer(file)\n output.writerow([\"net\", \"wires\"])\n\n # Index and fill the body\n for step in range(len(wires)):\n output.writerow([net[step],wires[step]])\n\n # End of file\n output.writerow([f\"chip_{chip_id}_net_{net_id}\", chip.cost])", "def save_to_file_csv(cls, list_objs):\n ld = []\n with open(cls.__name__ + \".csv\", \"w\", 
encoding=\"utf-8\") as f:\n if list_objs:\n for obj in list_objs:\n if cls.__name__ == 'Rectangle':\n ld.append([\n obj.id, obj.width, obj.height, obj.x, obj.y])\n if cls.__name__ == 'Square':\n ld.append([obj.id, obj.size, obj.x, obj.y])\n writer = csv.writer(f)\n for row in ld:\n writer.writerow(row)", "def dataSave():\n # NR5G = gui_reader()\n try: #Python3\n f = open(__file__ + \".csv\",'wt', encoding='utf-8')\n except:\n f = open(__file__ + \".csv\",'wb')\n f.write('%s,'%(entryCol.entry0.get()))\n f.write('%s,'%(entryCol.entry1.get()))\n f.write('%s,'%(entryCol.entry2.get()))\n f.write('%s,'%(entryCol.entry3.get()))\n f.close()\n print(\"DataSave: File Saved\")", "def save_data_to_file(file_name, list_of_product_objects):\r\n objfile = open(file_name, 'w')\r\n for row in list_of_product_objects:\r\n objfile.write(row.product_name + \",\" + str(row.product_price) + \"\\n\")\r\n objfile.close()", "def csv_writer(file_path, data):\n with open(file_path, \"a+\") as f:\n #writer = csv.writer(f, delimiter=',')\n writer = csv.writer(f, lineterminator='\\n')\n writer.writerows(data)\n f.close()", "def csv_writer(data, path, arr):\n with open(path, \"w\", newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames = arr)\n for line in data:\n writer.writerow(line)", "def writeToFile(ruleset, className, classValue, fp):\n size = len(ruleset)\n if (size != 0):\n for i in range(size):\n j=0\n while(j<len(ruleset[i])-1):\n fp.write(str(ruleset[i][j]).replace(\"'\", \"\")+' & ')\n j = j+1\n fp.write(str(ruleset[i][j]).replace(\"'\", \"\")+\" -> (\"+className+\", \"+classValue+\") \\n\")\n fp.close()", "def writeTypedClassadAttrToFile(fd, attr_name, attr_value):\n if isinstance(attr_value, (int, long, float)):\n # don't quote numeric values\n fd.write('%s = %s\\n' % (attr_name, attr_value))\n else:\n escaped_value = string.replace(string.replace(str(attr_value), '\"', '\\\\\"'), '\\n', '\\\\n')\n fd.write('%s = \"%s\"\\n' % (attr_name, escaped_value))", "def fs_write(obj, file_path):\n try:\n with open(str(file_path), 'w') as f:\n f.write(obj)\n return obj\n except TypeError as e:\n raise e", "def to_file(self, filename=None):\n name = None\n if filename is not None:\n name = filename\n elif self.name:\n name = self.name\n\n if name:\n #f = open(self.name, 'w')\n f = codecs.open(name, 'w', encoding='utf-8')\n self.seek(0)\n f.write(self.read())\n f.close()\n else:\n print \"No log_name for this log\"", "def _write_to_file(self, string):\n with open(self.p.base_dir + '/' + self.p.filename, 'w') as f:\n f.write(string)", "def outputFunc(filename, parks,roading,private):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n writer.writerow(days)\n writer.writerow(parks)\n writer.writerow(roading)\n writer.writerow(private)\n finally:\n f.close()", "def write(self, file_obj, file_format):\n if ( file_format.upper() == 'FASTA' ):\n write_func = write_fasta\n #elif ( file_format.upper() == 'NEXUS' ):\n # write_func = write_nexus\n #elif ( file_format.upper() == 'PHYLIP' ):\n # write_func = write_phylip\n #elif ( file_format.upper() == 'COMPACT' ):\n # write_func = write_compact \n #elif ( file_format.upper() == 'COMPACT2' ):\n # write_func = write_compact2 \n #elif ( file_format.upper() == 'COMPACT3' ):\n # write_func = write_compact3\n else:\n write_func = write_fasta\n write_func(self, file_obj)", "def create_csvWriter(csvFile):\n csvWriter = csv.writer(csvFile)\n csvWriter.writerow(['College Name',\n 'City',\n 'State',\n 'ZipCode',\n 'Selectivity',\n 
'file_name'])\n return csvWriter", "def _file_writer(file, extension, output_data, headers):\n if extension == 'csv':\n separator = ','\n else:\n separator = ' '\n writer = csv.writer(file, delimiter=separator, quotechar='|')\n if headers is True:\n writer.writerow(['Login', 'P/N', 'Password', 'P/N'])\n writer.writerows(output_data)", "def write (self, path):\n\t\ts=[]; add=s.append\n\t\tadd ('\\t'.join (self.schema))\n\t\tfor record in self.data:\n\t\t\tadd (record.asTabDelimitedRecord())\n\t\t\n\t\t# f = open (path, 'w')\n\t\tf = codecs.open(path, 'w', 'utf-8')\n\t\tf.write (self.linesep.join (s))\n\t\tf.close()\n\t\tprint (\"data written to \" + path)", "def _CsvFunc(self, obj=None, verbose=False, use_pager=None, to_file=None):\n if obj is not None:\n self._printed_variables.append(obj)\n lines = describe.GenerateLines(obj, verbose=verbose, recursive=False,\n format_name='csv')\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)", "def object_export_save(simulation, object_name, dir):\n query = get_query(object_name, simulation)\n # To avoid conflict if two users export a file at the same time, we\n # generate a random name for the export file.\n filename = dir + '/' + object_name + 's.tsv'\n\n with codecs.open(filename, 'w', encoding='utf8') as f:\n if object_name == 'centroid':\n filename = dir + '/zones.tsv'\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'crossing':\n filename = dir + '/Intersections.tsv'\n fields = ['id', 'name', 'x', 'y', 'db_id']\n elif object_name == 'link':\n filename = dir + '/links.tsv'\n fields = ['id', 'name', 'origin', 'destination', 'lanes', 'length',\n 'speed', 'capacity', 'vdf']\n elif object_name == 'function':\n filename = dir + '/functions.tsv'\n fields = ['id', 'expression']\n writer = csv.writer(f, delimiter='\\t')\n if object_name in ('centroid', 'crossing'):\n writer.writerow(['id', 'name', 'x', 'y', 'db_id'])\n values = query.values_list('user_id', 'name', 'x', 'y', 'id')\n elif object_name == 'function':\n writer.writerow(['id', 'name', 'expression'])\n values = query.values_list('user_id', 'name', 'expression')\n elif object_name == 'link':\n writer.writerow(['id', 'name', 'lanes', 'length', 'speed',\n 'capacity', 'function', 'origin', 'destination'])\n values = query.values_list('user_id', 'name', 'lanes', 'length',\n 'speed', 'capacity', 'vdf__user_id')\n # Origin and destination id must be converted to user_id.\n centroids = get_query('centroid', simulation)\n crossings = get_query('crossing', simulation)\n ids = list(centroids.values_list('id', 'user_id'))\n ids += list(crossings.values_list('id', 'user_id'))\n # Map id of nodes to their user_id.\n id_mapping = dict(ids)\n origins = query.values_list('origin', flat=True)\n origins = np.array([id_mapping[n] for n in origins])\n destinations = query.values_list('destination', flat=True)\n destinations = np.array([id_mapping[n] for n in destinations])\n # Add origin and destination user ids to the values array.\n origins = np.transpose([origins])\n destinations = np.transpose([destinations])\n if values:\n values = np.hstack([values, origins, destinations])\n writer.writerows(values)\n\n return filename", "def write(self, filename, data):\n raise NotImplementedError", "def string_to_file(path_to_file, string_to_write):\n\t\twith open(path_to_file, 'w+') as f:\n\t\t\tf.write(string_to_write)", "def save_to_file_csv(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.csv\" % cls.__name__, 
mode='w') as f:\n f.write(Base.to_json_string(l))", "def write(self, object, content_type, to_file):\n return to_file", "def save_as_csv(time_series, data, path_and_file_name):\n\n parent_name = \"test\"\n parent_uqid = uuid.uuid4()\n\n file_obj = open(path_and_file_name, 'w')\n file_obj.write('version,'+str(2)+'\\n')\n file_obj.write('numOfCH,'+str(1)+'\\n')\n file_obj.write('type, scan\\n')\n file_obj.write('ch_type,'+str(0)+'\\n')\n\n file_obj.write('carpet pos,'+str(0)+'\\n')\n file_obj.write('parent_name,'+str(parent_name)+'\\n')\n file_obj.write('parent_uqid,'+str(parent_uqid)+'\\n')\n file_obj.write('parent_filename,'+str(path_and_file_name)+'\\n')\n\n file_obj.write('pc, 0\\n')\n file_obj.write('Time (ns), CH0 Auto-Correlation\\n')\n for time_step in range(0, time_series.shape[0]):\n file_obj.write(str(float(time_series[time_step]))+','+str(data[time_step])+ '\\n')\n file_obj.write('end\\n')\n\n file_obj.close()", "def apple_search_csv_writer(file: IO, data: str) -> callable:\n file_writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n file_writer.writerows(data)\n return None", "def csvWrite(self, data, csvFileName):\n\twith open(csvFileName, 'w') as csv_file:\n\t\twriter = csv.writer(csv_file)\n\t\t\tfor key, value in data.items():\n\t\t\t\twriter.writerow([key,value])", "def save_csv(filename, rows):\n with open(filename, 'w', newline='', encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow([\n 'title', 'runtime', 'genre(s)', 'director(s)', 'writer(s)',\n 'actor(s)', 'rating(s)', 'number of rating(s)'\n ])\n\n writer.writerows(rows)", "def to_file(self, outfile):\n\n with open(outfile, \"w\") as outf:\n outf.write(self.to_string())", "def _writeRecord(self, path, name, data):\n file_path = os.path.join(path, name)\n with open(file_path, 'w') as f:\n for item in data:\n f.write(str(item)+'\\t')\n f.write('\\n')", "def write(self, filename):\n pass", "def write(self, filename):\n pass", "def write_table_to_file(table):\n with open(\"story.csv\", \"w\") as file:\n for record in table:\n row = ';'.join(record)\n file.write(row + \"\\n\")", "def save(string, file):\n\n save_file = open(file, 'w')\n save_file.write(string)\n save_file.close()", "def save_to_file_csv(cls, list_objs):\n r_fields = ['id', 'width', 'height', 'x', 'y']\n s_fields = ['id', 'size', 'x', 'y']\n filename = cls.__name__ + \".csv\"\n new_list = []\n with open(filename, \"w\") as fp:\n if cls.__name__ == \"Rectangle\":\n dict_writer = csv.DictWriter(fp, fieldnames=r_fields)\n elif cls.__name__ == \"Square\":\n dict_writer = csv.DictWriter(fp, fieldnames=s_fields)\n dict_writer.writeheader()\n for objs in list_objs:\n dict_writer.writerow(objs.to_dictionary())", "def write(self, out):", "def write(self):", "def write(self):", "def save_to_csv(today, task, description, hours, start_time, end_time):\n fee = '$5'\n with open('timeTracker.csv', 'a', newline='') as file:\n fieldnames = ['Date', 'Task Name', 'Description', 'Start Time',\n 'End Time', 'Number of hours', 'Price per hour', 'Fee Charged']\n writer = csv.DictWriter(file, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerow({'Date': today, 'Task Name': task, 'Description': description, 'Start Time': start_time, 'End Time': end_time,\n 'Number of hours': hours, 'Price per hour': fee, 'Fee Charged': price})", "def write( data ):", "def write_into_csv(self, loc_details=[], itype='atm', mode='w'): \n \n if itype==\"brc\":\n csvfile_name = self.branch_file\n headers = self.branch_headers\n else:\n 
csvfile_name = self.atm_file\n headers = self.atm_headers\n\n with open(csvfile_name, mode, newline='') as csvfile:\n locwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n if mode=='w':\n locwriter.writerow(headers) \n\n for loc in loc_details:\n locwriter.writerow(loc)", "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def setup_writer(fileid, postfix):\n # we dump episode num, step, total reward, and \n # number of episodes solved in a csv file for analysis\n csvfilename = \"%s.csv\" % fileid\n csvfilename = os.path.join(postfix, csvfilename)\n csvfile = open(csvfilename, 'w', 1)\n writer = csv.writer(csvfile,\n delimiter=',',\n quoting=csv.QUOTE_NONNUMERIC)\n writer.writerow(['Episode',\n 'Step',\n 'Total Reward',\n 'Number of Episodes Solved'])\n\n return csvfile, writer", "def write(self, fname):\n pass", "def write_data():", "def write_file(data,file_name):\r\n\twith open(file_name,'wb') as new_csv_file:\r\n\t\twrtr = writer(new_csv_file)\r\n\t\tfor row in data:\r\n\t\t\twrtr.writerow(row)", "def write_csv(reviewer_data, file_obj):\n writer = csv.writer(file_obj)\n writer.writerow(\n ('Reviewer', 'Reviews', '-2', '-1', '+1', '+2', '+A', '+/- %',\n 'Disagreements', 'Disagreement%'))\n for (name, r_data, d_data) in reviewer_data:\n row = (name,) + r_data + d_data\n writer.writerow(row)", "def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)" ]
[ "0.6876169", "0.6498835", "0.63612753", "0.6353106", "0.633464", "0.62842685", "0.61852163", "0.60895437", "0.6051628", "0.6050783", "0.60384357", "0.60274184", "0.59963906", "0.5985917", "0.59784335", "0.5975056", "0.596307", "0.59575045", "0.59389687", "0.5937647", "0.59298587", "0.5923549", "0.5917342", "0.59139335", "0.5905097", "0.5889366", "0.5875775", "0.5874494", "0.5873106", "0.5869246", "0.5868335", "0.5859966", "0.58496827", "0.5844759", "0.5839364", "0.5829087", "0.5824984", "0.5824279", "0.5823811", "0.5823558", "0.58217937", "0.58192116", "0.5816786", "0.5814687", "0.5810711", "0.5804529", "0.58033514", "0.5794972", "0.57853484", "0.5776491", "0.57708126", "0.57669604", "0.5766498", "0.5758647", "0.5754421", "0.57459426", "0.5744092", "0.5737706", "0.5727849", "0.57274437", "0.57204664", "0.57124937", "0.5710953", "0.5709139", "0.57053334", "0.57017285", "0.569754", "0.5696463", "0.5693235", "0.5691548", "0.5691433", "0.5689473", "0.5685731", "0.5685478", "0.56775135", "0.56706", "0.5666054", "0.5664446", "0.5662544", "0.56617826", "0.5658255", "0.565159", "0.5651257", "0.5651257", "0.56439537", "0.56363916", "0.5636378", "0.563377", "0.5630346", "0.5630346", "0.56281847", "0.5620427", "0.56194115", "0.56191903", "0.56141335", "0.56118137", "0.56108606", "0.5608029", "0.56076324", "0.5605706" ]
0.5933413
20
In this function, you will instantiate your classes several times, using the data provided. Then, you will open "sh_additional_info.csv" and, for each line in that file, perform an operation using one of the methods of one of your classes. Follow the commented instructions in this main() function. Refer to the Problem Set 07 README.md for instructions and tips.
def main():
    # Refer to Problem Set 07 README.md for instructions and tips.

    # 6.1: Read in < sh_basic_info.csv >
    basic_info = read_csv_file('sh_basic_info.csv')

    # 6.2: Create instances of < SuperHeroine >
    heroines = {}
    for hero in basic_info:
        heroines[hero['name']] = SuperHeroine(hero['name'], hero['full_name'], hero['team'],
                                              hero['eye_color'], hero['hair_color'], hero['base'])
    print(heroines)

    # 6.3: Read in < sh_additional_info.csv >
    additional_info = read_csv_file('sh_additional_info.csv')

    # 6.4: Add powers and nemesis
    for row in additional_info:
        name = row["Heroine Name"]
        instance_affected = heroines[name]
        how_affected = row["Category"]
        value = row['Value']
        if how_affected == 'power':
            instance_affected.add_power(value)
        else:
            instance_affected.add_nemesis(value)

    # 6.5: Write to file
    write_to_file('storm.txt', heroines['Storm'])
    write_to_file('scarlet_witch.txt', heroines['Scarlet Witch'])
    write_to_file('jessica_jones.txt', heroines['Jessica Jones'])
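The main() above depends on a read_csv_file helper, a write_to_file helper, and a SuperHeroine class that are not included in this record. The following is a minimal sketch of those pieces, assuming dict-style CSV rows and list-backed power/nemesis attributes; the names and signatures are inferred only from the calls made in main(), not taken from the original problem set.

import csv

def read_csv_file(filepath):
    # Hypothetical helper: return each row of the CSV as a dict keyed by the header row.
    with open(filepath, 'r', newline='', encoding='utf-8') as f:
        return list(csv.DictReader(f))

class SuperHeroine:
    # Hypothetical minimal class exposing only the attributes and methods main() uses.
    def __init__(self, name, full_name, team, eye_color, hair_color, base):
        self.name = name
        self.full_name = full_name
        self.team = team
        self.eye_color = eye_color
        self.hair_color = hair_color
        self.base = base
        self.powers = []
        self.nemeses = []

    def add_power(self, power):
        self.powers.append(power)

    def add_nemesis(self, nemesis):
        self.nemeses.append(nemesis)

    def __str__(self):
        return f"{self.name} ({self.full_name}), team: {self.team}, base: {self.base}"

def write_to_file(filepath, heroine):
    # Hypothetical helper: write the string form of a SuperHeroine to a text file.
    with open(filepath, 'w', encoding='utf-8') as f:
        f.write(str(heroine))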
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n raw_data = pd.read_csv('data/raw_hospital_data.csv')\n\n fe_data = new_features(raw_data)\n fe_data = compressing_admission_type(data)\n fe_data = age_to_cat(fe_data)\n fe_data = compressing_careunit(fe_data)\n fe_data = compressing_curr_serv(fe_data)\n fe_data = compressing_ethnicity(fe_data)\n fe_data = compressing_marital_status(fe_data)\n fe_data = compressing_religion(fe_data)\n fe_data = compressing_admit_location(fe_data)\n fe_data = compress_icd9_codes(fe_data)\n\n fe_data.to_csv('data/feature_engineering_data.csv')", "def __init__(self):\n self.project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.excel_file = os.path.join(self.project_dir, \"data\", \"Literature_Data.xlsx\")\n self.spreadsheet_name = \"Individualized Data\"\n self.filled_output_file = os.path.join(self.project_dir, \"data\", \"filled_data.csv\")\n self.output_file = os.path.join(self.project_dir, \"data\", \"final.csv\")\n self.use_fake_data = False # For testing\n # This instance value \"self.df\" is the pandas DataFrame that contains all of the data\n # from the literature case studies. Manipulating this field is the purpose of this class.\n \n self.num_negative = 500\n self.df = None", "def main():\n try:\n\n logger = settings.get_logger(__name__)\n\n # if not (check_for_data()):\n # raise Exception(\"The following files were not all found: %s\"%(\"files\")) \n\n logger.info(\"*** Import data from raw files ***\")\n #load raw file\n\n logger.info(\"Load raw data file (Huge file, please be patient)...\")\n p1c1File = \"histo_7cerf_p1c1.txt\"\n df_histo_p2c1_jour = pd.read_csv(settings.raw_path + p1c1File, sep = \",\", encoding = 'utf-8', header = None,dtype={0:str,2:str,3:str}).fillna(0)\n\n #prepare sales dataframe\n logger.info(\"Droping uneccessary columns...\")\n sales_df= df_histo_p2c1_jour.drop([1,3,4,5,6],axis=1)\n\n #set headers\n logger.info(\"Setting headers info...\")\n end_date = \"01-14-2019\"\n columns = settings.row_headers\n nb_days = len(sales_df.columns) - len(columns)\n date_range = pd.date_range(end = end_date,periods = nb_days, freq='1w').strftime(\"%d/%m/%Y\")\n columns.extend(date_range)\n sales_df.columns = columns\n\n #drop Client 0\n sales_df = sales_df[sales_df[\"Client\"]!=0]\n\n #Get p1c1 keys\n p1c1 = sales_df[[\"Product\",\"Client\"]].dropna().drop_duplicates().astype(str).copy()\n\n #Product table\n logger.info(\"Loading products descriptions...\")\n product_df = get_product_df(\"product_7cerf.txt\")\n #save product season mapping\n save_product_season(product_df)\n\n #Get keys table from previous files\n p1c1p2 = p1c1.join(product_df[[\"Key_lvl2\"]],on =[\"Product\"]).dropna().set_index([\"Product\"]).astype(str)\n\n\n #save sales history\n save_p2_sales(sales_df,p1c1p2)\n \n #Get client talbe\n logger.info(\"Loading clients descriptions...\")\n client_df = get_clients_df(\"client_7cerf.txt\",columns =[\"Store Level\",\"Business Area\"] )\n cli_features = p1c1p2.join(client_df,on=\"Client\",how=\"left\").drop([\"Client\"],axis=1)\n\n \n\n #Calculate store counts\n logger.info(\"Saving store counts file...\")\n save_storecounts(cli_features,p1c1p2)\n\n \n\n #Client counts by p2\n logger.info(\"Saving clients count by product...\")\n save_clients_count(p1c1p2)\n # return True\n except Exception as err:\n print(err)\n logger.error(err)\n # return False", "def main():\n # openfile allows for CSV files with stored data of two columns\n # data = openfile(\"filename\")\n data = get_data()\n abtest = AB_test(data)\n abtest.stats()\n 
abtest.print_stats()", "def main():\n # Load in original data\n origin_data = pd.read_csv('/Users/apple/Desktop/CSE_163/cse163_project/'\n + 'Admission_Predict_Ver1.1.csv',\n sep=r'\\s*,\\s*', header=0, encoding='ascii',\n engine='python')\n\n # Research question 1\n lasso_regression(origin_data)\n\n # Research question 2\n # We drop the 'Serial No.' column because it is unrelated to our analysis.\n df = origin_data.drop(columns=['Serial No.'])\n find_correlation(df)\n boxplots_testscores_vs_admission(df)\n\n # Research question 3\n university_rating_analysis(origin_data)", "def handle(self, *args, **options):\n try:\n # csv path argument\n fpath = options['csv_path']\n # checks if file is csv\n if fpath.lower().endswith('csv'):\n encoding = get_encoding(fpath)\n with open(fpath, encoding=encoding) as file:\n reader = csv.reader(file, delimiter=\",\")\n # iterating through each row in csv\n for row in reader:\n try:\n create_instance(row[0].strip(), row[1].strip(),\n row[2].strip())\n except IntegrityError:\n # duplicate information\n self.stderr.write(self.style.WARNING(\n \"(Warning) \"\n \"Duplicate Information: {}\".format(row)))\n except IndexError:\n # problem extracting missing information\n self.stderr.write(self.style.WARNING(\n \"(Warning) Incorrect Format: {}\".format(row)))\n except ValueError:\n # missing information\n self.stderr.write(self.style.WARNING(\n \"(Warning) Missing Value: {}\".format(row)))\n except ObjectDoesNotExist:\n # queried object does not exist\n self.stderr.write(self.style.WARNING(\n \"(Warning) \"\n \"ObjectDoesNotExist: {}\".format(row)))\n else:\n # file not csv\n self.stderr.write(self.style.ERROR(\"(Error) \"\n \"File not of type csv.\"))\n except FileNotFoundError:\n # file path does not exist\n self.stderr.write(self.style.ERROR(\"(Error) File does not exist.\"))\n except IOError:\n # error reading file\n self.stderr.write(self.style.ERROR(\"(Error) Error reading file.\"))", "def main():\n scores_file = open(\"scores.csv\")\n scores_data = scores_file.readlines()\n print(scores_data)\n subjects = scores_data[0].strip().split(\",\")\n score_values = []\n for score_line in scores_data[1:]:\n score_strings = score_line.strip().split(\",\")\n score_numbers = [int(value) for value in score_strings]\n score_values.append(score_numbers)\n scores_file.close()\n scores_by_subjects = reorganise_score(score_values)\n subject_details(scores_by_subjects, subjects)", "def run():\n options = [\"Add\", \"Remove\", \"Update\", \"Oldest person\", \"Persons closest to average\"]\n common_options = [\"Name: \", \"Year: \"]\n file = \"model/hr/persons.csv\"\n title_list = [\"Id\", \"Name\", \"Year\"]\n choice = None\n dont_clear = False\n while choice != '0':\n if not dont_clear:\n os.system(\"clear\")\n table = data_manager.get_table_from_file(file)\n terminal_view.print_table(table, title_list)\n choice = terminal_view.get_choice_submenu(options)\n dont_clear = False\n if choice == '1':\n common.add(file, common_options)\n elif choice == '2':\n common.remove(file)\n elif choice == '3':\n common.update(file, common_options)\n elif choice == '4':\n terminal_view.print_result(hr.get_oldest_person(table), \"Oldest persons:\\n\")\n dont_clear = True\n elif choice == '5':\n msg = \"Persons with age closest to average:\\n\"\n terminal_view.print_result(hr.get_persons_closest_to_average(table), msg)\n dont_clear = True\n else:\n terminal_view.print_error_message(\"There is no such choice.\")", "def main():\n filename = \"data/exercise.csv\"\n analyze(filename)", "def 
main():\n\n #get the csv file into a data-frame\n universities_df = pd.read_csv('universities_data.csv', encoding = 'utf-8-sig')\n universities_names_list = universities_df['name'].tolist()\n\n #get list of university objects\n url = 'http://universities.hipolabs.com/search?country=Israel'\n api_universities = Get_universities(url)\n list_of_universities = api_universities.get_universities_info()\n\n #to see if we got new entities or not for exporting to csv later..\n is_new_entities = False\n\n for university in list_of_universities:\n if university.name not in universities_names_list:\n is_new_entities = True\n universities_df= universities_df.append(pd.DataFrame({\n 'alpha_two_code': [university.alpha_two_code], \n 'country': [university.country],\n 'web_pages': [str(university.web_pages)],\n 'domains': [str(university.domains)],\n 'name': [university.name],\n 'state_province':[str(university.state_province)]}) , ignore_index = True)\n\n #export back to csv if true\n if is_new_entities: \n print('we got new entities!') \n universities_df.to_csv('universities_data.csv', encoding = 'utf-8-sig', index = False)\n else:print('no new universities for now!')", "def main():\n with open('csv_files/products.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" description{}\".format(str(i)),\n \" type{}\".format(str(i)),\n \" {}\".format(str(random.randint(1, 100)))])\n\n with open('csv_files/customers.csv', 'a') as data_file:\n # Move to the next line before appending new row to the file\n data_file.write(\"\\n\")\n data_writer = csv.writer(data_file)\n for i in range(5, 10000):\n data_writer.writerow([str(i+1), \" first_name{}\".format(str(i)),\n \" last_name{}\".format(str(i)),\n \" address{}\".format(str(i)),\n \" phone_number{}\".format(str(i)),\n \" email{}\".format(str(i))])", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def main():\n\n dataframes = importing(['admissions_data', 'patient_data',\n 'diagnoses_icd_data', 'services_data',\n 'icustays'])\n merged_data = merging_data(dataframes)\n cleaned = data_cleaning(merged_data)\n\n cleaned.to_csv('raw_hospital_data.csv')", "def __init__(self, args):\n self.verbose = args.verbose\n self.force = args.force\n self.extra = args.extra\n self.master_csv = args.master\n self.new_files = args.new_files\n self.df_mas_lab_data = None # Master Lab data\n self.df_new_lab_data = None # Aggregated new Lab data\n self.columns = [\n \"CLIA\",\n \"FACILITY_TYPE\",\n \"CERTIFICATE_TYPE\",\n \"LAB_NAME\",\n \"STREET\",\n \"CITY\",\n \"STATE\",\n \"ZIP\",\n \"PHONE\",\n ]", "def main():\n now = time.strftime('%Y%m%d%H%M%S')\n\n # info = get_info(now)\n # info_filename = 'info_' + now + '.csv'\n # info.to_csv(os.path.join('..', '..', 'data', 'raw', info_filename), index=False)\n\n questions = get_questions(now)\n\n # don't talk about all this detail in the talk", "def main():\n\n csv_file = \"shortlist.csv\"\n team_count = 0\n participant_count = 0\n\n\n #Delete all existing teams and participants from the database.\n Team.objects.all().delete()\n Participant.objects.all().delete()\n\n with 
open(csv_file) as f:\n reader = csv.reader(f)\n data = [row for row in reader]\n\n for item in data:\n if item[0]:\n team_count += 1\n\n t = Team.objects.create(\n name=item[0].strip(),\n idea=item[30].strip()\n )\n\n no_of_p = int(item[1])\n print item[1]\n participant_count += no_of_p\n\n p1 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[2].strip() + \" \" + item[3].strip(),\n gender=item[4].strip(),\n college=item[7].strip(),\n email=item[5].strip(),\n phone=str(item[6]),\n team=t\n )\n\n p2 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[11].strip() + \" \" +item[12].strip(),\n gender=item[13].strip(),\n college=item[16].strip(),\n email=item[14].strip(),\n phone=str(item[15]),\n team=t\n )\n\n if no_of_p == 3:\n p3 = Participant.objects.create(\n participant_id=get_pid(),\n name=item[20].strip() + \" \" +item[21].strip(),\n college=item[25].strip(),\n gender=item[22].strip(),\n email=item[23].strip(),\n phone=str(item[24]),\n team=t\n )\n\n print \"{} teams and {} participants imported.\".format(team_count,\n participant_count)", "def __init__(self):\n self.file_name = 'data.csv'\n # Column of interest\n self._col = ['product_name', 'url', 'quantity', 'packaging']\n self._col += ['brands', 'origins', 'countries_fr', 'allergens']\n self._col += ['traces_fr', 'additives_n', 'additives_fr']\n self._col += ['nutrition_grade_fr', 'categories_fr']\n self._col += ['main_category_fr']\n\n # Check if the csv is already in the file\n try:\n with open(self.file_name, 'r'):\n pass\n except FileNotFoundError:\n CsvAnalysis.download_file()\n finally:\n # Read the csv file, and create a dataframe\n self.food_cat = pandas.read_csv(self.file_name,\n sep=\"\\t\",\n low_memory=False,\n usecols=self._col,\n encoding=\"utf8\")\n\n # Remove countries which aren't France\n mask = self.food_cat['countries_fr']\n self.food_cat = self.food_cat[mask == 'France']\n\n # Delete column countries_fr\n del self.food_cat['countries_fr']\n\n # Remove empty row countries_fr from dataframe\n columns = ['main_category_fr', 'product_name', 'nutrition_grade_fr']\n for column in columns:\n self.food_cat = self.food_cat[~self.food_cat[column].isnull()]\n\n # Remove empty row from product_name\n self.food_cat.sort_values(by='categories_fr')\n\n # Select the last value from categories_fr\n # to use it as a subcategory\n col = 'categories_fr'\n self.food_cat[col] = self.food_cat[col].str.split(',').str.get(-1)\n self.food_cat.sort_values(by='categories_fr')", "def __init__(self):\r\n self.filter_p_number = 3 # First one with enough data for statistics\r\n self.prfs_d = extract_settings_elvis()\r\n\r\n ccds = True\r\n filtered = False\r\n scamp = False\r\n\r\n input_df = read_csv('cats/cat_clean_ssos.csv', index_col=0)\r\n filt_cat = self.gets_filtered_catalog() # Gets data from filtered\r\n\r\n if ccds:\r\n cats_d = self.extract_cats()\r\n self.extract_stats_ccds(cats_d, input_df, filt_cat)\r\n elif filtered:\r\n self.extract_stats_filt(filt_cat, input_df)\r\n elif scamp:\r\n pass\r\n # self.extract_stats_scamp(input_df)\r\n else:\r\n pass", "def main():\n draft_class = 56\n from_csv = 'smjhl-2020-09-10.csv'\n to_csv = 'S' + str(draft_class) + '-bmi.csv'\n\n\n full_data = pd.read_csv(f\"../{from_csv}\")\n draft_class_data = full_data.loc[full_data['Draft Class Numeric'] == draft_class].copy()\n height_weight_raw = draft_class_data[[\"First Name\", \"Last Name\", \"Height\", \"Weight\"]]\n\n bmi_chart = bmi_magic(height_weight_raw)\n bmi_chart = bmi_chart.sort_values(by=[\"BMI\"], 
axis=0, ascending=False)\n\n # print(full_data)\n print(bmi_chart)\n bmi_chart.to_csv(to_csv)", "def main():\n\n # Read the CSV and get its content\n jobOfferList, professionsList = usefulFunctions.readCsv()\n \n # Create an empty output tab with the right number of lines and columns\n finalTab = usefulFunctions.createEmpty(jobOfferList, professionsList)\n \n # Fill the tab\n finalTab = usefulFunctions.fillTabExceptTotals(jobOfferList, professionsList, finalTab)\n \n # Update the totals \n finalTab = usefulFunctions.fillTotals(finalTab)\n \n print(\"\\nTable des métiers par profession et type de contrat : \")\n for line in finalTab:\n print(line)", "def part1():\n print('=== Starting Part 1 ===')\n data = pd.read_csv(DATA)\n\n print('Number of species:', hw2_pandas.species_count(data))\n print('Highest level pokemon:', hw2_pandas.max_level(data))\n print('Low-level Pokemon', hw2_pandas.filter_range(data, 1, 9))\n print('Average attack for fire types',\n hw2_pandas.mean_attack_for_type(data, 'fire'))\n print('Count of each Pokemon type:')\n print(hw2_pandas.count_types(data))\n print('Highest stage for each Pokemon type')\n print(hw2_pandas.highest_stage_per_type(data))\n print('Average attack for each Pokemon type')\n print(hw2_pandas.mean_attack_per_type(data))", "def test_add_data():\n add_furniture(\"invoice_file.csv\", \"Elisa Miles\", \"LR04\", \"Leather Sofa\", 25.00)\n add_furniture(\"invoice_file.csv\", \"Edward Data\", \"KT78\", \"Kitchen Table\", 10.00)\n add_furniture(\"invoice_file.csv\", \"Alex Gonzales\", \"BR02\", \"Queen Mattress\", 17.00)", "def main():\n s = content.DataFiles()\n \n date_list = generate.get_list_dates(2016, 2016, 500)\n prod_list = list(s.get_collist_by_name(os.path.join(content.data_fldr,'food','garden_produce.csv'), 'name')[0])\n \n tbl_cust = generate.TableGenerator(8, ['STRING','PEOPLE', 'PEOPLE', 'PLACE'], ['Customer ID', 'First Name', 'Surname', 'Country'])\n tbl_cust.save_table('customers.csv')\n cust_list = list(s.get_collist_by_name('customers.csv', 'Customer ID')[0])\n \n tbl_sales = generate.TableGenerator(25, [date_list, cust_list, prod_list, 'CURRENCY'], ['Date of sale', 'Customer ID', 'Product', 'Amount'])\n tbl_sales.save_table('sales.csv')", "def main():\n\n # start at loading the dataset\n data = h1bdata_loading()\n merged_data = pd.concat([data[year] for year in range(2010,2017)], ignore_index= True)\n raw_data = h1b_data(data)\n \n \n\n # Then clean the data\n #h1b_data = Clean_df(raw_data)\n #print(\"data cleaned >>>\")\n\n\n while True:\n try:\n print (\"================================ H1b Visa Approve Rate Exploring ================================\")\n print (\"\")\n print (\" How do you want to explore the H1b Data? 
\")\n print (\" <a> : Overview \t\t \")\n print (\" <b> : Location \")\n print (\" <c> : Industry \")\n print (\" <d> : Company \") \n print (\" You can always input 'quit' to leave the system \")\n print (\"=================================================================================================\")\n\n key = option_input()\n if key == 'a':\n overview(data)\n if key == 'b':\n location(data)\n if key == 'c':\n industry_exploring(merged_data)\n if key == 'd':\n company_exploring(merged_data)\n except wrong_option_exception:\n print (\"Invalid option, please reselect.\")", "def main(argv):\n # Question 1\n # Saves the features given in a list\n features = (argv[2].split(sep=\", \"))\n the_data = data.load_data(argv[1], features)\n statistic_functions = [sum, mean, median]\n # Saves the relevant records\n summer_data, not_summer = data.filter_by_feature(the_data, \"season\", [1])\n holiday_data, not_holiday = data.filter_by_feature(the_data, \"is_holiday\", [1])\n print(\"Question 1:\")\n print(\"Summer:\")\n data.print_details(summer_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n print(\"Holiday:\")\n data.print_details(holiday_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n print(\"All:\")\n data.print_details(the_data, [\"hum\", \"t1\", \"cnt\"], statistic_functions)\n\n # Question 2\n print(\"\\nQuestion 2\")\n print(\"If t1<=13.0, then:\")\n # Saves the relevant records\n winter_data, not_winter = data.filter_by_feature(the_data, \"season\", [3])\n w_h_data, not_w_h_data = data.filter_by_feature(winter_data, \"is_holiday\", [1])\n population_statistics(\"Winter holiday records:\", w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 0, statistic_functions[1:])\n population_statistics(\"Winter weekday records:\", not_w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 0, statistic_functions[1:])\n print(\"If t1>13.0, then:\")\n population_statistics(\"Winter holiday records:\", w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 1, statistic_functions[1:])\n population_statistics(\"Winter weekday records:\", not_w_h_data, \"t1\", [\"cnt\"], THRESHOLD, 1, statistic_functions[1:])", "def main():\n print(time.time())\n data = '../datasets/between_phase/clean_df.csv'\n print(\"Process Beginning\")\n print(\"Reading Clean CSV\")\n clean_df = pd.read_csv(data, dtype={\"journey_pattern_id\": str})\n print(clean_df.shape)\n base_table = Base_Table(clean_df)\n print(\"Adding datetime\")\n base_table.add_datetime()\n print(\"Adding Day\")\n base_table.add_day()\n print(\"Adding Hour\")\n base_table.add_hour()\n print(\"Adding Time Bin\")\n base_table.add_time_bin()\n print(\"Adding Weekend Boolean\")\n base_table.add_weekend()\n print(\"Adding Distance\")\n base_table.add_distance_feature()\n print(\"Updating Stop Id\")\n base_table.add_nearest_stop_distance()\n print(\"Filtering Data\")\n base_table.remove_null_stops()\n print(\"Adding Travel Time\")\n base_table.add_travel_time()\n print(\"Adding Congestion\")\n base_table.congestion_feature()\n bs = base_table.get_df()\n bs.to_csv('../datasets/output_files/base_table.csv')\n return bs", "def load(cls):\n \n # Loop through procedures and build patient procedure lists:\n procs = csv.reader(file(PROCEDURES_FILE,'U'),dialect='excel-tab')\n header = procs.next() \n for proc in procs:\n cls(dict(zip(header,proc))) # Create a procedure instance ", "def main():\n data = pd.read_csv('countries.csv')\n # import_data_pandas(data)\n # continent_data(data)\n # continent_data_le(data)\n continent_data_gdp_growth(data)", "def main(raw_filepath, interim_filepath, 
processed_filepath):\n raw_filepath = Path(raw_filepath)\n interim_filepath = Path(interim_filepath)\n processed_filepath = Path(processed_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n years = ['2010', '2011', '2012', '2013', '2014']\n\n #############################################################\n ################ Life Expectancy Outcome ####################\n #############################################################\n\n le_birth = pd.read_csv(raw_filepath / 'US_A.csv',\n usecols=['Tract ID', 'e(0)'],\n dtype={'Tract ID': \"object\"}) \\\n .rename(columns={'Tract ID': 't10_cen_uid_u_2010'}) \\\n .set_index('t10_cen_uid_u_2010')\n\n le_other = pd.read_csv(raw_filepath / 'US_B.csv',\n usecols=['Tract ID', 'Age Group', 'e(x)'],\n dtype={'Tract ID': \"object\"}) \\\n .rename(columns={'Tract ID': 't10_cen_uid_u_2010'}) \\\n .set_index(['t10_cen_uid_u_2010', 'Age Group']) \\\n .sort_index() \\\n .loc[(slice(None), ['15-24', '35-44', '55-64']), :] \\\n .unstack() \\\n .reindex(le_birth.index) # use the same tracts for all experiments\n\n le_other.columns = ['e(20)', 'e(40)', 'e(60)']\n\n # le_birth.to_csv(processed_filepath / 'y_00.csv', header=True)\n # le_other['e(20)'].to_csv(processed_filepath / 'y_20.csv', header=True)\n # le_other['e(40)'].to_csv(processed_filepath / 'y_40.csv', header=True)\n # le_other['e(60)'].to_csv(processed_filepath / 'y_60.csv', header=True)\n\n\n ##############################################################\n ################## Priority Dataset ##########################\n ##############################################################\n\n with open(raw_filepath / 'T10_Priority_Wide_Interpolated.csv', 'r') as f:\n cols = f.readline().strip().split(',')\n\n proj_cols = [x for x in cols if x[-4:] in years]# and\n # get all the priority NETS columns for later\n net_cols = ['t10_cen_uid_u_2010'] + [x[:11] + '_d_' + x[14:] for x in cols if '_net_' in x]\n\n data_X = pd.read_csv(raw_filepath / 'T10_Priority_Wide_Interpolated.csv', usecols=proj_cols,\n dtype={'t10_cen_uid_u_2010': \"object\"}) \\\n .set_index('t10_cen_uid_u_2010')\n\n # Create % younger than 25 (this method is far less than ideal)\n ag25up = data_X.filter(regex='.*(_pop_c_|ag25up).*')\n ag25up_coltuples = [(x[:-4], x[-4:]) for x in ag25up.columns]\n ag25up.columns = pd.MultiIndex.from_tuples(ag25up_coltuples)\n ag25up_long = ag25up.stack()\n ag25dwn_p = ((ag25up_long['t10_ldb_pop_c_'] - ag25up_long['t10_ldb_ag25up_c_'])\n / ag25up_long['t10_ldb_pop_c_']).unstack()\n ag25dwn_p.columns = ['t10_ldb_ag25dwn_p_' + x for x in ag25dwn_p.columns]\n\n # Create % older than 65\n ag65up = data_X.filter(regex='.*(_pop_c_|a60up).*')\n ag65up_coltuples = [(x[:-4], x[-4:]) for x in ag65up.columns]\n ag65up.columns = pd.MultiIndex.from_tuples(ag65up_coltuples)\n ag65up_long = ag65up.stack()\n ag65up_p = (ag65up_long['t10_ldb_a60up_c_'] / ag65up_long['t10_ldb_pop_c_']) \\\n .unstack()\n ag65up_p.columns = ['t10_ldb_ag60up_p_' + x for x in ag65up_p.columns]\n\n # Add our new measure\n data_X = pd.concat([data_X, ag25dwn_p, ag65up_p], axis=1)\n\n # Get rid of all count variables, including nets\n no_count_cols = [x for x in data_X.columns if '_c_' not in x]\n data_X = data_X[no_count_cols]\n\n\n drop_cols = ['t10_gis_area_l_2010',\n 'm10_cen_uid_u_2010',\n 'm10_cen_memi_x_2010',\n 'c10_cen_uid_u_2010',\n 'z10_cen_uid_u_2010']\n\n data_X = data_X.drop(columns=drop_cols) \\\n .reindex(le_birth.index)\n\n data_X.columns = pd.Index([(x[:-5], int(x[-4:])) for x 
in data_X.columns])\n\n X_priority = data_X.groupby(axis=1, level=0).mean()\n X_priority.to_csv(interim_filepath / 'X_priority.csv')\n\n ###########################################################\n #################### NETS Dataset #########################\n ###########################################################\n\n X_nets_allyrs = pd.read_csv(raw_filepath / 'recvd_t10_vars_v8_20190607.csv', usecols=net_cols,\n dtype={'t10_cen_uid_u_2010': \"object\"}) \\\n .set_index('t10_cen_uid_u_2010') \\\n .reindex(le_birth.index)\n\n X_nets_allyrs.columns = pd.Index([(x[:-5], int(x[-4:])) for x in X_nets_allyrs.columns])\n X_nets = X_nets_allyrs.groupby(axis=1, level=0).mean()\n X_nets.to_csv(interim_filepath / 'X_nets.csv')\n\n # Split predictive data by Variable Set\n X_all = pd.concat([X_priority, X_nets], axis=1) \\\n .dropna(how='any')\n\n final_index = le_birth.index.intersection(X_all.index)\n X_all = X_all.reindex(final_index)\n le_birth = le_birth.reindex(final_index)\n le_other = le_other.reindex(final_index)\n\n le_birth.to_csv(processed_filepath / 'y_00.csv', header=True)\n le_other['e(20)'].to_csv(processed_filepath / 'y_20.csv', header=True)\n le_other['e(40)'].to_csv(processed_filepath / 'y_40.csv', header=True)\n le_other['e(60)'].to_csv(processed_filepath / 'y_60.csv', header=True)\n\n # Var Set 1\n p1_features = ['t10_ldb_hinci_m',\n 't10_ldb_pop_d',\n 't10_ldb_nhblk_p',\n 't10_ldb_hisp_p',\n 't10_ldb_col_p']\n X_p1 = X_all[p1_features]\n X_p1.to_csv(processed_filepath / 'X_varGroup1.csv')\n\n # Var Set 2\n p2_features = [\n \"t10_ldb_hinci_m\",\n \"t10_ldb_pop_d\",\n \"t10_ldb_ag25dwn_p\",\n \"t10_ldb_ag60up_p\",\n \"t10_ldb_nhblk_p\",\n \"t10_ldb_hisp_p\",\n \"t10_ldb_col_p\",\n \"t10_ldb_lep_p\",\n \"t10_ldb_mrenti_m\",\n \"t10_ldb_multi_p\",\n \"t10_ldb_nhwht_p\",\n \"t10_ldb_asian_p\",\n \"t10_ldb_fb_p\",\n \"t10_ldb_hs_p\",\n \"t10_ldb_unemp_p\",\n \"t10_ldb_npov_p\",\n \"t10_ldb_vac_p\",\n \"t10_ldb_own_p\",\n \"t10_ldb_mhmvali_m\"\n ]\n X_p2 = X_all[p2_features]\n X_p2.to_csv(processed_filepath / 'X_varGroup2.csv')\n\n # Var Set 3\n X_p3 = X_nets.reindex(final_index)\n X_p3.to_csv(processed_filepath / 'X_varGroup3.csv')\n\n # Var Set 4\n X_p4 = X_all\n X_p4.to_csv(processed_filepath / 'X_varGroup4.csv')", "def __init__(self, file_path):\n # will raise an error if the path is invalid, we don't need an\n # if statement here\n df = pandas.read_excel(file_path)\n\n \"\"\"\n read in the cities using a dictionary comprehension\n dictionary = { key: value for elem in iterable }\n In this case we are reading in the name of the city as the key\n and its corresponding CityLocation object as the value. 
We\n have made the assumption that each city has a unique name.\n \"\"\"\n #\n self.lulu = [Order(row[1][\"Date\"], row[1][\"Order Number\"],\n row[1][\"Brand\"], row[1][\"Garment\"],\n row[1][\"Count\"], row[1][\"Style name\"])\n for row in df.iterrows()\n if row[1][\"Brand\"] == \"Lululime\"]\n self.lulu = LululimeFactory()\n GarmentMaker(self.lulu)\n # brand = self.lulu[0].brand\n # garment = self.lulu[0].garment\n # print(brand)\n # print(garment)\n\n lulu_order = ((row[1][\"Date\"], row[1][\"Garment\"],\n row[1][\"Brand\"], row[1][\"Garment\"])\n for row in df.iterrows() if\n row[1][\"Brand\"] == \"Lululime\")\n for item in lulu_order:\n print(item)\n\n lulu_order = LululimeFactory()\n # test = GarmentMaker(lulu_order)\n # print(test)\n\n # for lulu in self.lulubrand:\n # print(lulu)\n # print(*self.lulubrand)", "def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n get_user_config()", "def run_methods(self):\n try:\n # dictionaries of population time series\n self.batch_exponential()\n except Exception as e:\n print(str(e))", "def __init__(self, cfg, radious, type_feature, type_filtering, h_filterig):\n print(\"feat test!!\")\n self.path_root = cfg['preprocessing']['path_root']\n self.path_data = cfg['data']['path']\n self.path_checkpoint = os.path.join(self.path_data, \"preprocess_checkpoint.csv\")\n self.file_checkpoint_data = open(self.path_checkpoint, \"a+\").close()\n # self.file_checkpoint_data.close()\n if (len(open(self.path_checkpoint).readlines()) == 0):\n print(\"creating the file...\")\n with open(self.path_checkpoint, \"a+\") as f:\n f.write('radious,type_feature,type_filtering,h_filterig'+ \"\\n\")\n self.init_refined = self.path_root + \"/data/new_refined/\"\n self.init_casf = self.path_root + \"/data/new_core_2016/\"\n self.dict_atoms = dict_atoms_hot\n self.dict_atoms_simple = dict_atoms_simple\n self.dict_words = atom_most_common\n self.dict_atoms_masses = dict_atoms_masses\n self.dict_atoms_charges = dict_atoms_charges\n self.radious = radious\n self.type_feature = type_feature\n self.type_filtering = type_filtering\n self.h_filterig = h_filterig\n ##################refined files###################\n self.files_refined = os.listdir(self.init_refined)\n self.files_refined = [file for file in self.files_refined if file[0].isdigit()]\n self.files_refined.sort()\n self.idx_files_refined = list(range(0, len(self.files_refined)))\n # self.idx_files_refined = [0, 1]\n self.max_length = 0\n # array_names = [str(radious), self.type_feature, self.type_filtering, self.h_filterig]\n # self.name_checkpoint_features = '_'.join(array_names)\n # os.makedirs(os.path.join(self.path_data, \"checkpoints\"), exist_ok=True)\n # self.path_checkpoint_features = os.path.join(self.path_data, \"checkpoints\", self.name_checkpoint_features + \".pkl\")\n # if (os.path.exists(self.path_checkpoint_features)):\n # print(\"loading feature ids...\")\n # checkpoint_features = torch.load(self.path_checkpoint_features)\n # self.idx_max_length = checkpoint_features['idx_max_length']\n # self.max_length = checkpoint_features['max_length']\n # self.idx_write = checkpoint_features['idx_write']\n # else:\n # self.idx_max_length = 130\n # self.max_length = 0\n # self.idx_write = 0\n # save_checkpoint_feature(self.path_checkpoint_features, self.idx_max_length, self.max_length, self.idx_write)\n\n # self.max_length = 0\n # self.write_filtered_pad_feat_geo()\n # else:\n # f, m, g = self._get_feat_geo_from_file(0)\n # self.max_length = f.shape[0]", "def 
main():\n\n file_name_base = \"./lab-record/result/fairness/\"\n scenarios = ['lan', 'wan1', 'wan2']\n scenario = scenarios[2]\n\n algorithms = [\"bbr\", \"scalable\", \"bic\", \"highspeed\", \"htcp\", \"hybla\",\n \"illinois\", \"vegas\", \"yeah\"]\n names = [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\", \"YeAH\"]\n\n test_types = [\"vs_reno\", \"vs_cubic\", \"vs_itself\"]\n\n fsize = 36\n \n index_reno = []\n index_cubic = []\n index_itself = []\n\n data = []\n \n print 'Loadint statistics for ' + file_name_base + '/' + scenario\n\n for algorithm in algorithms:\n for test in test_types:\n path_base = file_name_base + \"/\" + scenario + \"/\" + test + \"/\" + \\\n algorithm + \"/\"\n if test == \"vs_itself\":\n exp_name = names[algorithms.index(algorithm)] + \"_1\"\n con_name = names[algorithms.index(algorithm)] + \"_2\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \"_1.log\"\n con_filename = \"/\" + algorithm + \"_2.log\"\n process(path_base, exp_filename, con_filename, index_itself)\n if test == \"vs_reno\":\n exp_name = names[algorithms.index(algorithm)]\n con_name = \"Reno\"\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/reno.log\"\n process(path_base, exp_filename, con_filename, index_reno)\n if test == \"vs_cubic\":\n con_name = \"CUBIC\"\n exp_name = names[algorithms.index(algorithm)]\n print path_base + exp_name\n print path_base + con_name\n exp_filename = \"/\" + algorithm + \".log\"\n con_filename = \"/cubic.log\"\n process(path_base, exp_filename, con_filename, index_cubic)\n\n size = 9\n x = numpy.arange(size)\n\n total_width, n = 1.2, 2.5\n width = 1.0 / n\n x = x - (total_width - width) / 2\n\n for i in range(0, len(x)):\n x[i] += 0.5 * i\n\n # Exp\n fig = plt.figure()\n\n # Con\n con_reno = plt.bar(x + 0 * width - 1.2,\n index_reno,\n width=width,\n label='Against Reno',\n alpha=0.5,\n color=\"darkorange\")\n\n con_cubic = plt.bar(x + 1 * width - 1.2,\n index_cubic,\n width=width,\n label='Against CUBIC',\n alpha=0.5,\n color=\"lawngreen\")\n\n con_itself = plt.bar(x + 2 * width - 1.2,\n index_itself,\n width=width,\n label='Against Another Same CCA',\n alpha=0.5,\n color=\"dodgerblue\")\n\n # Index\n plt.xticks(x + 1.5 * width - 1.2, [\"BBR\", \"Scalable\", \"BIC\", \"High Speed\",\n \"H-TCP\", \"Hybla\", \"Illinois\", \"Vegas\",\n \"YeAH\"],\n fontsize=fsize,\n rotation=\"45\")\n plt.ylabel(\"Jain`s Fairness Index\", fontsize=fsize)\n plt.yticks(fontsize=fsize)\n plt.ylim(0.5, 1.1)\n\n ax = plt.subplot(111)\n ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n ncol=3, mode=\"expand\", borderaxespad=0., fontsize=fsize)\n\n plt.subplots_adjust(left=0.07, right=0.98, top=0.9, bottom=0.2)\n\n plt.show()", "def __init__(self, data):\n\n self.produce_csv = data['produce_csv']\n self.produce_graphics = data['produce_graphics']\n self.report_name = data['report_name']\n self.file_name = self.report_name + '.csv'\n self.annual_file_name = self.report_name + '_annual.csv'\n self.csv_dir = ''\n self.diagnostic_dir = ''\n\n self.daily_variables = {\n 'year': ['time.cal_year', '', []],\n 'j_day': ['time.day', '', []]\n }\n\n self.annual_variables = {\n 'year': ['time.cal_year', '', 0]\n }", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def ERCC_Stat(self):\n self.l_ERCC_name = []\n# self.l_RGCs_name = []\n self.l_mRNA_name = []\n self.l_ERCC_FPKM = {}\n# self.l_RGCs_FPKM = {}\n 
self.l_mRNA_FPKM = {}\n\n self.l_ERCC_UMI = {}\n self.l_mRNA_UMI = {}\n\n# self.l_cirRNA_FPKM={}\n \n self.l_ERCC_HTSname = []\n# self.l_RGCs_HTSname = []\n self.l_mRNA_HTSname = []\n self.l_ERCC_RPKM = {}\n# self.l_RGCs_RPKM = {}\n self.l_mRNA_RPKM = {}\n \n \n self.l_ERCC_MOLs = {}\n# self.l_RGCs_MOLs = {}\n self.l_mRNA_MOLs = {}\n self.l_cirRNA_MOLs={}\n self.l_mRNA_MOLs_HTSname = {}\n \n self.regression = {}\n \n self.__load_FPKM()\n self.__load_MOLs() # ERCC RGC mols\n self.__get_mRNA_MOLs() # get mRNA mols using ERCC_FPKM, ERCC_MOLs and mRNA_FPKM\n# self.__load_Count()\n self.__load_umi()\n \n out_file = \"%s/02.%s.ERCC_Mols.xls\" % (self.dir_StatInfo, self.s_idx)\n f_out_file = open( out_file,\"w\" )\n \n l_info = [\n \"Sample\", \"ERCC_MOLs\", \"mRNA_MOLs\",\n \"RefSeq_mRNA_MOLs\", \"Regression_R\",\n \"Regression_P\", \"RefSeq_mRNA_TPM>0\", \"ERCC_UMI\", \"RefSeq_mRNA_UMI\"\n ] \n print >>f_out_file, \"\\t\".join(l_info)\n \n for samp in self.samInfo_pd_RNA['sample']:\n idx = (self.samInfo_pd_RNA['sample'] == samp)\n brief_name = self.samInfo_pd_RNA[idx]['brief_name'].values[0]\n rename = self.samInfo_pd_RNA[idx]['rename'].values[0]\n\n ERCC_MOLs = sum( self.l_ERCC_MOLs[brief_name])\n# RGC_MOLs = sum( self.l_RGCs_MOLs[brief_name])\n mRNA_MOLs = np.sum(self.l_mRNA_MOLs[brief_name])\n RefSeq_mRNA_MOLs = \\\n np.sum(self.l_mRNA_MOLs[brief_name][self.mRNA_refSeq_index])\n \n RefSeq_mRNA_lFPKM = \\\n np.array(self.l_mRNA_FPKM[brief_name],dtype=float)\n \n RefSeq_mRNA_lFPKM = RefSeq_mRNA_lFPKM[ self.mRNA_refSeq_index ]\n RefSeq_mRNA_Exps = \\\n np.shape(RefSeq_mRNA_lFPKM[RefSeq_mRNA_lFPKM > 0])[0]\n\n regression_R = self.regression[brief_name]['r_value']\n regression_P = self.regression[brief_name]['p_value']\n \n RefSeq_mRNA_lUMI = \\\n np.array(self.l_mRNA_UMI[brief_name],dtype=int)\n\n RefSeq_mRNA_UMI_count = np.sum( RefSeq_mRNA_lUMI )\n\n ERCC_lUMI = \\\n np.array(self.l_ERCC_UMI[brief_name],dtype=int)\n\n ERCC_UMI_count= np.sum( ERCC_lUMI )\n \n l_out = [\n rename,\n ERCC_MOLs, mRNA_MOLs, RefSeq_mRNA_MOLs,\n regression_R, regression_P, RefSeq_mRNA_Exps, ERCC_UMI_count, RefSeq_mRNA_UMI_count\n ]\n l_out = [str(i) for i in l_out]\n print >>f_out_file, \"\\t\".join(l_out)\n f_out_file.close()", "def main():\n\n if os.path.exists(os.path.join(PROCESSED_PATH,\n 'all_posts_data.csv')):\n print(\"-- all_posts_data.csv found locally - delete interm files if rerun needed\")\n total_df = pd.read_csv(PROCESSED_PATH / 'all_posts_data.csv')\n else:\n training_post_filenames = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'training',\n 'posts', '*.xml'))\n dev_post_filenames = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'testing',\n 'posts', '*.xml'))\n\n new_posts2017 = glob.glob(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test',\n 'posts', '*.xml'))\n\n training_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'training',\n 'labels.tsv')\n dev_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych16-data',\n 'data',\n 'testing',\n 'labels.tsv')\n\n training_df = create_posts_df(training_post_filenames)\n dev_df = create_posts_df(dev_post_filenames)\n new_df = create_posts_df(new_posts2017)\n\n training_df['corpus_source'] = '2016train_2017train'\n dev_df['corpus_source'] = '2016test_2017train'\n new_df['corpus_source'] = '2017test'\n\n training_df = merge_post_labels(training_df, training_labels)\n dev_df = merge_post_labels(dev_df, dev_labels)\n\n training_df = merge_author_ranks(training_df)\n dev_df = 
merge_author_ranks(dev_df)\n new_df = merge_author_ranks(new_df)\n\n total_df = pd.concat([training_df, dev_df, new_df])\n\n test_labels = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test',\n 'test_ids.tsv')\n total_df.reset_index(inplace=True)\n total_df = merge_test_ids(total_df, test_labels)\n label_file = os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test-labels.tsv')\n merge_ground_truth(total_df, label_file)\n output_path = PROCESSED_PATH / 'all_posts_data.csv'\n \n\n # clean body of text\n total_df['cleaned_body'], total_df['contained_quote'] = zip(*total_df['body'].apply(process_body))\n total_df['images'] = total_df['body'].apply(process_images)\n\n print('--Writing data to {}--'.format(output_path))\n total_df.to_csv(output_path, index=False)\n\n sentences_df = total_df.loc[:, ['post_id', 'cleaned_body', 'label', 'predict_me']]\n # the following will split posts into sentences and write out to a separate csv\n split_to_sentences(sentences_df)", "def __init__(self, path):\n\n # Metadata Definition\n metadata = pd.read_csv(path, nrows=5, header=None)\n self.subject = str(metadata.loc[0, 0])\n base_date = dt.datetime.strptime(metadata.loc[2, 0], '%d.%m.%Y').date()\n if metadata.loc[4, 0] != 'Unknown Line':\n self.valid_measurements = str(metadata.loc[4, 0])\n else:\n metadata = pd.read_csv(path, nrows=6, header=None)\n self.valid_measurements = str(metadata.loc[5, 0])\n\n column_names = ['hour', 'minutes', 'SYS(mmHg)', 'DIA(mmHg)', 'UNKNOW_1', 'UNKNOW_2', 'CODE', 'UNKNOW_3']\n self.data = pd.read_csv(path, sep=',', skiprows=51, skipfooter=1, header=None, names=column_names, engine='python')\n\n # Adjusting Date\n dates = [base_date]\n times = [dt.time(hour=self.data.loc[i, 'hour'], minute=self.data.loc[i, 'minutes']) for i in range(len(self.data))]\n current_date = base_date\n for i in range(1, len(times)):\n if times[i] < times[i-1]:\n current_date += dt.timedelta(days=1)\n dates.append(current_date)\n\n self.data.reset_index(inplace=True)\n self.data['timestamp'] = pd.to_datetime([dt.datetime.combine(dates[i], times[i]) for i in range(len(dates))])\n self.data['date'] = dates\n self.data['time'] = times\n\n order = ['timestamp', 'date', 'time', 'SYS(mmHg)', 'DIA(mmHg)', 'UNKNOW_1', 'UNKNOW_2', 'UNKNOW_3', 'CODE']\n self.data = self.data[order]\n\n try:\n self.data.set_index('timestamp', inplace=True)\n except KeyError:\n print('Timestamp can not be set as an index:')\n print(KeyError)\n\n xml_line = open(path, 'r').readlines()[-1]\n xml_root = ET.fromstring(xml_line)\n self.metadata = self._etree_to_dict(xml_root)['XML']", "def stat_parser():\n from tools import file_importer, file_outporter\n from math import log\n \n print(\"this is stat parser\")\n \n relPath = \"bob/processed/24h_bobdata_ed2.csv\"\n outPathUp = \"bob/processed/24h_bobprots_up_full.csv\"\n outPathDown = \"bob/processed/24h_bobprots_down_full.csv\"\n inpF = file_importer(relPath)\n outFUp = file_outporter(outPathUp)\n outFDown = file_outporter(outPathDown)\n \n \n skipFlag = True\n \n for inpLine in inpF:\n if skipFlag:\n skipFlag = False\n outFDown.write(\"ID,Uniprot ID,Gene name,unique peptides (unique+razor),KO1,KO2,KO3,WT1,WT2,WT3,enrichment,P value\\n\")\n outFUp.write(\"ID,Uniprot ID,Gene name,unique peptides (unique+razor),KO1,KO2,KO3,WT1,WT2,WT3,enrichment,P value\\n\")\n continue\n inpLine = inpLine.split(\"\\\" \\\"\")\n curLine = []\n for inpI in inpLine:\n curLine.append(inpI.strip(\"\\\"\\n\"))\n try: \n curLine[-1] = float(curLine[-1])\n except ValueError:\n curLine[-1] = 1 \n if curLine[-1] < 0.05 
and int(curLine[3]) > 1: # check if protein has at least 2 unique peptides and has a significant p value\n curLine[4:10] = [int(x) for x in curLine[4:10]]\n enrScore = log((sum(curLine[4:7]) / 3.0)/(sum(curLine[7:10]) / 3.0),2) # calculate log2 enrichment score\n # print int(sum(curLine[4:7]) / 3.0), int(sum(curLine[7:10]) / 3.0)\n if sum(curLine[4:7]) / 3.0 > sum(curLine[7:10]) / 3.0: # if the mean of the KO intensities is higher than the wt \n for outI in curLine:\n outFDown.write(str(outI).strip(\" \"))\n if outI is not curLine[-1]:\n outFDown.write(\",\")\n if outI is curLine[-2]:\n outFDown.write(str(enrScore)+ \",\")\n else:\n outFDown.write(\"\\n\")\n # outFDown.write(curLine[1] + \",\" + curLine[2] + \"\\n\")\n else:\n # outFUp.write(curLine[1] + \",\" + curLine[2] + \"\\n\")\n for outI in curLine:\n outFUp.write(str(outI).strip(\" \"))\n if outI is not curLine[-1]:\n outFUp.write(\",\")\n if outI is curLine[-2]:\n outFUp.write(str(enrScore)+ \",\")\n else:\n outFUp.write(\"\\n\")\n \n inpF.close()\n outFUp.close()\n outFDown.close()\n print(\"stat_parser completed\")", "def __init__(self,k,p,expfile,sampfile):\n \n self.k = k\n self.p = p\n self.expression =pd.read_table(expfile,sep = '\\t')\n self.samples = pd.read_table(sampfile,sep = '\\t',names = ['samples','patient'])\n self.set_data() # a functionto wrangle the data a bit.", "def __init__(self, sc, dataset_path):\n\n logger.info(\"Starting up the Recommendation Engine: \")\n\n self.sc = sc\n\n\t#Load cusomer data for later use\n\t\n logger.info(\"Loading Customer data...\")\n customer_file_path = os.path.join(dataset_path, 'tpo_customer.csv')\n customer_raw_RDD = self.sc.textFile(customer_file_path)\n customer_raw_data_header = customer_raw_RDD.take(1)[0]\n self.customer_RDD = customer_raw_RDD.filter(lambda line: line!=customer_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]))).cache()\n\tlogger.info(\"Loading Customer data success...\")\n\t#CUSTOMCUSTOMER_NAME,CUSTOMER_ADDRESS1,CUSTOMER_ADDRESS2,CUSTOMER_CITY,CUSTOMER_STATE,CUSTOMER_COUNTRY,CUSTOMER_ZIPCODE,CREATED_BY,CREATION_DATE,LAST_UPDATED_BY,LAST_UPDATE_DATE\n \n\n\n\t\n\t#Load turbine data for later use\t\n logger.info(\"Loading Turbine data...\")\n turbine_file_path = os.path.join(dataset_path, 'test_tpo_unit_config.csv')\n turbine_raw_RDD = self.sc.textFile(turbine_file_path)\n turbine_raw_data_header = turbine_raw_RDD.take(1)[0]\n self.turbine_RDD = turbine_raw_RDD.filter(lambda line: line!=turbine_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[5]),(tokens[34]),(tokens[51]),(tokens[35]))).cache()\n\tlogger.info(\"Loading Turbine data success...\")\n \n\t\n\t\n\t\n\t#Load site data for later use\t\n logger.info(\"Loading Site data...\")\n site_file_path = os.path.join(dataset_path, 'tpo_site.csv')\n site_raw_RDD = self.sc.textFile(site_file_path)\n site_raw_data_header = site_raw_RDD.take(1)[0]\n self.site_RDD = site_raw_RDD.filter(lambda line: line!=site_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: ((tokens[0]),(tokens[1]),(tokens[2]),(tokens[3]),(tokens[4]),(tokens[5]),(tokens[6]),(tokens[7]),(tokens[16]))).cache()\n\tlogger.info(\"Loading Site data success...\")\n\t\n\n\n\n\t# Load ratings data for later use\n logger.info(\"Loading Ratings data...\")\n ratings_file_path = os.path.join(dataset_path, 'ratings.csv')\n 
ratings_raw_RDD = self.sc.textFile(ratings_file_path)\n ratings_raw_data_header = ratings_raw_RDD.take(1)[0]\n self.ratings_RDD = ratings_raw_RDD.filter(lambda line: line!=ratings_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache()\n # Load movies data for later use\n logger.info(\"Loading Movies data...\")\n movies_file_path = os.path.join(dataset_path, 'movies.csv')\n movies_raw_RDD = self.sc.textFile(movies_file_path)\n movies_raw_data_header = movies_raw_RDD.take(1)[0]\n self.movies_RDD = movies_raw_RDD.filter(lambda line: line!=movies_raw_data_header)\\\n .map(lambda line: line.split(\",\")).map(lambda tokens: (int(tokens[0]),tokens[1],tokens[2])).cache()\n self.movies_titles_RDD = self.movies_RDD.map(lambda x: (int(x[0]),x[1])).cache()\n # Pre-calculate movies ratings counts\n self.__count_and_average_ratings()\n\n # Train the model\n self.rank = 8\n self.seed = 5L\n self.iterations = 10\n self.regularization_parameter = 0.1\n self.__train_model()", "def main():\n parser = ArgumentParser(usage='%(prog)s [options] ecommonsMetadata.csv')\n parser.add_argument(\"-d\", \"--date\", dest=\"date\",\n help=\"Date on or after that an ETD was published for \\\n creating DOIs. Put in format YYYY-MM\")\n parser.add_argument(\"datafile\", help=\"eCommons metadata worked from.\")\n\n args = parser.parse_args()\n\n if not len(sys.argv) > 0:\n parser.print_help()\n parser.exit()\n\n workingdir = csvparse(args.datafile, args.date)\n doiparse(workingdir)\n print('ANVL files available in: ' + workingdir)", "def main():\n csv_file_name = ''\n do_linear = False\n\n if len(sys.argv) == 3:\n csv_file_name = sys.argv[1]\n do_linear = bool(int(sys.argv[2]))\n\n data = open(csv_file_name)\n\n #ignore title\n first_line = data.readline()\n\n #read the remaining data within the file\n body = data.readlines()\n length = len(body)\n position_array = np.zeros((length, 2))\n class_array = np.zeros((length, 1))\n\n index = 0\n for line in body:\n values = line.strip().split(',')\n position_array[index][0] = values[0]\n position_array[index][1] = values[1]\n class_array[index] = values[2]\n index += 1\n\n if do_linear == False:\n #plot_spiral(position_array, class_array, \"spiral.png\")\n linear_position_array, linear_class_array, linear_model = linear_classifier(position_array, class_array, 4)\n plot_spiral_and_predicted_class(linear_position_array, linear_class_array, linear_model, \"linear.png\", \"Linear Classification Results\")\n\n else:\n nonlinear_position_array, nonlinear_class_array, nonlinear_model = non_linear_classifier(position_array, class_array, 4)\n plot_spiral_and_predicted_class(nonlinear_position_array, nonlinear_class_array, nonlinear_model, \"nonlinear.png\", \"Nonlinear Classification Results\")", "def main():\n\n preprocessed_file = preprocess_clinical_trials()\n\n preprocessed_file.to_csv(PREPROCESSED_CLINICAL_TRIALS_FILE_PATH, index=False)", "def __init__(self, className, name, title=None, unit=None):\n self.className = className\n filename = os.path.join(DATA, name + '.csv')\n with open(filename,encoding='utf8') as csv_file:\n reader = csv.reader(csv_file)\n for row_number, row in enumerate(reader):\n if row[1] == filename:\n continue\n if row[0] == '':\n self.title = row[1]\n continue\n if row[0] == 'unit':\n self.unit = row[1]\n continue\n try:\n datetime.strptime(row[0], \"%Y-%m-%d\")\n break\n except: ValueError\n super().__init__(name, title, unit)\n with open(filename,encoding='utf8') as csv_file:\n 
reader = csv.reader(csv_file)\n for skip in range(row_number): # row_number is first data line\n next(reader)\n for row in reader:\n try:\n self.data[datetime.strptime(row[0], \"%Y-%m-%d\")]=float(row[1])\n except: ValueError\n self.first_date=min(self.data.keys())\n self.last_date=max(self.data.keys())", "def main():\n master_csv = open(\"master.csv\", \"w\")\n master_csv.write(\"lat, lon, year, month, day, T_max, T_min, PrecipMM, T_ave, PrecipCM, RelHum\\n\")\n MoLS_comp_csv = open(\"MoLS_comp.csv\", \"w\")\n lat_lon_csv = open(\"lat_lon.csv\", \"w\")\n\n for csvFilename in os.listdir('.'):\n\n if not csvFilename.endswith('.csv') or csvFilename == \"master.csv\" or csvFilename == \"MoLS_comp.csv\" or csvFilename == \"lat_lon.csv\":\n continue # skip non-csv files\n\n csvFileObj = open(csvFilename)\n readerObj = csv.reader(csvFileObj)\n print(\"Currently parsing \" + str(csvFilename))\n for row in readerObj:\n if readerObj.line_num <= 8 :\n continue\n\n year = int(row[0])\n vp = float(row[8]) / 1000.0\n month, day = get_month(int(row[1]))\n T_ave = (float(row[6]) + float(row[7])) / 2\n PrecipCM = float(row[3]) / 10\n svp = .611 * math.e ** (5321 * ((1 / 273.0) - (1 / (T_ave + 273.15))))\n rh_ave = round((vp / svp) * 100, 2)\n\n # print([[csvFilename], , row[0], row[1], row[3], row[6], row[7], row[8]])\n # print(readerObj.line_num)\n # print([csvFilename.split(\"_\")[0], csvFilename.split(\"_\")[1].split(\".csv\")[0], row[0], row[1], row[3],\n # row[6], row[7], row[8]])\n\n master_csv.write(str(csvFilename.split(\"_\")[0]) + \",\" + str(csvFilename.split(\"_\")[1].split(\".csv\")[0]) +\n \",\" + str(year) + \",\" + str(month) + \",\" + str(day) + \",\" + str(row[6]) + \",\" + str(row[7])\n + \",\" + str(row[3]) + \",\" + str(T_ave) + \",\" + str(PrecipCM) + \",\" + str(rh_ave) + \"\\n\")\n MoLS_comp_csv.write(str(year) + \",\" + str(month) + \",\" + str(day) + \",\" + str(row[6]) + \",\" + str(row[7])\n + \",\" + str(row[3]) + \",\" + str(T_ave) + \",\" + str(PrecipCM) + \",\" + str(rh_ave) + \"\\n\")\n lat_lon_csv.write(str(csvFilename.split(\"_\")[0]) + \",\" + str(csvFilename.split(\"_\")[1].split(\".csv\")[0]) + \"\\n\")\n\n master_csv.close()\n MoLS_comp_csv.close()\n lat_lon_csv.close()", "def __init__(self):\n self.file_name = \"entries.csv\"\n self.csv_header = \"date,name,minutes,note\"\n try:\n with open(self.file_name) as file:\n reader = csv.DictReader(file)\n self.entries = list(reader)\n except FileNotFoundError:\n with open(self.file_name, \"a\") as file:\n file.write(f'{self.csv_header}\\n')\n\n try:\n for i in range(len(self.entries)):\n entry = self.entries[i]\n self.entries[i] = Entry(\n entry[\"date\"],\n entry[\"name\"],\n entry[\"minutes\"],\n entry[\"note\"]\n )\n print(f\"Worklog with {len(self.entries)} entries has been loaded.\\n\")\n print(\"Starting program...\")\n time.sleep(.75)\n except TypeError:\n raise TypeError(\"Could not read data file.\"\n + \" Ensure that CSV is properly formatted.\")\n except AttributeError:\n print(\"No existing worklog found.\\nNew worklog has been created.\\n\")\n self.entries = []\n print(\"Starting program...\")\n time.sleep(.75)", "def __init__(self, data_dir, html_dir, paver_html_dir, resolution_1, resolution_2, picture_format, hierarchy_version, original_mapping_dir):\n\n pn = proteomaps_PATHNAMES(hierarchy_version)\n PROTEIN_HIERARCHY_DIR = pn.PROTEIN_HIERARCHY_DIR\n po = proteomaps_organisms(pn)\n\n filenames_file = data_dir + \"/filenames.csv\"\n print \"Data set directory: \" + data_dir\n \n for row in 
csv.reader(open(filenames_file, 'r'), delimiter='\\t'):\n organism, data_file, data_set_name, data_set_name_matlab, article_name = row\n print \"\\nData set \" + data_set_name + \":\\nWriting html files to directory \" + html_dir\n\n data_type = \"cost\"\n proteomap_process_html(paver_html_dir, html_dir, data_dir, data_set_name, organism, resolution_1, resolution_2, article_name, data_type, picture_format,po,PROTEIN_HIERARCHY_DIR, original_mapping_dir)\n\n data_type = \"abundance\"\n proteomap_process_html(paver_html_dir, html_dir, data_dir, data_set_name, organism, resolution_1, resolution_2, article_name, data_type, picture_format,po,PROTEIN_HIERARCHY_DIR, original_mapping_dir)", "def main():\n\takpPoints,chpPoints = extractSupporterCities(\"Data/PreprocessedAkpTweets.csv\",\n\t\t\t\t\t\t\t\t\t\t\t \"Data/PreprocessedChpTweets.csv\")\n\tgenerateMapPoints(akpPoints,chpPoints)\n\tgenerateCitySentimentData(akpPoints,chpPoints)\n\tgenerateChoroplethMap(\"Data/tr_cities_modified.json\",\"Data/city_ratio.csv\")", "def __init__(self,csvrow):\n self.raw = csvrow\n data = csvrow.split(',')\n self.number = data[0]\n self.area = int(data[1])\n self.population = int(data[5])\n self.latitude = float(data[7])\n self.longitude = float(data[8])", "def __init__(self):\n \n self.csv_features = {} # Create dictionary to load the CSV features\n self.meta_features = [] # Create list to load the metadata features", "def __init__(self, updated=False):\n self.deputies = []\n self.senators = []\n\n if updated:\n file = IDENTITY_FILE_UPDATED\n else:\n file = IDENTITY_FILE\n\n with open(file, 'rt') as csvfile:\n spamreader = csv.DictReader(csvfile, delimiter=';')\n for row in spamreader:\n # Senators and deputies has different contracts\n # TODO: CREATE ANOTHER FEATURE TELLING SENATORS vs DEPUTIES\n try:\n if row.get('sen:CodigoParlamentar') or 'senador' in row.get('post', '').lower():\n self.add_senator(Senator(row))\n\n if row.get('cam:ideCadastro') or 'deputado' in row.get('post', '').lower():\n self.add_deputy(Deputy(row))\n except AttributeError:\n import code; code.interact(local=dict(globals(), **locals()))", "def main():\n\n outFile = r'../product/yoshinoya_rawdata.csv'\n\n with open(r'../data/yoshinoya_id.csv') as yoshinoya_id:\n\n # Reader to read store id\n id_reader = csv.reader(yoshinoya_id)\n\n # Skip the headers\n header = next(yoshinoya_id, None)\n\n # Keys from the get_data function\n headers = ['storeid', 'brand', 'name', 'lat', 'lon', 'postalCode',\n 'address', 'MON_open', 'MON_close', 'TUE_open', 'TUE_close',\n 'WED_open', 'WED_close', 'THU_open', 'THU_close', 'FRI_open',\n 'FRI_close', 'SAT_open', 'SAT_close', 'SUN_open', 'SUN_close']\n\n with open(outFile, 'w', newline='') as csvfile:\n\n writer = csv.DictWriter(csvfile, delimiter=',', lineterminator='\\n',\n fieldnames=headers)\n\n # Write the headers\n writer.writeheader()\n\n for row in id_reader:\n\n storeid = row[0]\n\n print(f\"Processing {storeid}...\")\n\n store_row = get_data_yoshinoya(storeid)\n\n if store_row is None:\n\n print(f\"failed to request the page with storeid {storeid}\")\n continue\n\n writer.writerow(store_row)\n\n csvfile.close()", "def collect_data(self):\n categories = []\n list_of_feature_lists = []\n feature_sets = set()\n with open(self.csv_path, \"r\") as f:\n reader = csv.DictReader(f)\n # collecting some stats for FDR calculation:\n self.PSM_count = 0\n self.decoy_count = 0\n\n if self[\"dump_svm_matrix\"]:\n self.init_svm_matrix_dump()\n additional_matrix_info = []\n\n for i, row in enumerate(\n sorted(\n 
reader,\n reverse=self[\"bigger_scores_better\"],\n key=lambda d: float(d[self.col_for_sorting]),\n )\n ):\n\n features = self.row_to_features(row)\n\n if tuple(features) in feature_sets:\n continue\n feature_sets.add(tuple(features))\n\n category, psm_FDR = self.get_psm_category(row)\n\n list_of_feature_lists.append(features)\n categories.append(category)\n\n if self[\"dump_svm_matrix\"]:\n label = -1 if row_is_decoy(row) else 1\n sequence = \"{0}.{1}#{2}.{3}\".format(\n row[\"Sequence Pre AA\"].strip(),\n row[\"Sequence\"].strip(),\n row[\"Modifications\"].strip(),\n row[\"Sequence Post AA\"].strip(),\n )\n additional_matrix_info.append(\n {\n \"psm_id\": row[\"Spectrum Title\"].strip(),\n \"label\": label,\n \"scannr\": row[\"Spectrum Title\"].strip().split(\".\")[-2],\n \"peptide\": sequence,\n \"proteins\": self.parse_protein_ids(row[\"Protein ID\"]),\n }\n )\n\n if i % 1000 == 0:\n score_val = float(row[self.col_for_sorting])\n msg = (\n \"Generating feature matrix from input csv \"\n \"(line ~{0}) with score {1} and FDR \"\n \"{2}\".format(i, score_val, psm_FDR)\n )\n print(msg, end=\"\\r\")\n\n # All data points are collected in one big matrix, to make standardization possible\n print(\"\\nConverting feature matrix to NumPy array...\")\n X_raw = np.array(list_of_feature_lists, dtype=float)\n\n print(\"Replacing empty/NaN values with the mean of each column...\")\n self.nan_replacer = Imputer()\n self.nan_replacer.fit(X_raw)\n X_raw = self.nan_replacer.transform(X_raw)\n # Standardize input matrix to ease machine learning! Scaled data has zero mean and unit variance\n print(\"Standardizing input matrix...\")\n self.scaler = SCALER.fit(X_raw)\n self.X = self.scaler.transform(X_raw)\n self.categories = np.array(categories)\n print()\n\n if self[\"dump_svm_matrix\"]:\n print(\"Dumping SVM matrix to\", self[\"dump_svm_matrix\"])\n\n for i, matrix_row in enumerate(self.X):\n matrix_row_info = additional_matrix_info[i]\n self.dump_svm_matrix_row(\n row=list(matrix_row),\n psm_id=matrix_row_info[\"psm_id\"],\n label=matrix_row_info[\"label\"],\n scannr=matrix_row_info[\"scannr\"],\n peptide=matrix_row_info[\"peptide\"],\n proteins=matrix_row_info[\"proteins\"],\n )\n\n print(\"Dumped SVM matrix to\", self[\"dump_svm_matrix\"])\n return", "def load(cls):\n \n # Loop through problems and build patient problem lists:\n probs = csv.reader(file(PROBLEMS_FILE,'U'),dialect='excel-tab')\n header = probs.next() \n for prob in probs:\n cls(dict(zip(header,prob))) # Create a problem instance ", "def main():\n args = parse_arguments()\n\n de_data = pd.read_csv(args.raw_file, sep=\"\\t\")\n de_data.rename(columns={\"Unnamed: 0\": \"gene_id\"}, inplace=True)\n de_data.fillna(value=1, inplace=True)\n columns = {}\n col_order = []\n\n # Make sure all listed numeric columns are valid numeric variables based\n # on a union of numeric column names from cuffdiff, edgeR, deseq2 and test\n # files.\n numeric_columns = [\n \"baseMean\",\n \"log2FoldChange\",\n \"lfcSE\",\n \"stat\",\n \"pvalue\",\n \"padj\",\n \"value_1\",\n \"value_2\",\n \"log2(fold_change)\",\n \"test_stat\",\n \"p_value\",\n \"q_value\",\n \"logfc\",\n \"fdr\",\n \"stat\",\n \"logFC\",\n \"logCPM\",\n \"LR\",\n \"Pvalue\",\n \"FDR\",\n ]\n de_columns = de_data.columns\n\n for column in numeric_columns:\n if column not in de_columns:\n continue\n\n if not is_numeric_dtype(de_data[column]):\n msg = (\n f\"Column {column} is not numeric. Please make sure \"\n f\"that the input file has valid numeric values (i.e. 
\"\n f\"periods for decimal places).\"\n )\n send_message(error(msg))\n raise ValueError(msg)\n\n if args.gene_id:\n if args.gene_id == \"index\":\n columns[\"gene_id\"] = list(de_data.index.astype(str))\n col_order.append(\"gene_id\")\n else:\n columns[\"gene_id\"] = list(de_data[args.gene_id].astype(str))\n col_order.append(\"gene_id\")\n\n if args.logfc:\n col = np.array(de_data[args.logfc])\n col[np.isinf(col)] = 0\n columns[\"logfc\"] = list(col)\n col_order.append(\"logfc\")\n\n if args.fdr:\n columns[\"fdr\"] = list(de_data[args.fdr])\n col_order.append(\"fdr\")\n\n if args.pvalue:\n columns[\"pvalue\"] = list(de_data[args.pvalue])\n col_order.append(\"pvalue\")\n\n if args.fwer:\n columns[\"fwer\"] = list(de_data[args.fwer])\n col_order.append(\"fwer\")\n\n if args.logodds:\n columns[\"logodds\"] = list(de_data[args.logodds])\n col_order.append(\"logodds\")\n\n if args.stat:\n columns[\"stat\"] = list(de_data[args.stat])\n col_order.append(\"stat\")\n\n with open(args.output_json, \"w\") as f:\n json.dump(columns, f, separators=(\",\", \":\"), allow_nan=False)\n\n outdf = pd.DataFrame(columns)\n outdf = outdf[col_order]\n outdf.to_csv(args.output_file, sep=\"\\t\", index=False, compression=\"gzip\")", "def main():\n # Specify path\n training_filepath = 'data/training.csv'\n testing_filepath = 'data/public_test_features.csv'\n\n # Check whether the specified path exists or not\n isExist = os.path.exists(training_filepath)\n if(isExist):\n print('Reading from ' + training_filepath)\n else:\n print('Training file not found in the app path.')\n exit()\n preprocess_file(training_filepath, 'data/clean_training1.csv', True)\n # Check whether the specified path exists or not\n isExist = os.path.exists(testing_filepath)\n if(isExist):\n print('Reading from ' + testing_filepath)\n else:\n print('Testing file not found in the app path.')\n exit()\n preprocess_file(testing_filepath,'data/clean_testing1.csv', False)", "def main():\n# file_name = input(\"Please Provide File Name (e.g: sample_data.txt): \")\n file_name = \"sample_data.txt\"\n file_path = get_path(file_name)\n if file_path != -1:\n df = generate_DataFrame(file_path=file_path)\n# table_name = input(\"Please Enter table Name(emp_details)\")\n table_name = \"emp_details\"\n table_create(table_name)\n for x, i in df.iterrows():\n insert_update(i,table_name)\n print('processing id:', x)", "def __init__(\n self,\n data_columns: Sequence[str] = None,\n bijector: Tuple[InitFunction, Bijector_Info] = None,\n latent: distributions.LatentDist = None,\n conditional_columns: Sequence[str] = None,\n data_error_model: Callable = None,\n condition_error_model: Callable = None,\n autoscale_conditions: bool = True,\n N: int = 1,\n info: Any = None,\n file: str = None,\n ) -> None:\n\n # validate parameters\n if data_columns is None and file is None:\n raise ValueError(\"You must provide data_columns OR file.\")\n if file is not None and any(\n (\n data_columns is not None,\n bijector is not None,\n conditional_columns is not None,\n latent is not None,\n data_error_model is not None,\n condition_error_model is not None,\n info is not None,\n )\n ):\n raise ValueError(\n \"If providing a file, please do not provide any other parameters.\"\n )\n\n # if file is provided, load everything from the file\n if file is not None:\n # load the file\n with open(file, \"rb\") as handle:\n save_dict = pickle.load(handle)\n\n # make sure the saved file is for this class\n c = save_dict.pop(\"class\")\n if c != self.__class__.__name__:\n raise TypeError(\n f\"This 
save file isn't a {self.__class__.__name__}. It is a {c}.\"\n )\n\n # load the ensemble from the dictionary\n self._ensemble = {\n name: Flow(_dictionary=flow_dict)\n for name, flow_dict in save_dict[\"ensemble\"].items()\n }\n # load the metadata\n self.data_columns = save_dict[\"data_columns\"]\n self.conditional_columns = save_dict[\"conditional_columns\"]\n self.data_error_model = save_dict[\"data_error_model\"]\n self.condition_error_model = save_dict[\"condition_error_model\"]\n self.info = save_dict[\"info\"]\n\n self._latent_info = save_dict[\"latent_info\"]\n self.latent = getattr(distributions, self._latent_info[0])(\n *self._latent_info[1]\n )\n\n # otherwise create a new ensemble from the provided parameters\n else:\n # save the ensemble of flows\n self._ensemble = {\n f\"Flow {i}\": Flow(\n data_columns=data_columns,\n bijector=bijector,\n conditional_columns=conditional_columns,\n latent=latent,\n data_error_model=data_error_model,\n condition_error_model=condition_error_model,\n autoscale_conditions=autoscale_conditions,\n seed=i,\n info=f\"Flow {i}\",\n )\n for i in range(N)\n }\n # save the metadata\n self.data_columns = data_columns\n self.conditional_columns = conditional_columns\n self.latent = self._ensemble[\"Flow 0\"].latent\n self.data_error_model = data_error_model\n self.condition_error_model = condition_error_model\n self.info = info", "def main():\n # LoL = readcsv( \"wds.csv\" )\n # print(LoL[:10])\n\n # test writing\n # write_to_csv( LoL[:10], \"tenrows.csv\" )\n\n # text csv_to_html_table_starter\n # output_html = csv_to_html_table_starter( LoL[:10] )\n # print(\"\\noutput_html is\\n\\n\" + output_html)\n # create_html_page(output_html, \"test.html\")\n Wcount_first()\n Wcount_last()\n Wcount_middle()", "def main():\n langs = []\n\n with open(\"sql/07_populate.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in data folder\\n\\n\")\n\n langs = write_lang_city(sql)\n write_groups_diets(sql, langs)\n\n with open(\"sql/10_populate_test_data.sql\", 'w', encoding='utf8') as sql:\n sql.write(\"--this file is generated from csv files in moc_data folder\\n\\n\")\n write_test_data(sql)\n # This command sets postgis coordinates based on latitude and longitude\n sql.write(\"UPDATE restaurant SET geo_location = ST_POINT(latitude, longitude);\\n\")\n sql.close()", "def main ( independentBaseName, dependentBaseName, independentTSID, dependentTSID, statisticsFile, nEquations ):\n # Get the logger for diagnostics and print out command line parameters (use str() in case None)\n logger = logging.getLogger()\n logger.info ( \"independentBaseName='\" + str(independentBaseName) + \"'\" )\n logger.info ( \"dependentBaseName='\" + str(dependentBaseName) + \"'\" )\n logger.info ( \"independentTSID='\" + str(independentTSID) + \"'\" )\n logger.info ( \"dependentTSID='\" + str(dependentTSID) + \"'\" )\n logger.info ( \"statisticsFile='\" + str(statisticsFile) + \"'\" )\n logger.info ( \"nEquations=\" + str(nEquations) )\n # Define lists\n _TSID=[]\n _TSID_Indep=[]\n _n1=[]\n _MeanX1=[]\n _SX1=[]\n _n2=[]\n _MeanX2=[]\n _SX2=[]\n _MeanY1=[]\n _SY1=[]\n _NY=[]\n _MeanY=[]\n _SY=[]\n _SkewY=[]\n _a=[]\n _b=[]\n _R=[]\n _R2=[]\n _meanY1Est=[]\n _SY1Est=[]\n _RMSE=[]\n _SEE=[]\n _SEP=[]\n _SESlope=[]\n _TestScore=[]\n _TestQuantile=[]\n _TestRelated=[]\n _Y2=[]\n _MeanY2=[]\n _SY2=[]\n _Skew=[]\n\n # Set values\n missingValue = -999\n p = 0.95 # p = one sided exceedence probability\n\n # Read data and determine statistics\n for i in range 
(0,nEquations):\n # Construct name based on number of equations\n if nEquations == 1:\n independentName = independentBaseName + '.csv'\n dependentName = dependentBaseName + '.csv'\n else:\n monthsString = string.rjust(str(i+1),2)\n monthsString = string.replace(monthsString,' ', '0')\n monthsString = '_' + monthsString\n independentName = independentBaseName + monthsString + '.csv'\n dependentName = dependentBaseName + monthsString + '.csv'\n\n # Read the data from the CSV files\n [TSID_Indep, X] = Read_TS.Read_CSV(independentName)\n [TSID, Y] = Read_TS.Read_CSV(dependentName)\n\n # Override the TSID with that from the command line\n #if ( dependentTSID != None ):\n # TSID = dependentTSID\n #if ( independentTSID != None ):\n # TSID_Indep = independentTSID\n\n # remove missing values\n [independent, x_n2, dependent, n1, n2] = Statistics.overlapping(X, Y, missingValue)\n\n # calculate basic statistics\n [MeanX1, SX1]=Statistics.statistics(independent)\n [MeanY1, SY1]=Statistics.statistics(dependent)\n if x_n2 != []:\n [MeanX2, SX2]=Statistics.statistics(x_n2)\n else: # No data that can be filled\n [MeanX2, SX2]=[missingValue, missingValue]\n\n # calculate additional statistics for dependent variable\n [MeanY, NY, SY]=Statistics.dependent_statistics(Y, missingValue)\n SkewY = Statistics.Skew(Y)\n\n # calculate regression coefficients\n [a,b,R,R2]=Statistics.regression_coef(independent, dependent)\n\n # Estimate the values for the dependent variable\n [Yest, meanY1Est, SY1Est]=Statistics.calculate_Yest(independent, a, b)\n\n # Calculated the estimates for the dependent variable\n if x_n2 != []:\n Y2 = Statistics.LinearRegression(a,b,x_n2)\n [MeanY2, SY2]=Statistics.statistics(Y2)\n else: # No data that can be filled\n Y2 = []\n [MeanY2, SY2]=[missingValue, missingValue]\n\n # Calculate additional statistics\n if x_n2 != []:\n Skew = Statistics.Skew(Y2)\n RMSE = Statistics.RMSE(dependent, Yest, n1)\n SEE = Statistics.SEE(dependent, Yest, n1)\n SEP = Statistics.SEP(independent, SEE, n1)\n SESlope = Statistics.SESlope(independent, dependent, Yest, n1)\n TestScore = Statistics.TestScore(b, SESlope)\n TestQuantile = StudentTQuantiles.Student_T_quantile (p,n1-2)\n if TestScore >= TestQuantile:\n TestRelated = 'Yes'\n else:\n TestRelated = 'No'\n else: # No data that can be filled\n Skew = missingValue\n RMSE = missingValue\n SEE = missingValue\n SEP = [missingValue]\n SESlope = missingValue\n TestScore = missingValue\n TestQuantile = missingValue\n TestRelated = 'No'\n\n # Append all the monthly data to an array containing the data for all months\n # This will create a list with a single entry for a run with one equation\n _TSID.append(TSID)\n _TSID_Indep.append(TSID_Indep)\n _n1.append(n1)\n _MeanX1.append(MeanX1)\n _SX1.append(SX1)\n _n2.append(n2)\n _MeanX2.append(MeanX2)\n _SX2.append(SX2)\n _MeanY1.append(MeanY1)\n _SY1.append(SY1)\n _NY.append(NY)\n _MeanY.append(MeanY)\n _SY.append(SY)\n _SkewY.append(SkewY)\n _a.append(a)\n _b.append(b)\n _R.append(R)\n _R2.append(R2)\n _meanY1Est.append(meanY1Est)\n _SY1Est.append(SY1Est)\n _RMSE.append(RMSE)\n _SEE.append(SEE)\n _SEP.append(SEP)\n _SESlope.append(SESlope)\n _TestScore.append(TestScore)\n _TestQuantile.append(TestQuantile)\n _TestRelated.append(TestRelated)\n _Y2.append(Y2)\n _MeanY2.append(MeanY2)\n _SY2.append(SY2)\n _Skew.append(Skew)\n\n # Pass the lists with all information to be written to a CSV file\n Write_Stats.Write_CSV(statisticsFile,\n _TSID,\n _TSID_Indep,\n _n1,\n _MeanX1,\n _SX1,\n _n2,\n _MeanX2,\n _SX2,\n _MeanY1,\n _SY1,\n 
_NY,\n _MeanY,\n _SY,\n _SkewY,\n _a,\n _b,\n _R,\n _R2,\n _meanY1Est,\n _SY1Est,\n _RMSE,\n _SEE,\n _SEP,\n _SESlope,\n _TestScore,\n _TestQuantile,\n _TestRelated,\n _Y2,\n _MeanY2,\n _SY2,\n _Skew)\n logger.info ('Statistics file written')\n print ('Statistics file written')", "def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)", "def __init__(self):\n self.animals_list = []\n self.book = None\n try:\n with open(os.getcwd() + '/animals.csv', 'r') as f:\n read_file = csv.reader(f)\n for row in read_file:\n self.animals_list += row\n except LookupError:\n print(\"Check if 'animals.csv' file is in the main directory.\")", "def __init__(self, filename):\n self.data = pd.read_csv(filename)\n\n # Dialogue information.\n self.dialogue_ids = sorted(list(set(self.data['dialogue_id'])))\n self.number_of_dialogues = len(self.dialogue_ids)\n\n # Dialogue Act information.\n self.DAs = sorted(list(set(self.data['dialogue_act'])))\n self.number_of_DAs = len(self.DAs)\n\n # Speaker information.\n self.speakers = sorted(list(set(self.data['speaker'])))\n\n # Level information.\n self.levels = sorted(list(set(self.data['level'])))\n\n # Extracts the unique (speaker, DA) dialogue turn tuples from the data set.\n speaker_da_tuples = self.data[['speaker', 'dialogue_act']].drop_duplicates().values\n speaker_da_tuples = [tuple(pair) for pair in speaker_da_tuples]\n\n # Constructs a dictionary consisting of the unique turn tuples and their corresponding vector representation.\n self.number_of_classes = len(speaker_da_tuples)\n self.class_dict = dict()\n class_vectors = np.identity(self.number_of_classes)\n for i in range(self.number_of_classes):\n self.class_dict[speaker_da_tuples[i]] = class_vectors[i]", "def __init__(self, instancefile):\n coord = []; demand = [], # initialize lists\n with open(instancefile,\"r\") as iFile: # read data from file\n self.InstanceName = iFile.readline().strip() # name of instamce\n iFile.readline(); iFile.readline(); iFile.readline() # skip lines\n vals = iFile.readline().strip().split()\n self.MaxNumVeh = int(vals[0]) # read data\n self.MaxVehCap = float(vals[1]) # read data\n iFile.readline(); iFile.readline(); iFile.readline(); iFile.readline() # skip lines\n line = iFile.readline().strip()\n while line != \"\": # read customer data\n vals = line.split()\n coord.append([float(vals[1]), float(vals[2])])\n demand.append(float(vals[3]))\n line = iFile.readline().strip()\n self.NumCust = len(coord)-1 # set number of customers\n self.Coord = np.array(coord) # create object variables\n self.CustDem = np.array(demand)\n self.DistMatrix = squareform(pdist(self.Coord,\"euclidean\")) # compute distance matrix", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def __init__(\n self,\n file_name=None,\n file_contents=None,\n uploaded_file: UploadedFile = None,\n user: User = None,\n gating_strategy: GatingStrategy = None,\n ):\n if uploaded_file:\n self.upload_file = uploaded_file\n file_name = uploaded_file.name\n file_contents = uploaded_file.content\n # print(file_contents)\n self.content = file_contents\n self.file_name = file_name\n self.gating_strategy = 
gating_strategy\n self.df = pd.read_csv(self.content, parse_dates=[\"Date\"])\n\n # List of columns always expected\n # ToDo: Find out if any of these columns are 'required' - if so\n # cannot continue without them.\n\n # Use variables to store static_column names in case they change\n # in future\n self.sc_panel = \"Panel\"\n self.sc_clinical_sample = \"Clinical_sample\"\n self.sc_filename = \"filename\"\n self.sc_operator1 = \"Operator name\"\n self.sc_comments = \"Comments\"\n self.sc_batch = \"batch\"\n self.sc_date = \"Date\"\n self.required_columns = [\n self.sc_filename,\n self.sc_panel,\n self.sc_clinical_sample,\n ]\n\n self.static_columns = [\n self.sc_batch,\n self.sc_operator1,\n self.sc_comments,\n self.sc_date,\n ]\n\n # Store the unique panels in the data\n # ToDo: I think there should be only one unique panel - check.\n self.panels = self.df[\"Panel\"].unique().tolist()\n self.panel_name = self.panels[0].upper()\n\n # Compute names of parameters present. These are all the other\n # columns in the file that are not in the static_columns list\n # and are not unregistered_derived_parameters\n parameter_columns = set(self.df.columns) - set(self.static_columns)\n parameter_columns -= set(self.required_columns)\n self.parameter_columns = list(parameter_columns)\n\n # Store unregistered parameters. Derived ones will be dynamically\n # added to the Parameter table before upload\n self.unregistered_derived_parameters = []\n self.unregistered_parameters = []\n for parameter_column in self.parameter_columns:\n try:\n parameter_object = Parameter.objects.get(\n gating_hierarchy=parameter_column\n )\n except Parameter.DoesNotExist:\n if parameter_column.endswith(\"Count_back\") or parameter_column.endswith(\n \"freq\"\n ):\n self.unregistered_derived_parameters.append(parameter_column)\n else:\n self.unregistered_parameters.append(parameter_column)\n self.parameter_columns = [\n column\n for column in self.parameter_columns\n if column not in self.unregistered_parameters\n and column not in self.unregistered_derived_parameters\n ]\n\n # Names for pseudo parameters (parameters computed from data)\n self.pseudo_parameters_numeric = []\n if self.sc_batch in self.df.columns:\n self.pseudo_parameters_numeric.append(\n (self.sc_batch, f\"{self.panel_name}_batch\")\n )\n if self.sc_operator1 in self.df.columns:\n self.pseudo_parameters_numeric.append(\n (self.sc_operator1, f\"{self.panel_name}_operator_1\")\n )\n\n self.pseudo_parameters_date = []\n if self.sc_date in self.df.columns:\n self.pseudo_parameters_date.append(\n (self.sc_date, f\"{self.panel_name}_date_processed\")\n )\n\n self.pseudo_parameters_text = []\n if self.sc_comments in self.df.columns:\n self.pseudo_parameters_text.append(\n (self.sc_comments, f\"{self.panel_name}_comments\")\n )\n\n # Number of rows to process\n self.nrows = len(self.df)\n\n # Default uploaded file\n if not uploaded_file:\n self.upload_file = UploadedFile(\n name=self.file_name,\n user=user,\n description=\"Panel results\",\n row_number=self.nrows,\n content=self.content,\n notes=\"\",\n content_type=\"PANEL_RESULTS\",\n )\n self.upload_file.save()", "def main(input_filepath, output_filepath):\n productsDict = dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def run_code_for_question_one_part_two():\n df_flights_info = read_csv_file_data(\"../Data/T_Fare_Flight_Q1.csv\")\n df_flights_info = 
evaluate_percentage_of_class_for_each_flight(df_flights_info)\n df_flights_info.to_csv(\"../Output/Question_1_Part_2_percentages.csv\")\n print(df_flights_info.head())", "def main():\n if len(sys.argv) != 4:\n sys.exit('Please run with : python data-eng.py donors_file.txt zipcode_output_filename date_output_filename')\n compute_stats(sys.argv[1], sys.argv[2], sys.argv[3])", "def __init__(self,\n feature_selection=True,\n clinical_path='data/tidy/train_cli.csv',\n proteomic_path='data/tidy/train_pro.csv',\n rna_path='data/tidy/train_rna.csv',\n mismatch_path='data/tidy/sum_tab_1.csv',\n test_proteomic_path='data/raw/test_pro.tsv',\n test_clinical_path='data/raw/test_cli.tsv',\n train_rna_path='data/raw/train_rna.tsv',\n test_rna_path='data/raw/test_rna.tsv',\n mislabel_path='data/tidy/sum_tab_2.csv'):\n self.clinical = pd.read_csv(clinical_path, index_col=0)\n self.proteomic = self.preprocess(\n pd.read_csv(proteomic_path, index_col=0)\n )\n self.rna = self.preprocess(\n pd.read_csv(rna_path, index_col=0)\n )\n self.mismatch = pd.read_csv(mismatch_path, index_col=0)\n self.test_proteomic = self.preprocess(\n pd.read_csv(test_proteomic_path, index_col=0, sep='\\t').T\n )\n self.test_rna = self.preprocess(\n pd.read_csv(test_rna_path, index_col=0, sep='\\t').T\n )\n self.test_clinical = pd.read_csv(test_clinical_path, index_col=0, sep='\\t')\n self.train_rna = pd.read_csv(train_rna_path, index_col=0, sep='\\t').T\n self.train_pro_rna = self.train_rna.merge(self.proteomic, how='outer', left_index=True, right_index=True)\n self.test_pro_rna = self.test_rna.merge(self.test_proteomic, how='outer', left_index=True, right_index=True)\n self.train_all = self.train_pro_rna.merge(self.clinical, how='outer', left_index=True, right_index=True)\n self.train_all = self.train_all.replace(['Female', 'Male','MSI-Low/MSS', 'MSI-High'], [0, 1, 0, 1])\n self.test_all = self.test_pro_rna.merge(self.test_clinical, how='outer', left_index=True, right_index=True)\n self.test_all = self.test_all.replace(['Female', 'Male', 'MSI-Low/MSS', 'MSI-High'], [0, 1, 0, 1])\n self.mislabel = pd.read_csv(mislabel_path, index_col=0)\n\n if feature_selection:\n self.select_features()\n\n # create training labels for if a sample has been mislabeled\n self.mislabel_labels = []\n for i in range(0, len(self.mislabel.index)):\n if self.mislabel.iloc[i, 0] == self.mislabel.iloc[i, 1] and self.mislabel.iloc[i, 1] == self.mislabel.iloc[i, 2]:\n self.mislabel_labels.append(0)\n else:\n self.mislabel_labels.append(1)", "def __init__(self, filename=None, filetype=None, instrument=None):\n if filename:\n if instrument == 'Element':\n skipfooter = 4\n header = 1\n drop = 9\n elif instrument == 'Agilent':\n skipfooter = 4\n header = 3\n drop = 3\n else:\n skipfooter = 0\n header = 0\n drop = 0\n\n if filetype == 'xlsx':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.imported = pd.ExcelFile(filename)\n self.data = self.imported.parse(\n 0, index_col=0, skipfooter=skipfooter, header=header)\n self.data = self.data.drop(self.data.index[:drop], axis=0)\n os.chdir(pwd)\n # TODO xlsx doesnt work with agilent type\n elif filetype == 'csv':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.data = pd.read_csv(filename, sep=',', index_col=0, skipfooter=skipfooter,\n header=header, engine='python')\n os.chdir(pwd)\n elif filetype == 'asc':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.data = pd.read_csv(filename, sep='\\t', index_col=0, skipfooter=skipfooter,\n header=header, engine='python')\n 
self.data = self.data.drop(self.data.index[:drop], axis=0)\n self.data.dropna(axis=1, how='all', inplace=True)\n self.data = self.data.apply(pd.to_numeric, errors='coerce')\n os.chdir(pwd)\n else:\n warnings.warn('File type not supported.')\n\n self.data.index = self.data.index.astype('float32')\n self.time = self.data.index\n self.elements = list(map(elem_resolution, self.data.columns))\n self.data.columns = self.elements\n\n self.srms = pd.ExcelFile('./SRM.xlsx').parse(index_col=0)\n self.sum_koeficients = pd.ExcelFile(\n './default_sum_koef.xlsx').parse(0, index_col=0, header=None).to_dict()[1]\n\n self.srm = None\n self.iolite = None\n self.names = None\n self.internal_std = None\n self.ablation_time = None\n\n self.laser_off = []\n self.laser_on = []\n self.skip = {'bcg_start': 0,\n 'bcg_end': 0,\n 'sample_start': 0,\n 'sample_end': 0} # time in seconds to skip from each bcg and sample\n\n self.filter_line = None\n self.starts = None\n self.ends = None\n self.bcg = None\n self.average_peaks = None\n self.ratio = None\n self.quantified = None\n self.lod = None\n self.correction_elements = None\n self.corrected_IS = None\n self.corrected_SO = None\n\n self.dx = None\n self.dy = None\n self.maps = {}\n self.qmaps = {}\n\n self.regression_values = {}\n self.regression_equations = {}", "def main(xls, seqtype):\n data_extraction = {}\n # 1 - Load data\n logger.info(f'Load {xls}')\n manifest, metadata = load_xls(xls)\n # 2 - Check file and data\n logger.info(f'Start to validate XLS')\n check_samples(manifest, metadata)\n check_seqtype(manifest, seqtype)\n check_metadata(metadata, seqtype)\n logger.success(f'Successfully validate XLS')\n # 3 - Export XLS to TSV for Qiime2\n logger.info(f'Start to export XLS to TSV')\n data_extraction = extract_manifest(manifest, seqtype, data_extraction)\n data_extraction, metadata_vars = extract_metadata(metadata, seqtype, data_extraction)\n export_to_tsv_for_qiime(data_extraction, metadata_vars, seqtype)\n logger.success(f'Done')", "def process(self):\n parser = csv.reader(self.reader,delimiter=self.delimiter_DIC[self.delim])\n firstRec = True\n for fields in parser:\n if firstRec:\n fieldNames = fields\n firstRec = False\n else:\n self.dicts.append({})\n for i,f in enumerate(fields):\n try:\n self.dicts[-1][fieldNames[i]] = f\n except:\n import pdb\n pdb.set_trace()\n if self.eng is \"spectrumMill\":\n for i,row in enumerate(self.dicts):\n fileSM = row[self.engine[self.eng][0]]\n acNoSM = row[self.engine[self.eng][1]]\n masSM=row[self.engine[self.eng][2]]\n chrgeSM=row[self.engine[self.eng][3]]\n preAmSM=row[self.engine[self.eng][4]].replace('(','').replace(')','')\n pepSM=row[self.engine[self.eng][5]]\n nAmSM=row[self.engine[self.eng][6]].replace('(','').replace(')','')\n modSM=row[self.engine[self.eng][7]].split('\\s')+row[self.engine[self.eng][8]].split('\\s')\n modLis = [mod.strip() for mod in modSM if mod!=' ']\n modSM = ';'.join(modLis)\n scoreSM=row[self.engine[self.eng][9]]\n descrimentSM=row[self.engine[self.eng][10]]\n if modSM !='':\n modPepInHupaFormat=self.modTermDic.spectrumMill(preAmSM,pepSM,nAmSM,modSM,self.eng)\n parsedData=acNoSM+'\\t'+masSM+'\\t'+chrgeSM+'\\t'+modPepInHupaFormat+'\\t'+scoreSM+'\\n'\n data = self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n else:\n parsedData=acNoSM+'\\t'+masSM+'\\t'+chrgeSM+'\\t'+preAmSM+'.'+pepSM+'.'+nAmSM+'\\t'+'-'+'\\t'+scoreSM+'\\n'\n data = self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n\n if self.eng is \"mascot\":\n \"\"\"\n In Mascot, under every gi (protein) 
corresponding peptide information will be there\n \"\"\"\n giFound=True\n for i,row in enumerate(self.dicts):\n if row[self.engine[self.eng][0]]!='':\n giAsKey = row[self.engine[self.eng][0]]\n giFound=False\n if giFound==False:\n massM=row[self.engine[self.eng][1]]\n chargeM=row[self.engine[self.eng][2]]\n preAmM=row[self.engine[self.eng][3]]\n pepM=row[self.engine[self.eng][4]]\n nAmM = row[self.engine[self.eng][5]]\n modM=row[self.engine[self.eng][6]]\n modSiteM=row[self.engine[self.eng][7]]\n scoreM=row[self.engine[self.eng][8]]\n evalM=row[self.engine[self.eng][9]]\n if modM !='':\n \"\"\"\n modificationFormat from modification.py creates a MASTER_UNIMOD dictionary \n Where all modifications of unimod would be available. \n At same time formatMod function in modificationFormat class would convert modification format \"\"\"\n modPepInHupaFormat=self.modTermDic.mascot(preAmM,pepM,nAmM,modSiteM,modM,self.eng)\n parsedData=giAsKey+'\\t'+massM+'\\t'+chargeM+'\\t'+modPepInHupaFormat+'\\t'+scoreM+'\\n'\n data=self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n else:\n \n parsedData=giAsKey+'\\t'+massM+'\\t'+chargeM+'\\t'+preAmM+'.'+pepM+'.'+nAmM+'\\t'+'-'+'\\t'+scoreM+'\\n'\n data=self.mapCaller(parsedData,self.eng)\n #print >>self.writer,data\n\n if self.eng is \"inspect\":\n \"\"\"\n InSpect does not have mass information in TSV file\n So we need to fetch it from spectrum file (this is not yet done)\n \"\"\"\n for i,row in enumerate(self.dicts):\n data = row[self.engine[self.eng][0]]+'\\t'+row[self.engine[self.eng][1]]+'\\t'+row[self.engine[self.eng][2]]+'\\t'+row[self.engine[self.eng][3]]+'\\t'+row[self.engine[self.eng][4]]+'\\t'+row[self.engine[self.eng][5]]+'\\n'\n #return data\n #data = self.mapCaller(data)\n #self.writer.write(data)\n\n if self.eng is \"omssa\":\n #OMSSA csv doesnot contain start and last residue of the peptide, instead contains position. So start and last residue need to fetch from protein sequence\n for i,row in enumerate(self.dicts):\n giO = row[self.engine[self.eng][6]]\n massO= row[self.engine[self.eng][5]]\n chargeO=row[self.engine[self.eng][8]]\n preAmO = row[self.engine[self.eng][1]]#position in protein\n pepO = row[self.engine[self.eng][2]]\n nextAmO = row[self.engine[self.eng][3]] #position in protein\n scoreO= row[self.engine[self.eng][4]]\n modO=row[self.engine[self.eng][7]]\n if modO !='':\n #parsedData=giO+'#'+massO+'#'+chargeO+'#'+preAmO+'.'+pepO+'.'+nextAmO+'#'+modO+'#'+scoreO\n modPepInHupaFormat=self.modTermDic.omssa(preAmO,pepO,nextAmO,modO,self.eng)\n parsedData=giO+'\\t'+massO+'\\t'+chargeO+'\\t'+modPepInHupaFormat+'\\t'+scoreO+'\\n'\n self.mapCaller(parsedData,self.eng)\n else:\n parsedData=giO+'\\t'+massO+'\\t'+chargeO+'\\t'+preAmO+'.'+pepO+'.'+nextAmO+'\\t'+'-'+'\\t'+scoreO+'\\n'\n self.mapCaller(parsedData,self.eng)", "def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()", "def main():\n\t#first check args and file paths\n\tcheckArgs(args)\n\t\n\tdata = args.dataset_file\n\tf_name = data.split(\".\")\n\tprint \"\\n[AP]\\t\"+\"######## \"+f_name[0] + '.' 
+ f_name[1]+\" ########\"\n\tprint \"\\n[AP]\\tChecked inputs, now acquiring data\"\n\n\thost = \"localhost\"\n\tuser = \"readonly\"\n\tpasswd = \"readonlypswd\"\n\tdb = args.db_schema\n\tdb_table = args.db_table\n\n\tnameFile = data[0:-20]\n\tdataset = queryDataset(host,user,passwd,db,db_table,\"tmpFile.txt\",nameFile)\n\tif dataset is not None:\n\t\tdataset = dataset.rstrip('\\n')\n\t\tdataset = dataset.replace(\"/\",\"-\")\n\n\t\tlocations_list, length_list = generateDataset(data)\n\n\t\tif len(locations_list) < 2:\n\t\t\tprint \"\\n[SKIP]\\t{dataset} has only one unique line! Can't estimate anything.\\n\\tSKIP THIS FILE!\\n\".format(dataset=str(dataset))\n\t\t\treturn 0\n\n\t\t# Alias for estAbund calling\n\t\testAbund = sonicLength.estAbund\n\n\t\t# Call estAbund and store returned object in results\n\t\tresults = estAbund(robjects.StrVector(locations_list), robjects.FloatVector(length_list))\n\n\t\t# Put estimation for theta in estimations_theta and associated locations in locations_theta; then organize data in dic_of_theta\n\t\ttheta = results.rx2(\"theta\")\n\t\testimations_theta = tuple(theta)\n\t\tlocations_theta = tuple(theta.names)\n\t\t# dic_of_theta\n\t\tdic_of_theta = {}\n\t\tfor i in range(len(locations_theta)):\n\t\t\tdic_of_theta.update({locations_theta[i]:estimations_theta[i]})\n\n\t\t# Put different fragment lengths in length_phi and associated frequencies in freq_phi\n\t\tphi = results.rx2(\"phi\")\n\t\tfreq_phi = tuple(phi)\n\t\tlength_phi = tuple(phi.names)\n\n\t\tlength_phi_numbers = fragmentsLengthPlot(length_phi,freq_phi,length_list,nameFile,dataset)\n\n\t\tprintThetaInfo(estimations_theta,locations_theta,nameFile)\n\n\t\t# Retrieving redundant reads data\n\t\tdic_of_redundant_reads_count, sequence_count_list = redundant_reads_count(from_file_to_list(data,'.tsv'))\n\n\t\t# Box Plot\n\t\tsequence_count = []\n\t\tfor v in sequence_count_list:\n\t\t\tsequence_count.append(int(v))\n\t\tbox_plot(sequence_count, estimations_theta, nameFile,dataset)\n\n\t\t# Plot: unique lengths retrieved for a genomic location VS expected number of parent fragment for the same location\n\t\tphi_VS_theta(length_phi, freq_phi, nameFile, dataset)\n\n\n\t\t#######################################################################################################\n\t\t# Produce .tsv output about measured redundant reads count, abundance-corrected redundant reads count # \n\t\t# and some descriptive of unique fragments lengths #\n\t\t#######################################################################################################\n\n\t\t# Retrieving data\n\t\tdic_of_relative_abundance, dic_of_corrected_reads_count, dic_of_percentage_difference = corrected_reads_count(dic_of_redundant_reads_count, dic_of_theta)\n\t\tdic_of_unique_lengths, dic_of_unique_lengths_number, dic_of_median_of_unique_lengths, dic_of_MAD = fragment_lengths_statistics(data)\n\t\tdic_of_lengths = lengths_explicit_list(from_file_to_list(data,'.txt'))\n\n\t\t# Writing File\n\t\tcorrected_file = open(dataset + \".\" + nameFile+\".outcomes\"+\".tsv\", 'w')\n\t\tcorrected_file.write(\"Chromosome\\tIntegration_locus\\tStrand\\tSequence_Count\\tEstimated_Relative_Abundance\\tCorrected_Sequence_Count\\tPercentage_Variation\\tNumber_of_fragments_of_unique_lengths\\tLength_Min\\tLength_Max\\tLenght_Median\\tRounded_Lenght_Median\\tMAD\\tUnique_Lengths_List\\tUnique_Lengths_Amount\\tCEM_region_?\") ## ! NB ! 
## \\tCEM_region_?\" has to remain the last!!!\n\t\tgenome_locations = dic_of_redundant_reads_count.keys()\n\t\tgenome_locations.sort()\n\t\tfor key in genome_locations:\n\t\t\tsplitted_location = key.split(' ')\n\t\t\tcorrected_file.write(\"\\n\" + splitted_location[0] + \"\\t\" + splitted_location[1] + \"\\t\" + splitted_location[2] + \"\\t\" + str(dic_of_redundant_reads_count[key]) + \"\\t\" + str(round(dic_of_relative_abundance[key],5)) + \"\\t\" + str(round(dic_of_corrected_reads_count[key],0)) + \"\\t\" + str(dic_of_percentage_difference[key]) + \"\\t\" + str(dic_of_unique_lengths_number[key]) + \"\\t\" + str(min(dic_of_unique_lengths[key])) + \"\\t\" + str(max(dic_of_unique_lengths[key])) + \"\\t\" + str(dic_of_median_of_unique_lengths[key]) + \"\\t\" + str(math.ceil(dic_of_median_of_unique_lengths[key]))+ \"\\t\" + str(dic_of_MAD[key]) + \"\\t\" + str(dic_of_unique_lengths[key]) + \"\\t\" + str(dic_of_lengths[key]))\n\t\t\tresponse, cem_symbol, cem_coordinates = is_CEM(key)\n\t\t\tif (response == True):\n\t\t\t\tcorrected_file.write(\"\\t\" + cem_symbol)\n\n\t\t# Write database file - Like corrected_file with more field appended in the end\n\t\tdb_file = open(dataset + \".\" + nameFile+\".db_file\"+\".tsv\", 'w')\n\t\tgenome_locations = dic_of_redundant_reads_count.keys()\n\t\tgenome_locations.sort()\n\t\tdataset_split = dataset.split('.')\n\t\tdataset_label = '_'.join(dataset_split)\n\t\tfor key in genome_locations:\n\t\t\tsplitted_location = key.split(' ')\n\t\t\tdb_file.write(splitted_location[0] + \"\\t\" + splitted_location[1] + \"\\t\" + splitted_location[2] + \"\\t\" + str(dic_of_redundant_reads_count[key]) + \"\\t\" + str(round(dic_of_relative_abundance[key],5)) + \"\\t\" + str(round(dic_of_corrected_reads_count[key],0)) + \"\\t\" + str(dic_of_percentage_difference[key]) + \"\\t\" + str(dic_of_unique_lengths_number[key]) + \"\\t\" + str(min(dic_of_unique_lengths[key])) + \"\\t\" + str(max(dic_of_unique_lengths[key])) + \"\\t\" + str(dic_of_median_of_unique_lengths[key]) + \"\\t\" + str(math.ceil(dic_of_median_of_unique_lengths[key]))+ \"\\t\" + str(dic_of_MAD[key]) + \"\\t\" + str(dic_of_unique_lengths[key])[1:-1] + \"\\t\" + str(dic_of_lengths[key])[1:-1] + \"\\t\")\n\t\t\tdb_file.write(\"\\t\".join(dataset_split) + \"\\t\" + dataset_label + \"\\t\")\n\t\t\tresponse, cem_symbol, cem_coordinates = is_CEM(key)\n\t\t\tif (response == True):\n\t\t\t\tdb_file.write(cem_symbol + \"\\t\" + cem_coordinates)\n\t\t\telse:\n\t\t\t\tdb_file.write(\"\\t\")\n\t\t\tdb_file.write(\"\\n\")\n\n\t\tdb_file.close()\n\n\t\t#######################################################################################################\n\n\t\t# Last print for user\n\t\tprint \"\\n[AP]\\tTask Finished, closing.\\n\"\n\telse:\n\t\tprint \"\\n[AP]\\tThe dataset is not in the reference DB. 
Skipped.\\n\"\n\n\treturn 0", "def __init__(self, raw_data_file,):\n self.raw_data_file = raw_data_file\n self.clean_data = self.cleanData()\n self.microtrip_data = []", "def Basic_Stat(self):\n out_file = \"%s/01.%s.BasicInfo_QC_map_SpikeIn.xls\" % (self.dir_StatInfo, self.s_idx)\n f_out_file = open( out_file,\"w\" )\n \n l_info = [\n \"Rename\", \"Raw_Reads\", \"Clean_Reads\",\n \"Pre_Map_Reads\", \"Aligned_Reads\", \"HTSseq_Known_Reads\",\n \"HTSeq_Refseq_Reads\",\n \"ERCC_Reads\",\n \"ERCC_Mols\"\n ]\n if not self.given_GTF:\n l_info = [\n \"Rename\", \"Raw_Reads\", \"Raw_bases\", \"Clean_Reads\", \"Clean_bases\", \"Q20(%)\",\"Q30(%)\",\"GC content(%)\",\n \"Pre_Map_Reads\", \"Aligned_Reads\", \"MappingRate\",\n \"ERCC_Reads\",\n \"ERCC_Mols\"\n ]\n \n \n out_info = \"\\t\".join(l_info)\n\n print >>f_out_file, out_info\n\n l_brief = self.samInfo_pd_RNA['brief_name']\n\n ### Load HTSeq reads\n HTS_info = m_cnt.CountInfo(\n self.s_idx,\n self.dir_mergeCout,\n# l_brief,\n# \"umi_clean_gene\",\n# self.dir_mergeCout\n )\n HTS_info.load_mat(gen_col=1, generate = 0)\n HTS_info.sam_tot_reads()\n \n ### Split refseq reads into RefSeq gene, lncGenes and novo genes.\n# self.__get_HTS_clean_split()\n \n# ### Load HTSeq refseq reads\n# Refseq_info = m_cnt.CountInfo(\n# self.s_idx,\n# self.dir_mergeCout,\n# l_brief,\n# \"dexseq_clean_refseq\",\n# self.dir_mergeCout\n# )\n# Refseq_info.load_mat(gen_col=1, generate = 0)\n# Refseq_info.sam_tot_reads()\n \n# ### Load HTSeq lncRNA reads\n# lncRNA_info = m_cnt.CountInfo(\n# self.s_idx,\n# self.dir_mergeCout,\n# l_brief,\n# \"dexseq_clean_lncRNA\",\n# self.dir_mergeCout\n# )\n# lncRNA_info.load_mat(gen_col=1, generate = 0)\n# lncRNA_info.sam_tot_reads()\n\n ### Load HTSeq novo-gene reads\n# if not self.given_GTF:\n# NeoPass_info = m_cnt.CountInfo(\n# self.s_idx,\n# self.dir_mergeCout,\n# l_brief,\n# \"dexseq_NeoPass\",\n# self.dir_mergeCout\n# )\n# NeoPass_info.load_mat(gen_col=1, generate = 0)\n# NeoPass_info.sam_tot_reads()\n \n \n \"\"\"\n Load other information\n \"\"\"\n for samp in self.samInfo_pd_RNA['sample']:\n idx = (self.samInfo_pd_RNA['sample'] == samp)\n brief_name = self.samInfo_pd_RNA[idx]['brief_name'].values[0]\n rename = self.samInfo_pd_RNA[idx]['rename'].values[0]\n data_type = self.samInfo_pd_RNA[ idx ]['end_type'].values[0]\n QC_log = \"%s/%s/%s.QC.log\" % (self.dir_clean_data, samp, samp)\n \n Tophat_log = \"%s/%s/align_summary.txt\" %\\\n (self.dir_tophat, brief_name)\n \n HTSeq_SpikeIn = \"%s/%s/%s.dexseq_ERCC_RGCPloyA.txt\" %\\\n (self.dir_HTS_known, brief_name, brief_name)\n \n QcStat_info = Stat.QcStat(QC_log)\n MapStat_info = Stat.TophatStat(Tophat_log)\n SpikeIn_info = Stat.SpikeIn(HTSeq_SpikeIn, self.ercc_info_file)\n \n \n QcStat_info.read_infile()\n MapStat_info.read_infile()\n SpikeIn_info.load_HTS_file()\n \n pre_map_read = MapStat_info['statInfo']['totalRead']\n aligned_read = MapStat_info['statInfo']['mappedRead']\n mappingRate = MapStat_info['statInfo']['mappingRate']\n \n if data_type == \"PE\": \n HTSseq_read = self.__get_HTS_reads(HTS_info ,samp) * 2\n Refseq_read = self.__get_HTS_reads(Refseq_info ,samp) * 2\n lncRNA_read = self.__get_HTS_reads(lncRNA_info ,samp) * 2\n NeoPass_read = 0\n if not self.given_GTF:\n NeoPass_read = self.__get_HTS_reads(NeoPass_info,samp) * 2\n read_RFP = SpikeIn_info.RGC_count['RGC-mRFP'] * 2\n read_GFP = SpikeIn_info.RGC_count['RGC-GFP' ] * 2\n read_CRE = SpikeIn_info.RGC_count['RGC-CRE' ] * 2\n read_ERCC = SpikeIn_info.ERCC_total * 2\n else:\n HTSseq_read = 
self.__get_HTS_reads(HTS_info ,samp)\n# Refseq_read = self.__get_HTS_reads(Refseq_info ,samp)\n# lncRNA_read = self.__get_HTS_reads(lncRNA_info ,samp)\n# if not self.given_GTF:\n# NeoPass_read = self.__get_HTS_reads(NeoPass_info,samp)\n\n# read_RFP = SpikeIn_info.RGC_count['RGC-mRFP']\n# read_GFP = SpikeIn_info.RGC_count['RGC-GFP' ]\n# read_CRE = SpikeIn_info.RGC_count['RGC-CRE' ]\n read_ERCC = SpikeIn_info.ERCC_total\n# \n# mol_RFP = self.samInfo_pd_RNA[idx]['RFP_polyA'].values[0]\n# mol_GFP = self.samInfo_pd_RNA[idx]['GFP_polyA'].values[0]\n# mol_CRE = self.samInfo_pd_RNA[idx]['CRE_polyA'].values[0]\n mol_ERCC = self.samInfo_pd_RNA[idx]['ERCC_time'].values[0] *\\\n 6.023*10**10\n\n l_out = [\n rename,\n QcStat_info.raw_reads, QcStat_info.cln_reads,\n pre_map_read, aligned_read, HTSseq_read,\n read_ERCC,\n mol_ERCC\n ]\n if not self.given_GTF:\n l_out = [\n rename,\n QcStat_info.raw_reads,QcStat_info.raw_bases, QcStat_info.cln_reads,QcStat_info.cln_bases ,QcStat_info.Q20_l_rate ,QcStat_info.Q30_l_rate ,QcStat_info.GC_l_rate,\n pre_map_read, aligned_read, mappingRate,\n read_ERCC,\n mol_ERCC\n ]\n l_out = [str(i) for i in l_out]\n print >>f_out_file, \"\\t\".join(l_out)\n \n f_out_file.close()", "def _process_data(self):\r\n # Rename columns to match final feature class\r\n self._rename_columns()\r\n # Add point ID column\r\n self._add_pointid()\r\n # Sort rows by transect id and timestamp\r\n self._sort_rows()\r\n # Fill Null records with a value\r\n self._fill_nulls()\r\n # Set site_code to lower case\r\n self._lower_site_code()\r\n # Create survey_id\r\n self._calc_survey_id()\r\n # Calculate nativesg column if at least one of the veg columns is a Native seagrass type\r\n if set(self.veg_columns).intersection(set(NATIVESG_CODES)) > 0:\r\n self.nativesg_columns = list(set(self.veg_columns).intersection(set(NATIVESG_CODES)))\r\n self._calc_nativesg()\r\n #\r", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def process_data(rows, options):\n cohort = int(options.cohort)\n id_expiry = PHASE_END_DATES[cohort][4]\n for row in rows:\n new_student = Person()\n for k, v in row.items():\n setattr(new_student, k, v)\n new_student.student_cohort = cohort\n new_student.id_expiry = id_expiry\n if options.section:\n new_student.student_sec_phase1 = options.section\n if options.add:\n new_student.save()\n print \"Saved:\", new_student", "def main():\n #find sys args and handle appropriately\n argv = sys.argv\n if(len(argv) != 2):\n print(\"Invalid number of input strings.\")\n print(\"Usage: python mdc.py [inputfile]\")\n else:\n #run the IO script, expect out a list of lists, etc..\n data = CSVIO.readDataCSV(argv[1])\n #create a list of classNames from the data, trim/convert as necessary\n classNames = data[0][1:]\n try:\n while(True):\n classNames.remove('')\n except ValueError:\n pass\n try:\n classNames = [item.split('=')[1] for item in classNames]\n except IndexError as ie:\n print(\"I've failed you Dr. 
Weiss...\")\n #declare/init our class with proper values\n pc = training.PatternClassifier(data[0][0], len(classNames), classNames, len(data[1][2:]), \\\n data[1][2:], len(data[2:]), data[2:])\n #run the training data with leaving out i for our test on each sample\n for i in range(0, pc.nSamples):\n pc.train(i)\n #output to console/file\n print(pc)\n CSVIO.writeToFile(argv[1], pc)", "def lcia_methods__metadata(self):\r\n with UnicodeReader(os.path.join(dirpath, \"categoryUUIDs.csv\"), \r\n encoding='latin-1', \r\n delimiter=dt) as csv_file:\r\n next(csv_file) \r\n csv_data = [{'name': (line[0], line[2], line[4]),\r\n 'description': line[7]\r\n } for line in csv_file]\r\n \r\n filename = \"LCIA_implementation_2019.xlsx\" # this was donwloaded and updated on Oct 2019 from ecoinvent website. \r\n wb = xlrd.open_workbook(os.path.join(dirpath, filename))\r\n #characterizaton factors\r\n sheet= wb.sheet_by_name(\"CFs\")\r\n cf_data = [{\r\n 'method': (sheet.cell(row, 0).value,\r\n sheet.cell(row, 1).value,\r\n sheet.cell(row, 2).value),\r\n 'name': sheet.cell(row, 3).value,\r\n 'categories': (sheet.cell(row, 4).value, sheet.cell(row, 5).value),\r\n 'amount': sheet.cell(row, 7).value\r\n }\r\n for row in range(1, sheet.nrows)\r\n if sheet.cell(row, 0).value not in \r\n {'selected LCI results, additional', 'selected LCI results'} and isinstance(sheet.cell(row, 7).value, Number)]\r\n #units\r\n sheet= wb.sheet_by_name(\"units\")\r\n units = {(sheet.cell(row, 0).value, sheet.cell(row, 1).value, \r\n sheet.cell(row, 2).value): sheet.cell(row, 4).value for row in range(1, sheet.nrows)}\r\n return csv_data, cf_data, units, filename", "def run(self):\n\n # How to retrieve your input data.\n input_1_data = self.in_data['input_1']\n\n # How to retrieve your params value.\n param_1 = self.param['param_1']\n\n # How to process data.\n # Just write any number of methods you want and use them here.\n sample_out_data = self.sample_method(input_1_data, param_1)\n\n # Go to the definition of this method to see how to log.\n self.demo_log()\n\n # This is how to set output data.\n self.out_data['output_1'] = sample_out_data", "def perform_process(transformer: transformer_class.Transformer, check_md: dict) -> dict:\n # Process each CSV file into BETYdb\n start_timestamp = datetime.datetime.now()\n files_count = 0\n files_csv = 0\n lines_read = 0\n error_count = 0\n files_loaded = []\n for one_file in check_md['list_files']():\n files_count += 1\n if os.path.splitext(one_file)[1].lower() == '.csv':\n files_csv += 1\n\n # Make sure we can access the file\n if not os.path.exists(one_file):\n msg = \"Unable to access csv file '%s'\" % one_file\n logging.debug(msg)\n return {'code': -1000,\n 'error': msg}\n\n try:\n # Read in the lines from the file\n with open(one_file, 'r') as in_file:\n reader = csv.DictReader(in_file)\n files_loaded.append(one_file)\n for row in reader:\n centroid_lonlat = [row['lon'], row['lat']]\n time_fmt = row['dp_time']\n timestamp = row['timestamp']\n dp_metadata = {\n \"source\": row['source'],\n \"value\": row['value']\n }\n trait = row['trait']\n\n __internal__.create_datapoint_with_dependencies(transformer.args.clowder_url, transformer.args.clowder_key,\n trait, (centroid_lonlat[1], centroid_lonlat[0]), time_fmt,\n time_fmt, dp_metadata, timestamp)\n lines_read += 1\n\n except Exception:\n logging.exception(\"Error reading CSV file '%s'. 
Continuing processing\", os.path.basename(one_file))\n error_count += 1\n\n if files_csv <= 0:\n logging.info(\"No CSV files were found in the list of files to process\")\n if error_count > 0:\n logging.error(\"Errors were found during processing\")\n return {'code': -1001, 'error': \"Too many errors occurred during processing. Please correct and try again\"}\n\n return {\n 'code': 0,\n configuration.TRANSFORMER_NAME: {\n 'version': configuration.TRANSFORMER_VERSION,\n 'utc_timestamp': datetime.datetime.utcnow().isoformat(),\n 'processing_time': str(datetime.datetime.now() - start_timestamp),\n 'num_files_received': str(files_count),\n 'num_csv_files': str(files_csv),\n 'lines_loaded': str(lines_read),\n 'files_processed': str(files_loaded)\n }\n }", "def __init__(self, path):\n with open(path, 'r') as bt:\n self.headers = bt.readline().split(',')\n self.data = []\n for line in bt:\n self.data.append(list(eval(line)))\n self.scores = []\n self.models = {'dtr': DecisionTreeRegressor(),\n 'br': BaggingRegressor(n_jobs=-1),\n 'rfr': RandomForestRegressor(n_jobs=-1),\n }", "def getArguments(Comm):\n input_dir=''\n dataset=''\n l=0.0\n if len(Comm)==4:\n input_dir=str(Comm[1])\n dataset=str(Comm[2])\n l=float(Comm[3])\n else:\n print \"To run this program you have to provide a training file, a test file, an output file and a smoothing parameter\"\n print \"the chemicals must be integer indexed. This script read chemical feature files based on chemical index in integer\"\n print \"example: python PRW_10cv.py /path/to/N1/L1to5/ N1L1to5 0.9\"\n exit(1)\n feature=get_feature()\n RCRS=[]\n TPR35=[]\n for i in range(1,11):\n #10cv for 1 to 10\n trainfile=input_dir+\"train\"+str(i)+\".csv\"\n Train=get_train(trainfile,feature)\n\n testfile=input_dir+\"test\"+str(i)+\".csv\"\n \n testrow=[]\n testcol=[]\n print \"Reading test instances : \",testfile\n for line in open(testfile,\"r\").xreadlines():\n z=line.strip().split(\",\")\n chem=str(z[0])\n prot=str(z[1])\n testrow.append(chem)\n testcol.append(prot)\n rcrs=get_PRW_rcrs(feature,Train,zip(testrow,testcol),l)\n tpr35=TPRbyRowRank(rcrs,35)\n TPR35.append(tpr35)\n RCRS=RCRS+rcrs\n avgtpr35=np.average(TPR35)\n semtpr35=(np.std(TPR35)/math.sqrt(len(TPR35)))\n print \"Dataset=%s, Avg.TPR35=%s, S.E.M.TPR35=%s\"%(dataset,str(avgtpr35),str(semtpr35))\n print TPR35\n print \"Rank\\tTPR\"\n for i in range(1,351):\n tpr=TPRbyRowRank(RCRS,i)\n print \"%s\\t%s\"%(str(i),str(tpr))\n return", "def main(): \n for info_hash_record in info_hashs:\n get_and_save_bt_info(info_hash_record)", "def __init__(self):\n with open(\"sat.json\", \"r\") as infile:\n self._sat = json.load(infile)[\"data\"]\n #Define the headers for the csv\n self._headers = [\"DBN\", \"School Name\", \"Number of Test Takers\", \"Critical Reading Mean\", \"Mathematics Mean\", \"Writing Mean\"]", "def main():\n df_path = './DuReader_reformatted/DuReader_for_dbCombinedPara500-150-sample10000.csv'\n df = pd.read_csv(df_path, sep='\\t', index_col=0).dropna() # drop 2 nan question and 8 nan title\n epoch = 5 # about 6 hours\n total_time_list, back_end_time_list = get_time_avg(df['question'].tolist(), epoch)\n df['time_avg'] = total_time_list\n df['backend_time'] = back_end_time_list\n new_df_path = os.path.splitext(df_path)[0] + '-whole-epoch-' + str(epoch) + '.csv'\n df.to_csv(new_df_path, sep='\\t')\n print('file successfully saved to ', new_df_path)", "def automated(a):\n\n try:\n\n lg.warning(\"user gave the input path/file as:\"+' '+str(a))\n df=pd.read_excel(a)\n lg.warning(\"data successfully 
loaded from the file/path\"+' '+str(a))\n\n lg.info(\"starting all the pre-processing done for the train dataset\")\n\n df.dropna(inplace=True)\n lg.warning(\"successfully dropped all null values in the given dataset\")\n\n def change_into_datetime(col):\n df[col]=pd.to_datetime(df[col])\n\n for i in ['Date_of_Journey','Dep_Time', 'Arrival_Time']:\n change_into_datetime(i)\n lg.info(\"successfully changed the required columns into datetime format\")\n\n df['journey_day']=df['Date_of_Journey'].dt.day\n lg.info(\"successfully extracted day from Date_of_journey and creating a separate column for day\")\n df['journey_month']=df['Date_of_Journey'].dt.month\n lg.info(\"successfully extracted month from Date_of_Journey and creating a separate column for month\")\n\n def extract_hour(data,col):\n data[col+'_hour']=data[col].dt.hour\n def extract_min(data,col):\n data[col+'_min']=data[col].dt.minute\n def drop_col(data,col):\n data.drop(col,axis=1,inplace=True)\n\n\n extract_hour(df,'Dep_Time')\n lg.info(\"successfully extracted hours from Dep_Time and dumped the data into new column Dep_Time_hour\")\n extract_min(df,'Dep_Time')\n lg.info(\"successfully extracted minutes from Dep_Time and dumped the data into new column Dep_Time_min\")\n drop_col(df,'Dep_Time')\n lg.warning(\"dropping the original Dep_Time column as we extracted the values form that column\")\n extract_hour(df,'Arrival_Time')\n lg.info(\"successfully extracted hours from Arrival_Time and dumped the data into new column Arrival_Time_hour\")\n extract_min(df,'Arrival_Time')\n lg.info(\"successfully extracted min from Arrival_Time and dumped the data into new column Arrival_Time_min\")\n drop_col(df,'Arrival_Time')\n lg.warning(\"dropping the original Arrival_Time column as we extracted the values form that column\")\n\n duration = list(df[\"Duration\"])\n\n for i in range(len(duration)):\n if len(duration[i].split()) != 2:\n if \"h\" in duration[i]:\n duration[i] = duration[i].strip() + \" 0m\"\n else:\n duration[i] = \"0h \" + duration[i]\n\n duration_hours = []\n duration_mins = []\n for i in range(len(duration)):\n duration_hours.append(int(duration[i].split(sep = \"h\")[0]))\n duration_mins.append(int(duration[i].split(sep = \"m\")[0].split()[-1]))\n\n df[\"Duration_hours\"] = duration_hours\n lg.info(\"successfully extracted hours from Duration column and dumped the data into new column Duration_hours\")\n df[\"Duration_mins\"] = duration_mins\n lg.info(\"successfully extracted minutes from Duration column and dumped the data into new column Duration_mins\")\n\n df.drop([\"Date_of_Journey\",\"Duration\",\"Additional_Info\"], inplace=True,axis=1)\n lg.warning(\"dropping the Date_of_Journey, Duration, Additional_Info columns as we extracted the required \"\n \"information\")\n\n Airline=pd.get_dummies(df['Airline'],drop_first=True)\n lg.info(\"creating dummy variables for Airline and dropping the first dummy column\")\n\n source=pd.get_dummies(df['Source'],drop_first=True)\n lg.info(\"creating dummy variables for Source and dropping the first dummy column\")\n\n destination=pd.get_dummies(df['Destination'],drop_first=True)\n lg.info(\"creating dummy variables for Destination and dropping the first dummy column\")\n\n dict={'non-stop':0, '2 stops':2, '1 stop':1, '3 stops':3, '4 stops':4}\n df['Total_Stops']=df['Total_Stops'].map(dict)\n lg.info(\"successfully mapped the Total_Stops column to 0,1,2,3,4 respectfully\")\n\n df=pd.concat([df, Airline, source, destination], axis = 1)\n lg.warning(\"concatenating all the newly created 
columns into the main dataframe\")\n\n df.drop([\"Airline\", 'Source', 'Destination','Route'],inplace=True,axis=1)\n lg.warning(\"dropping the categorical columns as we dummy encoded them\")\n\n df['Trujet']=0\n lg.info(\"adding an extra column as this feature is not there in our test dataset\")\n\n\n model = open('flight_rf.pkl','rb')\n forest = pickle.load(model)\n lg.info(\"loading our test model for prediction\")\n\n y_prediction = forest.predict(df)\n lg.info(\"processing the prediction\")\n\n a=pd.DataFrame(y_prediction)\n lg.info(\"dumping all our predicted values into a dataframe and showing the results\")\n\n print(a)\n return a\n\n except Exception as e:\n lg.warning(\"error occurred during execution, which is:\"+' '+str(e))\n return \"error occurs is:\"+' '+str(e)", "def test():\n\n # todo: using 'analysisname' for group by, I think I can also use 'File Number'\n statListDict = None # list of dict mapping human readbale to column names\n masterDf = None\n interfaceDefaults = None\n\n # machine learning db\n if 0:\n # this is from mac laptop\n #path = '/Users/cudmore/data/laura-ephys/Superior vs Inferior database_master.csv'\n path = '/Users/cudmore/data/laura-ephys/SANdatabaseForMachineLearning.xlsx'\n analysisName = 'File Number'\n #statListDict = None #sanpy.bAnalysisUtil.getStatList()\n categoricalList = ['LOCATION', 'SEX', 'File Number']#, 'File Name']\n hueTypes = ['LOCATION', 'SEX', 'File Number'] #, 'File Name'] #, 'None']\n sortOrder = ['LOCATION', 'SEX', 'File Number']\n\n # sanpy database\n if 0:\n #import sanpy\n #sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'sanpy'))\n #import bAnalysisUtil\n #statListDict = bAnalysisUtil.statList\n import statlist\n statListDict = statlist.statList\n\n # this is from mac laptop\n #path = '/Users/cudmore/data/laura-ephys/Superior vs Inferior database_master.csv'\n path = '../examples/Superior vs Inferior database_master.csv'\n path = '/Users/cudmore/data/laura-ephys/Superior_Inferior_database_master_jan25.csv'\n path = '/Users/cudmore/data/laura-ephys/Superior vs Inferior database_master.csv'\n path = '/Users/cudmore/data/laura-ephys/Superior vs Inferior database_master_20210402.csv'\n\n #path = 'data/Superior vs Inferior database_master_20210402.csv'\n path = 'data/Superior vs Inferior database_master_20210402.csv'\n #path = '/Users/cudmore/data/laura-ephys/Superior_Inferior_database_master_jan25.csv'\n path = 'data/Superior vs Inferior database_13_Feb_master.csv'\n analysisName = 'analysisname'\n #statListDict = None #sanpy.bAnalysisUtil.getStatList()\n categoricalList = ['include', 'condition', 'region', 'Sex', 'RegSex', 'File Number', 'analysisname']#, 'File Name']\n hueTypes = ['region', 'sex', 'RegSex', 'condition', 'File Number', 'analysisname'] #, 'File Name'] #, 'None']\n sortOrder = ['region', 'sex', 'condition']\n\n interfaceDefaults = {'Y Statistic': 'Spike Frequency (Hz)',\n 'X Statistic': 'region',\n 'Hue': 'region',\n 'Group By': 'File Number'}\n # bimpy database\n if 0:\n path = '../examples/edges_db.csv'\n analysisName = 'fileNumber'\n categoricalList = ['san', 'region', 'path', 'file', 'fileNumber', 'nCon']\n hueTypes = categoricalList\n sortOrder = ['san', 'region']\n\n # dualAnalysis database\n if 0:\n # grab our list of dict mapping human readable to .csv column names\n sys.path.append(os.path.join(os.path.dirname(sys.path[0]),'sanpy'))\n import bAnalysisUtil\n statListDict = bAnalysisUtil.statList\n\n path = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/dualAnalysis_final_db.csv'\n analysisName 
= 'fileNumber' # # rows in .xlsx database, one recording per row\n # trial is 1a/1b/1c... trial withing cellNumber\n categoricalList = ['include', 'region', 'fileNumber', 'cellNumber', 'trial', 'quality']\n hueTypes = categoricalList\n sortOrder = ['region']\n\n # sparkmaster lcr database\n if 0:\n path = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/lcr-database.csv'\n analysisName = 'fileNumber' # # rows in .xlsx database, one recording per row\n # trial is 1a/1b/1c... trial withing cellNumber\n categoricalList = ['quality', 'region', 'fileNumber', 'dateFolder', 'tifFile']\n hueTypes = categoricalList\n sortOrder = ['region']\n\n # lcr/vm analysis using lcrPicker.py\n if 0:\n #basePath = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/'\n #path = basePath + 'dual-data/20210115/20210115__0002_lcrPicker.csv'\n #path = basePath + 'dual-data/20210115/20210115__0001_lcrPicker.csv'\n\n # output of lcrPicker.py ... mergeDatabase()\n path = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/lcrPicker-db.csv'\n categoricalList = None\n hueTypes = None\n analysisName= 'tifFile'\n sortOrder = None\n\n # merged sanpy+lcr pre spike slope\n # generated by dualAnalysis.py xxx()\n # usnig to compare lcr slope to edddr for fig 9\n if 0:\n path = '/Users/cudmore/Sites/SanPy/examples/dual-analysis/combined-sanpy-lcr-db.csv'\n statListDict = None\n categoricalList = None\n hueTypes = None\n analysisName= 'filename'\n sortOrder = None\n\n if 1:\n path = 'data'\n ad = sanpy.analysisDir(path, autoLoad=True)\n for row in range(len(ad)):\n ad.getAnalysis(row)\n masterDf = ad.pool_build()\n categoricalList = ['file', 'File Number']\n hueTypes = ['file', 'File Number']\n analysisName = 'file'\n from sanpy.bAnalysisUtil import statList as statListDict\n sortOrder = ['file', 'File Number']\n interfaceDefaults = {'Y Statistic': 'Spike Frequency (Hz)',\n 'X Statistic': 'Spike Number',\n 'Hue': 'file',\n 'Group By': 'file'}\n\n #\n app = QtWidgets.QApplication(sys.argv)\n\n ex = bScatterPlotMainWindow(path, categoricalList, hueTypes,\n analysisName, sortOrder, statListDict=statListDict,\n masterDf = masterDf,\n interfaceDefaults = interfaceDefaults)\n ex.show()\n\n sys.exit(app.exec_())", "def main():\n\n house_path = '../../Data/wijk1_huizen.csv'\n battery_path = '../../Data/wijk1_batterijen.txt'\n\n houses, batteries = read_data(house_path, battery_path)\n\n smart_wijk = SmartGrid(51,51)\n smart_wijk.add_house_dictionaries(houses)\n smart_wijk.add_battery_dictionaries(batteries)\n\n for element in houses:\n smart_wijk.create_house(element['position'], element['output'])\n for element in batteries:\n smart_wijk.create_battery(element['position'], element['capacity'])\n\n solution_reader(smart_wijk, '../../Results/best_brabo_solution.csv')", "def _main(hgt_results_fp,\n method):\n\n with open(hgt_results_fp, 'U') as input_f:\n\t if method == 'ranger-dtl':\n\t parse_rangerdtl(input_f=input_f)\n\t elif method == 'trex':\n\t parse_trex(input_f=input_f)\n\t elif method == 'riata-hgt':\n\t parse_riatahgt(input_f=input_f)\n\t elif method == 'jane4':\n\t parse_jane4(input_f=input_f)\n\t elif method == 'consel':\n\t parse_consel(input_f=input_f)", "def get_data(self, csv_file):\n pass", "def new_csv_imp(infile):\r\n with open(infile, \"r\") as fd:\r\n txt = fd.readlines()\r\n if len(txt) > 1:\r\n if 'Serial' in txt[0]:\r\n print('{:} is Solinst'.format(infile))\r\n if 'UNIT: ' in txt[7]:\r\n level_units = str(txt[7])[5:].strip().lower()\r\n if 'UNIT: ' in txt[12]:\r\n temp_units = str(txt[12])[5:].strip().lower()\r\n f 
= pd.read_csv(infile, skiprows=13, parse_dates=[[0, 1]], usecols=[0, 1, 3, 4])\r\n print(f.columns)\r\n f['DateTime'] = pd.to_datetime(f['Date_Time'], errors='coerce')\r\n f.set_index('DateTime', inplace=True)\r\n f.drop('Date_Time', axis=1, inplace=True)\r\n f.rename(columns={'LEVEL': 'Level', 'TEMP': 'Temp'}, inplace=True)\r\n level = 'Level'\r\n temp = 'Temp'\r\n\r\n if level_units == \"feet\" or level_units == \"ft\":\r\n f[level] = pd.to_numeric(f[level])\r\n elif level_units == \"kpa\":\r\n f[level] = pd.to_numeric(f[level]) * 0.33456\r\n printmes(\"Units in kpa, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"mbar\":\r\n f[level] = pd.to_numeric(f[level]) * 0.0334552565551\r\n elif level_units == \"psi\":\r\n f[level] = pd.to_numeric(f[level]) * 2.306726\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n elif level_units == \"m\" or level_units == \"meters\":\r\n f[level] = pd.to_numeric(f[level]) * 3.28084\r\n printmes(\"Units in psi, converting {:} to ft...\".format(os.path.basename(infile)))\r\n else:\r\n f[level] = pd.to_numeric(f[level])\r\n printmes(\"Unknown units, no conversion\")\r\n\r\n if temp_units == 'Deg C' or temp_units == u'\\N{DEGREE SIGN}' + u'C':\r\n f[temp] = f[temp]\r\n elif temp_units == 'Deg F' or temp_units == u'\\N{DEGREE SIGN}' + u'F':\r\n printmes('Temp in F, converting {:} to C...'.format(os.path.basename(infile)))\r\n f[temp] = (f[temp] - 32.0) * 5.0 / 9.0\r\n return f\r\n\r\n elif 'Date' in txt[1]:\r\n print('{:} is Global'.format(infile))\r\n f = pd.read_csv(infile, skiprows=1, parse_dates=[[0, 1]])\r\n # f = f.reset_index()\r\n f['DateTime'] = pd.to_datetime(f['Date_ Time'], errors='coerce')\r\n f = f[f.DateTime.notnull()]\r\n if ' Feet' in list(f.columns.values):\r\n f['Level'] = f[' Feet']\r\n f.drop([' Feet'], inplace=True, axis=1)\r\n elif 'Feet' in list(f.columns.values):\r\n f['Level'] = f['Feet']\r\n f.drop(['Feet'], inplace=True, axis=1)\r\n else:\r\n f['Level'] = f.iloc[:, 1]\r\n # Remove first and/or last measurements if the transducer was out of the water\r\n # f = dataendclean(f, 'Level')\r\n flist = f.columns.tolist()\r\n if ' Temp C' in flist:\r\n f['Temperature'] = f[' Temp C']\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp C', 'Temperature'], inplace=True, axis=1)\r\n elif ' Temp F' in flist:\r\n f['Temperature'] = (f[' Temp F'] - 32) * 5 / 9\r\n f['Temp'] = f['Temperature']\r\n f.drop([' Temp F', 'Temperature'], inplace=True, axis=1)\r\n else:\r\n f['Temp'] = np.nan\r\n f.set_index(['DateTime'], inplace=True)\r\n f['date'] = f.index.to_julian_date().values\r\n f['datediff'] = f['date'].diff()\r\n f = f[f['datediff'] > 0]\r\n f = f[f['datediff'] < 1]\r\n # bse = int(pd.to_datetime(f.index).minute[0])\r\n # f = hourly_resample(f, bse)\r\n f.rename(columns={' Volts': 'Volts'}, inplace=True)\r\n f.drop([u'date', u'datediff', u'Date_ Time'], inplace=True, axis=1)\r\n return f\r\n else:\r\n print('{:} is unrecognized'.format(infile))", "def entry_parser():\n # from tools import file_importer, file_outporter\n from copy import copy\n from collections import defaultdict\n import os.path\n \n print(\"this is entry parser\")\n \n # inPathL = [\"bob/processed/proteinGroups - OST-1-09042017.txt\",\"bob/processed/proteinGroups_OST2.txt\",\"bob/processed/proteinGroups_OST3.txt\"]\n inpathL = []\n inpF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"txt_cav1ko-1-17082017\", \"proteinGroups.txt\"),\"r\")\n # outPath = 
\"bob/processed/OST-24-05-2017_combined.csv\"\n fileCount = 1\n # outF = file_outporter(outPath)\n outF = open(os.path.join(os.path.split(os.path.dirname(__file__))[0], \"data\", \"cav1ko\", \"processed\", \"cav1ko-1.csv\"),\"w\")\n # newFlag = True\n \n finDict = defaultdict(list)\n cN = 0\n # for relPath in inPathL:\n outDict = {}\n # inpF = file_importer(relPath)\n headerFlag = True\n \n for inpLine in inpF:\n cN += 1\n if headerFlag:\n headerFlag = False\n headerLine = inpLine\n continue\n inpLine = inpLine.strip(\"\\n\\r\")\n inpItem = inpLine.split(\"\\t\")\n geneL = inpItem[0].split(\";\")\n lenS = len(geneL[0])\n curGene = geneL[0]\n for geneI in geneL: # find gene name with the shortest length\n if len(geneI) < lenS:\n lenS = len(geneI)\n curGene = geneI\n if \"__\" in curGene: continue # get rid of contaminant lines\n try: # get rid of wonky lines introduced by excel\n int(curGene)\n continue\n except ValueError: \n pass\n\n if curGene[-2] == \"-\":\n curGene = curGene[:-2]\n if curGene[-3] == \"-\":\n curGene = curGene[:-3]\n \n # remove ambiguities based on gene name from the entire entry:\n \n corrPos = geneL.index(curGene)\n corrLine = []\n targetCount = 46 # after the 45th item row in the list, peptide IDs and modification start to appear which are allowed to have multiple entries and do not need to be disambiguated\n currCount = 1\n pepFlag = True\n for inpE in inpItem:\n currCount += 1\n if currCount == targetCount:\n pepFlag = False\n # print inpE\n if \";\" in inpE and pepFlag:\n try:\n corrLine.append(inpE.split(\";\")[corrPos])\n except IndexError:\n corrLine.append(inpE.split(\";\")[0])\n else:\n corrLine.append(inpE.rstrip(\"\\n\"))\n\n \n if inpItem[6] == \"\":\n # print \"no protein name found. adding the uniprot ID.\"\n inpItem[6] = curGene\n \n \"\"\"\n try:\n for inpN in inpItem[4:10]:\n inpItem[inpItem.index(inpN)] = int(inpN)\n countFlag = True\n except ValueError:\n print inpItem[4:10]\n countFlag = False\n if countFlag:\n if sum(inpItem[4:10]) == 0: continue # there are some unexpressed proteins in there\n \n \"\"\"\n # print len(corrLine)\n if curGene in outDict: # handle duplicate protein entries and merge them together\n # print \"%s is duplicate\" % curGene\n if curGene == \"Protein IDs\": \n \"\"\"\n quickCount2 = 0\n for quickDictI in outDict[curGene]:\n print str(quickCount2) + \" \" + quickDictI\n quickCount2 += 1\n quickList = inpItem\n quickCount3 = 0\n for quickImp in quickList:\n print str(quickCount3) + \" \" + quickImp\n quickCount3 += 1 \n # print inpItem\n # print outDict[curGene]\n \"\"\"\n continue\n combList = []\n \n \"\"\"\n addL = []\n for i in outDict[curGene][3:]:\n addL.append(i)\n addL2 = []\n for j in corrLine[3:]:\n addL2.append(i)\n outL[3:] = map(add, addL, addL2) # admittedly this looks terrible\n \"\"\"\n \n indexN = 0\n for cItem in corrLine:\n # print indexN\n # print \"---\"\n # print len(corrLine)\n if indexN < 18 or 30 <= indexN <= 43:\n try:\n currC = int(cItem)\n currC = currC + int(outDict[curGene][indexN]) # numbers like peptide counts or LFQ values are added up during merge\n except ValueError:\n currC = cItem\n \n elif 18 <= indexN <= 25 or 28 <= indexN <= 29: # sequence coverage and scores\n currC = max([float(cItem),float(outDict[curGene][indexN])])\n \n elif 26 <= indexN <= 27 or indexN == 44:\n \"\"\"\n quickCount = 0\n for corrItem in corrLine:\n print str(quickCount) + \" \" + corrItem\n quickCount += 1\n \n import time\n \n print relPath\n print corrLine\n print outDict[curGene]\n print 
\"++++++++++++++++++++++++\"\n print indexN\n time.sleep(0.5)\"\"\"\n currC = cItem\n\n \n else:\n corrL = cItem.split(\";\")\n # print indexN\n # print corrLine\n # print outDict[curGene][indexN]\n dictL = outDict[curGene][indexN].split(\";\")\n mergeL = copy(dictL)\n for corrI in corrL:\n if corrI not in dictL:\n mergeL.append(corrI)\n \n currC = \";\".join(mergeL)\n\n combList.append(currC)\n\n \n indexN +=1\n \n \n combList[-1] = \"merged\" \n outDict[curGene] = combList \n # print \"merged:\"\n # print combList\n else:\n corrLine.append(\"unique\")\n outDict[curGene] = corrLine\n\n \n print(fileCount)\n \n\n # if not newFlag: print fileCount, testKey, finDict[testKey] \n # if newFlag:\n # newFlag = False\n \n for outKey,outValue in list(outDict.items()): \n if outKey in finDict: # add modified dicts together into single, unified dict\n # print fileCount, finDict[outKey]\n # print outValue\n outIndex = 0\n for outItem in outValue:\n finDict[outKey][outIndex].append(outItem)\n outIndex += 1\n # print finDict[outKey]\n\n else: # or just add new entries\n if fileCount == 1:\n for outItem in outValue:\n finDict[outKey].append([outItem])\n \n else: # fill up entries that were not present in the previous cycle\n loopCount = 0\n while loopCount < fileCount - 1:\n for i in range(len(outValue)):\n if len(finDict[outKey]) == i:\n finDict[outKey].append([])\n else:\n finDict[outKey][i].append(\"\")\n loopCount += 1\n outIndex = 0\n for outItem in outValue:\n # print finDict[outKey]\n finDict[outKey][outIndex].append(outItem) \n outIndex += 1\n\n for testKey in finDict: # fill up entries in result dict which were not present in previous file\n if len(finDict[testKey][0]) < fileCount:\n for i in range(len(finDict[testKey])):\n finDict[testKey][i].append(\"\")\n\n if len(inpathL) > 1: fileCount += 1 # this is needed if multiple files are parsed\n for finK, finV in list(finDict.items()):\n for finI in finV[-1]:\n if finI != \"unique\" and finI != \"\":\n print(finK, finV)\n\n \n \n outN = 0 \n # prepare header for file:\n headList = headerLine.strip(\"\\n\\r\").split(\"\\t\")\n if fileCount > 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\")\n headerCount = 1\n while headerCount < fileCount:\n outF.write(headerI + \"-\" + str(headerCount) + \"|\")\n headerCount += 1 \n outF.write(headerI + \"-\" + str(headerCount) + \"\\t\")\n \n headerCount = 1\n while headerCount < fileCount:\n outF.write(headList[-1] + \"-\" + str(headerCount) + \"|\")\n headerCount += 1\n \n outF.write(headList[-1] + \"-\" + str(headerCount) + \"\\n\")\n\n elif fileCount == 1:\n for headerItem in headList[:-1]:\n headerI = headerItem.replace(\",\",\".\") \n outF.write(headerI + \"\\t\")\n outF.write(headList[-1].replace(\",\",\".\") + \"\\n\")\n \n else:\n print(\"number of input files should be at least one. 
Got less somehow\")\n raise ValueError\n \n \n for outDK, outDV in list(finDict.items()): # write out assembled results to a file\n outN += 1\n if len(outDK) > 30: print(\"this line should not be displayed\")\n # print outDV[1]\n # if outN == 100: break\n nameCount = 0\n for outI in outDV:\n # if nameCount == 0: print outI\n for outPiece in outI[:-1]:\n outU = outPiece.replace(\",\",\".\")\n if outU == \"\": outF.write(\"_|\")\n else: outF.write(str(outU) + \"|\")\n if outI[-1] == \"\": # handle missing entries\n if nameCount == 6: outF.write(outDV[0][0] + \"\\t\") # replace missing gene names with their uniprot ID\n else: outF.write(\"_\\t\")\n else: outF.write(str(outI[-1]).replace(\",\",\".\") + \"\\t\")\n nameCount += 1\n outF.write(\"\\n\")\n \n\n print(\"unique proteins: \", outN)\n print(\"lines parsed: \", cN)\n # print headerLine\n inpF.close()\n outF.close()", "def __init__(self, books_filename, authors_filename, books_authors_link_filename): \n with open(books_filename, newline='') as booksFile:\n books_reader = csv.reader(booksFile)\n try:\n bookArray = []\n newBookArray = []\n idCounter = 0\n for row in books_reader:\n newBookArray = [idCounter, row[0], int(row[1])]\n bookArray.append(newBookArray)\n idCounter = idCounter + 1\n self.simpleBookArray = bookArray\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(books_filename, books_reader.line_num, e))\n \n \"\"\"\n This function takes in the author_reader and puts the csv file into an array of arrays, in which\n each \"author\" is defined as an inner array, and each inner array is organized like so:\n [id, last name, first name, birth year, death year]\n \"\"\" \n with open(authors_filename, newline='') as authorsFile:\n authors_reader = csv.reader(authorsFile)\n try:\n authorArray = []\n newAuthorArray = []\n for row in authors_reader:\n if row[4] == \"NULL\":\n endDate = None\n else:\n endDate = row[4]\n if endDate == None:\n newAuthorArray = [int(row[0]), row[1], row[2], int(row[3]), endDate]\n else:\n newAuthorArray = [int(row[0]), row[1], row[2], int(row[3]), int(row[4])]\n authorArray.append(newAuthorArray)\n self.simpleAuthorArray = authorArray\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(authors_filename, authors_reader.line_num, e))\n \n \"\"\"\n This function takes in the link_reader and puts the csv file into an array of arrays, in which each\n \"book\" is defined as an inner array, and each inner array is organized as the following:\n [book id, author id]\n \"\"\"\n with open(books_authors_link_filename, newline='') as linkFile:\n link_reader = csv.reader(linkFile)\n try:\n linkArray = []\n newLinkArray = []\n for row in link_reader:\n newLinkArray = [int(row[0]), int(row[1])]\n linkArray.append(newLinkArray)\n self.simpleLinkArray = linkArray\n except csv.Error as e:\n sys.exit('file {}, line {}: {}'.format(books_authors_link_filename, link_reader.line_num, e))" ]
[ "0.6210108", "0.60456717", "0.60032433", "0.5947413", "0.5876277", "0.58508295", "0.58461136", "0.5834677", "0.57999545", "0.57364744", "0.5730089", "0.57000756", "0.5691697", "0.5639754", "0.5626317", "0.56223726", "0.55977935", "0.55886185", "0.5551414", "0.55287653", "0.5524884", "0.55187", "0.5513985", "0.55032635", "0.5498475", "0.54731965", "0.5472796", "0.5467737", "0.54560685", "0.54558706", "0.54397094", "0.54364574", "0.5428276", "0.5419001", "0.54149234", "0.5402264", "0.5395654", "0.53759587", "0.5375182", "0.5374135", "0.5362319", "0.5356736", "0.53558624", "0.53501594", "0.53432506", "0.53431654", "0.5340875", "0.5340185", "0.53375405", "0.5336292", "0.53341115", "0.5332425", "0.5331135", "0.5330486", "0.5329414", "0.53203285", "0.5317878", "0.5316806", "0.5308948", "0.52977467", "0.52965575", "0.52812296", "0.52785116", "0.5275987", "0.5267314", "0.52643824", "0.526198", "0.5261966", "0.5257718", "0.52569747", "0.5249602", "0.5249341", "0.52491367", "0.52392966", "0.52329713", "0.5223758", "0.52166265", "0.5210268", "0.5209754", "0.5204013", "0.52020174", "0.5194606", "0.51919633", "0.5187404", "0.5180995", "0.5169925", "0.51693857", "0.51492596", "0.51448596", "0.5143506", "0.5143429", "0.5134071", "0.5131802", "0.5127097", "0.51270884", "0.5125235", "0.51226175", "0.5122234", "0.51161546", "0.51088417" ]
0.6953013
0
Run a raw GraphQL query
def query(output, query):
    gqlapi = gql.get_api()
    print_output(output, gqlapi.query(query))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_graphql(raw_query, endpoint):\n query = \" \".join(shlex.split(raw_query, posix=False))\n r = requests.get(endpoint, params={\"query\": query})\n if r.status_code == 200:\n return r.json()\n elif r.status_code == 400:\n response = r.json()\n assert \"errors\" in response\n raise GraphQLError(\"\".join([e[\"message\"] for e in response[\"errors\"]]))\n else:\n raise requests.exceptions.RequestException(\n f\"HTTP Status: {r.status_code}, Response Body: {r.text}\"\n )", "def run_graphql(query: str, token: str):\n response = requests.post(\n 'https://api.github.com/graphql',\n json={'query': query},\n headers={'Authorization': 'Bearer ' + token})\n response.raise_for_status()\n return response.json()", "def run_graphql_query(\n self,\n query,\n headers,\n status_code=200):\n request = requests.post(PH_API_URL, data=json.dumps(query), headers=headers)\n if request.status_code == status_code:\n return request.json()\n else:\n raise Exception(\n \"Unexpected status code returned: {}\".format(\n request.status_code)\n )", "def raw_query(self, query: str):\n\n async def func():\n result = await LemkPgUtils.get_query_result(self.dsn, query)\n return result\n\n return self._run_async(func())", "async def raw_query(self, query: str):\n result = await LemkPgUtils.get_query_result(self.dsn, query)\n return result", "def raw_query(\n # TODO: Passing the whole clickhouse query here is needed as long\n # as the execute method depends on it. Otherwise we can make this\n # file rely either entirely on clickhouse query or entirely on\n # the formatter.\n clickhouse_query: Union[Query, CompositeQuery[Table]],\n request_settings: RequestSettings,\n formatted_query: FormattedQuery,\n reader: Reader,\n timer: Timer,\n query_metadata: SnubaQueryMetadata,\n stats: MutableMapping[str, Any],\n trace_id: Optional[str] = None,\n robust: bool = False,\n) -> QueryResult:\n all_confs = state.get_all_configs()\n query_settings: MutableMapping[str, Any] = {\n k.split(\"/\", 1)[1]: v\n for k, v in all_confs.items()\n if k.startswith(\"query_settings/\")\n }\n\n timer.mark(\"get_configs\")\n\n sql = formatted_query.get_sql()\n\n update_with_status = partial(\n update_query_metadata_and_stats,\n clickhouse_query,\n sql,\n timer,\n stats,\n query_metadata,\n query_settings,\n trace_id,\n )\n\n execute_query_strategy = (\n execute_query_with_readthrough_caching\n if state.get_config(\"use_readthrough_query_cache\", 1)\n else execute_query_with_caching\n )\n\n try:\n result = execute_query_strategy(\n clickhouse_query,\n request_settings,\n formatted_query,\n reader,\n timer,\n stats,\n query_settings,\n robust=robust,\n )\n except Exception as cause:\n if isinstance(cause, RateLimitExceeded):\n stats = update_with_status(QueryStatus.RATE_LIMITED)\n else:\n with configure_scope() as scope:\n if isinstance(cause, ClickhouseError):\n scope.fingerprint = [\"{{default}}\", str(cause.code)]\n if scope.span:\n if cause.code == errors.ErrorCodes.TOO_SLOW:\n sentry_sdk.set_tag(\"timeout\", \"predicted\")\n elif cause.code == errors.ErrorCodes.TIMEOUT_EXCEEDED:\n sentry_sdk.set_tag(\"timeout\", \"query_timeout\")\n elif cause.code in (\n errors.ErrorCodes.SOCKET_TIMEOUT,\n errors.ErrorCodes.NETWORK_ERROR,\n ):\n sentry_sdk.set_tag(\"timeout\", \"network\")\n elif isinstance(cause, (TimeoutError, ExecutionTimeoutError)):\n if scope.span:\n sentry_sdk.set_tag(\"timeout\", \"cache_timeout\")\n\n logger.exception(\"Error running query: %s\\n%s\", sql, cause)\n stats = update_with_status(QueryStatus.ERROR)\n raise 
QueryException(\n {\n \"stats\": stats,\n \"sql\": sql,\n \"experiments\": clickhouse_query.get_experiments(),\n }\n ) from cause\n else:\n stats = update_with_status(QueryStatus.SUCCESS)\n return QueryResult(\n result,\n {\n \"stats\": stats,\n \"sql\": sql,\n \"experiments\": clickhouse_query.get_experiments(),\n },\n )", "def query(self, query: str, variables: Optional[Any] = None) -> Dict:\n resp = self.post(\"graphql\", json={\"query\": query, \"variables\": variables})\n if \"errors\" in resp:\n raise Exception(resp[\"errors\"][0][\"message\"])\n return resp[\"data\"]", "def query(self, query, authorization_required=True):\n url = 'https://{}/api/v1/graphql'.format(self.host)\n headers = {\n 'Content-Type': 'application/json',\n }\n json = {\n 'query': query,\n }\n # Login if not yet done\n if authorization_required:\n if not self.authorized:\n self.login()\n if self.token:\n headers['Authorization'] = 'Bearer {}'.format(self.token)\n\n request = self.session.post(\n url, headers=headers, json=json,\n verify=self.verify)\n return request", "def _run_query(self):", "def run_query(self, query: str) -> BoltStatementResult:\n with self.neo4j_driver.driver.session() as session:\n return session.run(query)", "async def run_query(query):\n async with httpx.AsyncClient(timeout=None) as client:\n response = await client.post(\n BLAZEGRAPH_URL,\n headers=BLAZEGRAPH_HEADERS,\n data=query,\n )\n assert response.status_code < 300\n return response.json()['results']['bindings']", "def raw(self, query: Any, data: Any = None):\n raise NotImplementedError", "def soql_query(self, query):\n self.builtin.log(\"Running SOQL Query: {}\".format(query))\n return self.cumulusci.sf.query_all(query)", "def run_query(self):\n return _run_query(self.query)", "def query():\n query = request.json.get('query')\n variables = request.json.get('variables') # Todo: add handling variables\n logger.debug('Query: %s', request.json)\n result = schema.execute(query)\n result_hash = format_result(result)\n return result_hash", "def execute_gql_query(\n self, gql_name: str, **kwargs\n ) -> Union[dict, list, int]:\n self._query_type = \"query\"\n data = self._exec(gql_name, kwargs)\n return data", "def query(self, query):", "def run_sql(self, sql):\n def mk_run_sql_q(sql):\n return {\n 'type' : 'run_sql',\n 'args': {\n 'sql' : sql\n }\n }\n return self.v1q(mk_run_sql_q(sql))", "def make_query(self):", "def get_query():\n query = \"\"\"{\n repository(name: \"flux\", owner: \"fluxcd\") {\n forkCount\n issues {\n totalCount\n }\n pullRequests {\n totalCount\n }\n releases {\n totalCount\n }\n stargazers {\n totalCount\n }\n watchers {\n totalCount\n }\n }\n}\n \"\"\"\n return query", "def raw(self, query: Any, data: Any = None):\n if data is None:\n data = {}\n assert isinstance(query, str)\n assert isinstance(data, (dict, None))\n\n return self.get_connection().execute(query, data)", "def execute_graphql_request(\n schema, # type: GraphQLSchema\n params, # type: RequestParams\n allow_only_query=False, # type: bool\n backend=None, # type: GraphQLBackend\n **kwargs # type: Any\n):\n if not params.query:\n raise HttpQueryError(400, \"Must provide query string.\")\n\n try:\n if not backend:\n backend = get_default_backend()\n document = backend.document_from_string(schema, params.query)\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n if allow_only_query:\n operation_type = document.get_operation_type(params.operation_name)\n if operation_type and operation_type != \"query\":\n raise HttpQueryError(\n 
405,\n \"Can only perform a {} operation from a POST request.\".format(\n operation_type\n ),\n headers={\"Allow\": \"POST\"},\n )\n\n try:\n return document.execute(\n operation_name=params.operation_name, variables=params.variables, **kwargs\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)", "def _raw(self, query: Any, data: Any = None):\n raise NotImplementedError", "def sql(self, q):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'sql')\r\n\r\n return http.Request('POST', url, params), parsers.parse_json", "async def _run_query(query, conn = None):\n # run() it if caller didn't do that already\n if not inspect.isawaitable(query):\n if not isinstance(query, r.RqlQuery):\n raise TypeError(\"query is neither awaitable nor a RqlQuery\")\n cn = conn or await db_conn\n query = query.run(cn)\n\n return await query", "def test_raw_query(self):\n id = get_rand_string()\n prefix = get_rand_string()\n\n # Same data and user_id\n user_id = data = prefix + \"-\" + get_rand_string()\n\n self.conn.add(id=id, user_id=user_id, data=data)\n self.conn.commit()\n\n # Issue a prefix query, return data only (which should be equal\n # to user_id).\n response = self.conn.raw_query(q=\"user_id:%s*\" % prefix, fl=\"data\")\n\n # raw_query returns a string\n xml = parseString(response)\n\n doc_elem = xml.getElementsByTagName(\"doc\")\n\n self.assertEquals(len(doc_elem), 1,\n \"raw_query didn't return the document, id:%s, the response is:%s\" %\n (id, repr(response)))\n\n query_data = doc_elem[0].firstChild.firstChild.nodeValue\n\n self.assertEquals(query_data, data,\n (\"raw_query returned wrong value for data field, \"\n \"expected %s, got:%s\" % (data, query_data)))", "def showGqlQuery(query):\n proto = query._proto_query\n kind = query._model_class.kind()\n filters = proto.filters()\n boundfilters = proto._GQL__bound_filters\n orderings = proto.orderings()\n hint = proto.hint()\n limit = proto.limit()\n offset = proto._GQL__offset\n\n select = \"SELECT * FROM %s\" % kind\n where = []\n order = []\n\n for k in sorted(filters):\n for clause in filters[k]:\n name, op = clause\n if name==-1: name = 'ANCESTOR'\n where.append(\"%s %s :%s\" % (name, op.upper(), k))\n\n for k in sorted(boundfilters):\n if isinstance(k, tuple):\n op = ' '.join(k)\n else:\n op = k\n where.append(\"%s %r\" % (op, boundfilters[k]))\n\n for p, o in orderings:\n order.append(\"%s %s\" % (p, 'DESC' if o==datastore.Query.DESCENDING else 'ASC'))\n\n gql = select\n if where:\n gql += ' WHERE '+' AND '.join(where)\n if order:\n gql += ' ORDER BY ' + ', '.join(order)\n if limit != -1:\n if offset != -1:\n gql += ' LIMIT %s,%s' % (offset,limit)\n else:\n gql += ' LIMIT %s' % limit\n elif offset != -1:\n gql += ' OFFSET %s' % offset\n return gql", "def execute_graphql_request(\n self, request, data, query, variables, operation_name, show_graphiql=False\n ):\n if not query:\n if show_graphiql:\n return None\n raise HttpError(HttpResponseBadRequest(\"Must provide query string.\"))\n\n try:\n backend = self.get_backend(request)\n document = backend.document_from_string(self.schema, query)\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n if request.method.lower() == \"get\":\n operation_type = document.get_operation_type(operation_name)\n if operation_type and operation_type != \"query\":\n if show_graphiql:\n return None\n\n raise HttpError(\n HttpResponseNotAllowed(\n [\"POST\"],\n \"Can only perform a {} operation from a POST request.\".format(\n 
operation_type\n ),\n )\n )\n\n # Check request weight\n try:\n if self.list_limit or self.weight_limit or self.depth_limit:\n if document:\n fragments = get_fragments(document.document_ast.definitions)\n definitions_total_weight = 0\n for definition in document.document_ast.definitions:\n if not isinstance(definition, OperationDefinition):\n continue\n\n if operation_name and definition.name != operation_name:\n continue\n\n def_weight = self.calculate_action_weight(\n definition.selection_set,\n fragments)\n definitions_total_weight += def_weight\n if self.weight_limit and definitions_total_weight > self.weight_limit:\n raise QueryWeightExceeded(\"Your query exceeds the maximum query weight allowed\")\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)\n\n try:\n extra_options = {}\n if self.executor:\n # We only include it optionally since\n # executor is not a valid argument in all backends\n extra_options[\"executor\"] = self.executor\n\n return document.execute(\n root_value=self.get_root_value(request),\n variable_values=variables,\n operation_name=operation_name,\n context_value=self.get_context(request),\n middleware=self.get_middleware(request),\n **extra_options\n )\n except Exception as e:\n return ExecutionResult(errors=[e], invalid=True)", "def _run_query(self, query):\n cursor = self.conn.cursor()\n cursor.execute(query)\n return cursor.fetchall()", "def raw_as_qs(self, raw_query, params=()):\n cursor = connection.cursor()\n try:\n cursor.execute(raw_query, params)\n return self.filter(id__in=(x[0] for x in cursor))\n finally:\n cursor.close()", "def execute_query(self, *, scope: Scope, params: QueryParams) -> QueryResults:", "def sql_query(self, query_text, edges = False):\n q = QueryWrapper(self.graph, QueryString(query_text, 'sql'),\n edges = edges, debug = self._debug)\n return q", "def query(self, query, request_type=None):\n\n #encode to UTF-8\n try: query = query.encode(\"utf-8\")\n except: query = query.decode('raw_unicode_escape').encode(\"utf-8\")\n\n lowercase_query = query.lower()\n if lowercase_query.startswith(\"select\") or \\\n lowercase_query.startswith(\"describe\") or \\\n lowercase_query.startswith(\"show\") or \\\n request_type==\"GET\":\n\n return self._get(urllib.urlencode({'sql': query}))\n\n else:\n return self._post(urllib.urlencode({'sql': query}))", "def run_query(query, data_only=True):\n cmd = ['wp', 'db', 'query', '--skip-plugins', '--skip-themes', query]\n if data_only:\n cmd.append('--skip-column-names')\n return subprocess.check_output(cmd).decode('utf-8').strip()", "def execute_query(self):\n try:\n # get query and templates\n query = self.request.data.get(\"query\", None)\n templates = self.request.data.get(\"templates\", \"[]\")\n registries = self.get_registries()\n order_by_field = self.request.data.get(\"order_by_field\", \"\")\n\n if order_by_field:\n order_by_field = order_by_field.split(\",\")\n\n if query is None:\n content = {\"message\": \"Query should be passed in parameter.\"}\n return Response(content, status=status.HTTP_400_BAD_REQUEST)\n\n # prepare query\n raw_query = self.build_query(query, templates, registries)\n # execute query\n data_list = self.execute_json_query(raw_query, order_by_field)\n # build and return response\n return self.build_response(data_list)\n\n except Exception as api_exception:\n content = {\"message\": str(api_exception)}\n return Response(\n content, status=status.HTTP_500_INTERNAL_SERVER_ERROR\n )", "def query(self, *, sparql: str) -> Result:\n pass", "def _raw(self, 
query: Any, data: Any = None):\n assert isinstance(query, str)\n\n conn = self._get_session()\n try:\n results = conn.execute(query)\n\n entity_items = []\n for item in results:\n entity = self.model_cls.to_entity(item)\n entity.state_.mark_retrieved()\n entity_items.append(entity)\n\n result = ResultSet(\n offset=0,\n limit=len(entity_items),\n total=len(entity_items),\n items=entity_items,\n )\n except DatabaseError as exc:\n logger.error(f\"Error while running raw query: {exc}\")\n raise\n finally:\n if not current_uow:\n conn.commit()\n conn.close()\n\n return result", "async def _perform_query(self, query, login_token):\n data = {\"username\": self.user, \"query\": query.lower(), \"token\": login_token}\n return await self._perform_request(\"query\", data, lambda r: r.text())", "def run_query(db, query):\n log.debug(\"run query on %s: %s\", db, query)\n conn = _connect(show_dbs(db)[db][\"uri\"])\n return conn.cursor().execute(query).fetchall()", "def execute_sparql(client: NeptuneClient, query: str) -> pd.DataFrame:\n data = client.read_sparql(query)\n df = None\n if \"results\" in data and \"bindings\" in data[\"results\"]:\n df = pd.DataFrame(data[\"results\"][\"bindings\"])\n df.applymap(lambda x: x[\"value\"])\n else:\n df = pd.DataFrame(data)\n\n return df", "def make_query(graph, ns, request_schema, response_schema):\n @graph.route(\"/v1/foo/get\", Operation.Query, ns)\n @qs(request_schema)\n @response(response_schema)\n def foo_query():\n \"\"\"\n My doc string\n \"\"\"\n request_data = load_query_string_data(request_schema)\n response_data = dict(\n result=True,\n value=request_data[\"required_value\"],\n )\n return dump_response_data(response_schema, response_data, Operation.Query.value.default_code)", "def query(self):\n pass", "def query(env):\n if app.config['ENABLE_QUERY']:\n envs = environments()\n check_env(env, envs)\n\n form = QueryForm(meta={\n 'csrf_secret': app.config['SECRET_KEY'],\n 'csrf_context': session})\n if form.validate_on_submit():\n if form.endpoints.data == 'pql':\n query = form.query.data\n elif form.query.data[0] == '[':\n query = form.query.data\n else:\n query = '[{0}]'.format(form.query.data)\n result = get_or_abort(\n puppetdb._query,\n form.endpoints.data,\n query=query)\n return render_template('query.html',\n form=form,\n result=result,\n envs=envs,\n current_env=env)\n return render_template('query.html',\n form=form,\n envs=envs,\n current_env=env)\n else:\n log.warn('Access to query interface disabled by administrator..')\n abort(403)", "def generate_query(self):\n return", "def run_query(\n self, project_id, partition_id, read_options=None, query=None, gql_query=None\n ):\n request_pb = _datastore_pb2.RunQueryRequest(\n project_id=project_id,\n partition_id=partition_id,\n read_options=read_options,\n query=query,\n gql_query=gql_query,\n )\n return _rpc(\n self.client._http,\n project_id,\n \"runQuery\",\n self.client._base_url,\n self.client._client_info,\n request_pb,\n _datastore_pb2.RunQueryResponse,\n )", "def _run_query(self, inputs, state):\n in_shape = (self._query_batch,) + self._layer.input_shape\n queries = self._projection('Query', inputs, in_shape)\n return self._layer.apply(queries, list(state))", "def query(self):", "def query(self, **kwargs):", "def execute_query(self, *args, **kwargs):", "async def execute(entity, query: Union[ClauseElement, str], values: Union[List, Dict] = None) -> Any:\n return await uvicore.db.execute(query=query, values=values, connection=entity.__connection__)", "def pp_query(query):\n 
print(format_query(query))", "def run_http_query(\n schema, # type: GraphQLSchema\n request_method, # type: str\n data, # type: Union[Dict, List[Dict]]\n query_data=None, # type: Optional[Dict]\n batch_enabled=False, # type: bool\n catch=False, # type: bool\n **execute_options # type: Any\n):\n if not isinstance(schema, GraphQLSchema):\n raise TypeError(\"Expected a GraphQL schema, but received {!r}.\".format(schema))\n if request_method not in (\"get\", \"post\"):\n raise HttpQueryError(\n 405,\n \"GraphQL only supports GET and POST requests.\",\n headers={\"Allow\": \"GET, POST\"},\n )\n if catch:\n catch_exc = (\n HttpQueryError\n ) # type: Union[Type[HttpQueryError], Type[_NoException]]\n else:\n catch_exc = _NoException\n is_batch = isinstance(data, list)\n\n is_get_request = request_method == \"get\"\n allow_only_query = is_get_request\n\n if not is_batch:\n if not isinstance(data, (dict, MutableMapping)):\n raise HttpQueryError(\n 400, \"GraphQL params should be a dict. Received {!r}.\".format(data)\n )\n data = [data]\n elif not batch_enabled:\n raise HttpQueryError(400, \"Batch GraphQL requests are not enabled.\")\n\n if not data:\n raise HttpQueryError(400, \"Received an empty list in the batch request.\")\n\n extra_data = {} # type: Dict[str, Any]\n # If is a batch request, we don't consume the data from the query\n if not is_batch:\n extra_data = query_data or {}\n\n all_params = [get_graphql_params(entry, extra_data) for entry in data]\n\n executor = execute_options.get(\"executor\")\n response_executor = executor if executor else SyncExecutor()\n\n response_promises = [\n response_executor.execute(\n get_response, schema, params, catch_exc, allow_only_query, **execute_options\n )\n for params in all_params\n ]\n response_executor.wait_until_finished()\n\n results = [\n result.get() if is_thenable(result) else result for result in response_promises\n ]\n\n return ServerResults(results, all_params)", "def _make_query(self):\r\n raise NotImplementedError()", "def _send_gql_request(gateway_port):\n mutation = (\n f'mutation {{'\n + '''docs(data: {text: \"abcd\"}) { \n id \n } \n }\n '''\n )\n c = Client(host='localhost', port=gateway_port, protocol='http')\n return c.mutate(mutation=mutation)", "def execute_query(query, params={}, transaction=True, context=\"\"):\r\n if transaction:\r\n query = \"g.stopTransaction(FAILURE)\\n\" + query\r\n\r\n # If we have no hosts available raise an exception\r\n if len(_hosts) <= 0:\r\n raise ThunderdomeConnectionError('Attempt to execute query before calling thunderdome.connection.setup')\r\n \r\n host = _hosts[0]\r\n #url = 'http://{}/graphs/{}/tp/gremlin'.format(host.name, _graph_name)\r\n data = json.dumps({'script':query, 'params': params})\r\n headers = {'Content-Type':'application/json', 'Accept':'application/json', 'Accept-Charset':'utf-8'}\r\n import time\r\n try:\r\n start_time = time.time()\r\n conn = httplib.HTTPConnection(host.name, host.port)\r\n conn.request(\"POST\", '/graphs/{}/tp/gremlin'.format(_graph_name), data, headers)\r\n response = conn.getresponse()\r\n content = response.read()\r\n\r\n total_time = int((time.time() - start_time) * 1000)\r\n\r\n if context and _statsd:\r\n _statsd.timing(\"{}.timer\".format(context), total_time)\r\n _statsd.incr(\"{}.counter\".format(context))\r\n\r\n\r\n except socket.error as sock_err:\r\n if _statsd:\r\n total_time = int((time.time() - start_time) * 1000)\r\n _statsd.incr(\"thunderdome.socket_error\".format(context), total_time)\r\n raise ThunderdomeQueryError('Socket error during 
query - {}'.format(sock_err))\r\n except:\r\n raise\r\n \r\n logger.info(json.dumps(data))\r\n logger.info(content)\r\n\r\n try:\r\n response_data = json.loads(content)\r\n except ValueError as ve:\r\n raise ThunderdomeQueryError('Loading Rexster results failed: \"{}\"'.format(ve))\r\n \r\n if response.status != 200:\r\n if 'message' in response_data and len(response_data['message']) > 0:\r\n graph_missing_re = r\"Graph \\[(.*)\\] could not be found\"\r\n if re.search(graph_missing_re, response_data['message']):\r\n raise ThunderdomeGraphMissingError(response_data['message'])\r\n else:\r\n raise ThunderdomeQueryError(\r\n response_data['message'],\r\n response_data\r\n )\r\n else:\r\n if _statsd:\r\n _statsd.incr(\"{}.error\".format(context))\r\n raise ThunderdomeQueryError(\r\n response_data['error'],\r\n response_data\r\n )\r\n\r\n return response_data['results']", "def get_raw_query(self, row_id):\n cursor = self.connection.cursor()\n cursor.execute(\"\"\"\n SELECT raw_query FROM queries WHERE rowid=(?);\n \"\"\", (row_id,))\n return cursor.fetchone()[0]", "def visit_query(self, query):\n return query", "def query(self, query, initNs=None, initBindings=None, queryGraph=None, **kwargs):\n\n# r_queryType = pattern.search(query).group(\"prefixes\").upper()\n# print(r_queryType)\n uri = self.rest_services[\"repository\"]\n infer = kwargs.get('infer',None)\n #timeout = kwargs.get('timeout',\"0\")\n payload = {\"$\"+k: v.n3() for k,v in initBindings.items()}\n\n payload[\"infer\"] = self.infer if infer is None else infer\n payload[\"infer\"] = str(payload[\"infer\"]).lower()\n #payload[\"$\"+timeout]=0\n payload[\"query\"] = query\n r = requests.post(uri, data=payload,\n stream=True,\n headers= {\"Accept\" : \"application/sparql-results+json,application/trix\",\n 'connection': 'keep-alive',\n 'Accept-Encoding': 'gzip,deflate',\n \"Content-Type\" :\"application/x-www-form-urlencoded\"})\n\n r.raw.decode_content = True\n if r.headers['Content-Type'] == 'application/sparql-results+json;charset=UTF-8':\n return self.__make_result(r)\n elif r.headers['Content-Type'] == 'application/trix;charset=UTF-8':\n return self.__make_trix_generator__(r)\n else:\n raise ValueError(\"Response content type not parsable {r}\".format(r=r.text))", "def execute_json_query(self, raw_query, order_by_field):\n return oai_record_api.execute_json_query(\n raw_query, self.request.user, order_by_field\n )", "def executeQuery(payload, newQuery):\r\n\tq = newQuery.format(**payload)\r\n\tdb.query(q)\r\n\tdata = db.fetchall()\r\n\treturn data", "def execute(query):\n print query\n cursor.execute(query)", "def query(self) -> None:\n raise NotImplementedError()", "def run_query(query):\n conn = connection.get_db_connection()\n cursor = conn.cursor()\n cursor.execute(query)\n return cursor", "def query(self, q, *args):\n\n if self.dbtype == 'pg':\n print q\n return self.db.execute(q, *args).fetchall()\n else:\n cur = self.db.cursor()\n try:\n print q\n print args\n if args:\n cur.execute(q, args)\n else:\n cur.execute(q)\n ret = cur.fetchall()\n return ret\n except:\n self.db.rollback()\n raise\n finally:\n cur.close()", "def _traced_graphql(func, _, args, kwargs):\n\n schema = args[0]\n\n # get the query as a string\n if len(args) > 1:\n request_string = args[1]\n else:\n request_string = kwargs.get('request_string')\n\n if isinstance(request_string, Document):\n query = request_string.loc.source.body\n else:\n query = request_string\n\n # allow schemas their own tracer with fall-back to the global\n tracer = getattr(schema, 
'datadog_tracer', ddtrace.tracer)\n\n if not tracer.enabled:\n return func(*args, **kwargs)\n\n with tracer.trace(\n RES,\n span_type=TYPE,\n service=SERVICE,\n resource=_resolve_query_res(query)\n ) as span:\n span.set_tag(QUERY, query)\n result = None\n try:\n result = func(*args, **kwargs)\n return result\n finally:\n # `span.error` must be integer\n span.error = int(result is None or result.invalid)\n if result is not None:\n span.set_tag(ERRORS, result.errors)\n span.set_metric(INVALID, int(result.invalid))", "def run_query(self, query, limit = None):\n if limit is not None:\n query += 'LIMIT ' + str(limit)\n query_job = self.bigquery_client.query(query) # API request\n return query_job.result() # Waits for query to finish", "def execute_query():\n start_time = time.time()\n\n queries = request.json[\"queries\"]\n random_command = request.json[\"random_command\"]\n\n \"\"\" Running the queries against the pre-loaded index. \"\"\"\n output_dict = runner.run_queries(queries, random_command)\n\n \"\"\" Dumping the results to a JSON file. \"\"\"\n with open(output_location, 'w') as fp:\n json.dump(output_dict, fp)\n\n response = {\n \"Response\": output_dict,\n \"time_taken\": str(time.time() - start_time),\n \"username_hash\": username_hash\n }\n return flask.jsonify(response)", "def test_psycopg_binary_query_works(instrument, postgres_connection, elasticapm_client):\n cursor = postgres_connection.cursor()\n query = b\"SELECT * FROM test WHERE name LIKE 't%'\"\n\n baked_query = query.decode()\n try:\n elasticapm_client.begin_transaction(\"web.django\")\n cursor.execute(query)\n result = cursor.fetchall()\n elasticapm_client.end_transaction(None, \"test-transaction\")\n finally:\n # make sure we've cleared out the spans for the other tests.\n assert [(2, \"two\"), (3, \"three\")] == result\n transactions = elasticapm_client.events[TRANSACTION]\n spans = elasticapm_client.spans_for_transaction(transactions[0])\n span = spans[0]\n assert span[\"name\"] == \"SELECT FROM test\"\n assert \"db\" in span[\"context\"]\n assert span[\"context\"][\"db\"][\"instance\"] == \"elasticapm_test\"\n assert span[\"context\"][\"db\"][\"type\"] == \"sql\"\n assert span[\"context\"][\"db\"][\"statement\"] == baked_query", "def run_codeql_query(query, database, output, search_path):\n # --search-path is required when the CLI needs to upgrade the database scheme.\n subprocess_run([\"codeql\", \"query\", \"run\", query, \"--database\", database,\n \"--output\", output + \".bqrs\", \"--search-path\", search_path])\n subprocess_run([\"codeql\", \"bqrs\", \"decode\", output + \".bqrs\",\n \"--format=csv\", \"--no-titles\", \"--output\", output])\n os.remove(output + \".bqrs\")", "def graphiql(request):\n del request\n graphiql_filepath = pathlib.Path(__file__).absolute().parent / \"graphiql.html\"\n with open(graphiql_filepath) as f:\n return django.http.response.HttpResponse(f.read())", "def query(self, query):\n cursor = self.database.cursor()\n cursor.execute(query)\n # If it's a query that's expected to return a value (EG: SELECT)\n if query.strip().lower().startswith('select'): return cursor.fetchall()", "def graphql_query(self, end_cursor, user_id) -> tuple[dict, list, tuple[str, bool]]:\n query_params = {\n 'query_hash': '8c2a529969ee035a5063f2fc8602a0fd',\n 'variables': json.dumps({\"id\":user_id,\"first\":8,\"after\":end_cursor})\n }\n\n url_post = 'https://www.instagram.com/graphql/query/'\n\n _, res_post = self.request_safe(url_post, params_request_safe=query_params)\n \n try:\n res_json = 
res_post.json()\n except json.JSONDecodeError:\n soup = BeautifulSoup(res_post.text, 'html.parser')\n elm = soup.find('div', attrs={'class':'error-container'})\n if not elm is None and \"Error\" in elm.text:\n raise Exception(elm.text)\n if res_post.json().get('message') == 'rate limited':\n raise Exception(res_post.json().get('message'))\n \n has_next_page = res_post.json()['data']['user']['edge_owner_to_timeline_media']['page_info']['has_next_page']\n end_cursor = res_post.json()['data']['user']['edge_owner_to_timeline_media']['page_info']['end_cursor']\n edges = res_post.json()['data']['user']['edge_owner_to_timeline_media']['edges']\n return (res_post.json(), edges, (end_cursor, has_next_page))", "def query(\n self,\n statement, # type: str\n *options, # type: QueryOptions\n **kwargs # type: Any\n ) -> QueryResult:\n\n query = N1QLQuery.create_query_object(statement,\n *options,\n **kwargs)\n return QueryResult(N1QLRequest.generate_n1ql_request(self.connection,\n query.params,\n default_serializer=self.default_serializer))", "def query(self, fieldname, value, *args, **kwargs):\n qg = self.schema.query_generator(fieldname)\n return qg(value, *args, **kwargs).connect(self)", "def run_query(conn, query):\n\tcur = conn.cursor()\n\tcur.execute(query)\n\trows = cur.fetchall()\n\treturn rows", "def construct_query(self):\n reader = QueryReader(filepath=self.filepath, filename=self.filename, raw_sql=self.raw_sql, params=self.params)\n return reader.sql", "def __call__(self, data):\n return data.eval(self.query)", "def _Dynamic_RunQuery(self, query, query_result, request_id=None):\n if query.has_transaction():\n if not query.has_ancestor():\n raise apiproxy_errors.ApplicationError(\n datastore_pb.Error.BAD_REQUEST,\n 'Only ancestor queries are allowed inside transactions.')\n (filters, orders) = datastore_index.Normalize(query.filter_list(),\n query.order_list(), [])\n \n old_datastore_stub_util.FillUsersInQuery(filters)\n\n if not query.has_app():\n query.set_app(self.project_id)\n self.__ValidateAppId(query.app())\n\n self._RemoteSend(query, query_result, \"RunQuery\", request_id)\n results = query_result.result_list()\n for result in results:\n old_datastore_stub_util.PrepareSpecialPropertiesForLoad(result)\n\n last_cursor = None\n if query_result.has_compiled_cursor():\n last_cursor = query_result.compiled_cursor()\n\n if query_result.more_results():\n new_cursor = InternalCursor(query, last_cursor, len(results))\n cursor_id = self.__getCursorID()\n cursor = query_result.mutable_cursor()\n cursor.set_app(self.project_id)\n cursor.set_cursor(cursor_id)\n self.__queries[cursor_id] = new_cursor\n\n if query.compile():\n compiled_query = query_result.mutable_compiled_query()\n compiled_query.set_keys_only(query.keys_only())\n compiled_query.mutable_primaryscan().set_index_name(query.Encode())", "def do_query(self, query):\n query = query.replace('\\n', ' ')\n self.cursor.execute(query)\n return self.cursor.fetchall()", "def pg_execute(pg_conn, sql):\n print sql\n # XXX execute command", "def query(self, q, **kwargs):\n return self._client.query(self._db_name, q, **kwargs)", "async def scalar(self, query, connection=None):\n async with self.connection(connection) as conn:\n r = await conn.execute(query)\n return await r.scalar()", "def submit_query(self, username, password):\n\n #username = os.environ['NEWSREADER_USERNAME']\n #password = os.environ['NEWSREADER_PASSWORD']\n payload = {'id': self.query}\n \n endpoint_url = self.endpoint_stub_url.format(action=self.action)\n print \"\\n\\n**New 
CRUD query**\"\n print endpoint_url, payload\n t0 = time.time()\n try:\n response = requests.get(endpoint_url, auth=(username, password),\n params=payload)\n except Exception as e:\n print \"Query raised an exception\"\n print type(e)\n t1 = time.time()\n total = t1-t0\n raise QueryException(\"Query raised an exception: {0}\".format(type(e).__name__))\n else:\n t1 = time.time()\n total = t1-t0\n print \"Time to return from query: {0:.2f} seconds\".format(total)\n print \"Response code: {0}\".format(response.status_code)\n print \"From cache: {0}\".format(response.from_cache)\n\n #print response.content\n \n if response and (response.status_code == requests.codes.ok):\n self.json_result = {\"content\":response.content}\n self.clean_json = self.json_result\n else:\n raise QueryException(\"Response code not OK: {0}\".format(response.status_code))", "def query(self):\r\n raise NotImplementedError", "def swis_query_command(client: Client, args: Dict[str, Any]) -> CommandResults:\n query = args.get('query')\n if not query:\n raise ValueError(ERR_MSG['REQUIRED_ARGUMENT'])\n\n response = client.http_request(method=\"GET\", url_suffix=URL_SUFFIX[\"QUERY\"],\n params={\"query\": query})\n outputs = createContext(response.get(\"results\", []), removeNull=True)\n readable_response = convert_query_output_to_hr(outputs)\n return CommandResults(\n outputs_prefix=\"SolarWinds.Query\",\n outputs=outputs,\n readable_output=readable_response,\n raw_response=response\n )", "def get_data_query(file_name):\n with open(file_name, 'r') as graphql_query:\n return graphql_query.read()", "def runningwithqueries(query):\n print(\"\\nRunning Query: \" + str(query) + \"\\nResult :\\n\")\n crsr = cnxn.execute(query)\n columns = [column[0] for column in crsr.description]\n print(columns)\n for row in crsr.fetchall():\n print(row)\n crsr.close()", "def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)", "def _handle_query(self, text, loop=False):\n \n # lazy complete\n text = line_add_lazy_return(text)\n text = line_add_lazy_describe(text)\n\n # RUN QUERY\n if not loop:\n res = self.dslobject.query(text)\n return res\n else:\n res = self.dslobject.query_iterative(text)\n return res", "def inspect_query(query):\n return _parse_query(query)", "def test_custom_query_basic(self):\n\n # Create a simple query statement a\n query = \"SELECT * FROM system.local\"\n statement = SimpleStatement(query)\n # Validate that various types of custom payloads are sent and received okay\n self.validate_various_custom_payloads(statement=statement)", "def n1qlQueryAll(self, *args, **kwargs):\n if not self.connected:\n cb = lambda x: self.n1qlQueryAll(*args, **kwargs)\n return self.connect().addCallback(cb)\n\n kwargs['itercls'] = BatchedN1QLRequest\n o = super(RawBucket, self).n1ql_query(*args, **kwargs)\n o.start()\n return o._getDeferred()", "def execute_all_templated(self, conn, query, *values, system=False):\n query_filled = query.format(*values)\n self.last_query=query_filled if not system else self.last_query\n queries = [item for item in query_filled.split(\";\") if item != \"\"]\n results = []\n if len(queries) > 1 and not system:\n self.health = Health.SICK\n self.message = \"SQL injection detected\"\n for query in queries:\n response = conn.execute(query + \";\")\n results.extend([item for item in response])\n return results", "def execute(self, context):\n logging.info(f\"Running SQL :{self.sql}\")\n self.hook = TrinoHook()\n query = self.hook.run(self.sql, autocommit=self.autocommit, 
parameters=self.parameters)\n if self.xcom_push:\n return query", "def query(self, block):\n raise NotImplementedError(\"Querying is an experimental feature\")", "def run_query(cur, query, show_results=False):\n num_rows = cur.execute(query)\n print('the query returned {} rows'.format(num_rows))\n if show_results:\n for row in cur.fetchall():\n print(row)", "def execute_query(\n # TODO: Passing the whole clickhouse query here is needed as long\n # as the execute method depends on it. Otherwise we can make this\n # file rely either entirely on clickhouse query or entirely on\n # the formatter.\n clickhouse_query: Union[Query, CompositeQuery[Table]],\n request_settings: RequestSettings,\n formatted_query: FormattedQuery,\n reader: Reader,\n timer: Timer,\n stats: MutableMapping[str, Any],\n query_settings: MutableMapping[str, Any],\n robust: bool,\n) -> Result:\n # Experiment, if we are going to grab more than X columns worth of data,\n # don't use uncompressed_cache in ClickHouse.\n uc_max = state.get_config(\"uncompressed_cache_max_cols\", 5)\n assert isinstance(uc_max, int)\n column_counter = ReferencedColumnsCounter()\n column_counter.visit(clickhouse_query.get_from_clause())\n if column_counter.count_columns() > uc_max:\n query_settings[\"use_uncompressed_cache\"] = 0\n\n # Force query to use the first shard replica, which\n # should have synchronously received any cluster writes\n # before this query is run.\n consistent = request_settings.get_consistent()\n stats[\"consistent\"] = consistent\n if consistent:\n query_settings[\"load_balancing\"] = \"in_order\"\n query_settings[\"max_threads\"] = 1\n\n result = reader.execute(\n formatted_query,\n query_settings,\n with_totals=clickhouse_query.has_totals(),\n robust=robust,\n )\n\n timer.mark(\"execute\")\n stats.update(\n {\"result_rows\": len(result[\"data\"]), \"result_cols\": len(result[\"meta\"])}\n )\n\n return result", "def query(self, qpath):\n return data.Query(self, qpath)", "def dbexecute(cxn, query, payload):\n\tcursor = cxn.cursor()\n\tif not payload:\n\t\tcursor.execute(query)\n\telse:\n\t\tcursor.execute(query, payload)", "def doQuery(self, s):\n self.setQuery(s)\n\n try:\n rval = self.query()\n g = rval.convert()\n return g['results']['bindings']\n except:\n print \"doQuery failed\"\n traceback.print_exc(file=sys.stdout)" ]
[ "0.7544899", "0.68699944", "0.6803751", "0.67968833", "0.661308", "0.65708804", "0.65654963", "0.6560553", "0.65150213", "0.65003735", "0.6493559", "0.64276546", "0.6414967", "0.63659316", "0.63526833", "0.63463426", "0.63447905", "0.6304541", "0.6291603", "0.629144", "0.627261", "0.6256023", "0.6243312", "0.6233386", "0.6124932", "0.6122041", "0.607239", "0.60442835", "0.60100925", "0.60000885", "0.59766424", "0.59434503", "0.5935973", "0.59264266", "0.59224856", "0.590932", "0.59076685", "0.58862346", "0.588159", "0.58773655", "0.5860784", "0.5840499", "0.58327806", "0.58200336", "0.58132464", "0.57995933", "0.57864153", "0.57752365", "0.57666415", "0.5763448", "0.57444775", "0.5740678", "0.5740271", "0.57274157", "0.57169026", "0.5713286", "0.57129943", "0.570822", "0.5694414", "0.56774026", "0.56690943", "0.56477594", "0.56432694", "0.56401515", "0.56327134", "0.56285405", "0.5621374", "0.561963", "0.56021446", "0.5596355", "0.5587982", "0.5584437", "0.55697304", "0.5542437", "0.55382985", "0.5533828", "0.5511129", "0.5508427", "0.550368", "0.549996", "0.54992515", "0.547893", "0.54777217", "0.54621935", "0.54607415", "0.54602844", "0.5449952", "0.5441183", "0.543115", "0.5428106", "0.5421768", "0.54153645", "0.541076", "0.54092634", "0.5405287", "0.5405015", "0.53956246", "0.53953934", "0.53927946", "0.53922576" ]
0.6531391
8
Passing in event_loop helps avoid the 'attached to a different loop' error
def test_app(event_loop): app.finalize() app.conf.store = "memory://" app.flow_control.resume() return app
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def event_loop(self):\n logging.warning('loop undefined')", "def dispatch_loop(self):\n pass", "def check_event_loop():\n loop = asyncio.get_event_loop()\n if loop.is_closed():\n asyncio.set_event_loop(asyncio.new_event_loop())", "def _handle_loop(self):\n pass", "def event_loop(request):\n loop = asyncio.get_event_loop()\n yield loop", "def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()", "def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)", "def load_event_loop():\n while True:\n try:\n async_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(async_loop)\n return async_loop\n except:\n time.sleep(3)", "def loop(self) -> AbstractEventLoop:", "def loop(self) -> AbstractEventLoop:", "def get_event_loop(*args, **kwargs):\r\n\r\n return get_loop(*args, **kwargs)", "def __init__(self, loop=None):\n object.__setattr__(self, '_loop', loop or get_event_loop())", "def _eventloop(self):\n logging.debug(\"%s - eventloop started\" % self.name)\n while not self.stopped:\n event = self.inqueue.get()\n if not event: break\n self.doevent(event)\n logging.debug(\"%s - eventloop stopped\" % self.name)", "def _io_event_loop_thread(self):\r\n io_event_loop = asyncio.get_event_loop_policy().new_event_loop()\r\n asyncio.set_event_loop(io_event_loop)\r\n assert isinstance(io_event_loop, AbstractEventLoop)\r\n self._io_event_loop = io_event_loop\r\n self._event_loop_started.release()\r\n self._io_event_loop.run_forever()", "def getLoop():\n return asyncio.get_event_loop_policy().get_event_loop()", "def get_event_loop() -> KivyEventLoop:\n return asyncio.get_event_loop()", "def mainloop(self, *args, **kwargs):\n if in_idle():\n return\n self._top.mainloop(*args, **kwargs)", "def mainloop(self, *args, **kwargs):\n if in_idle():\n return\n self.top.mainloop(*args, **kwargs)", "def set_event_loop(self, loop):\n if _in_trio_context():\n current_loop.set(loop)\n elif _faked_policy.policy is not None:\n _faked_policy.policy.set_event_loop(loop)\n else:\n super().set_event_loop(loop)", "def run_message_loop(self):\n raise NotImplementedError", "def test_no_sideeffects(self):\n c = EventLoop(\n lambda: None,\n lambda f, g: 1 / 0,\n lambda *args: 1 / 0,\n watchdog_thread=object(),\n reapAllProcesses=lambda: 1 / 0)\n del c", "def current_event_loop(self):\n loop = current_loop.get()\n if loop is None:\n loop = super().get_event_loop()\n return loop", "def mainloop(self):\n self.app.mainloop()", "def event_loop():\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()", "def run_main_loop():\n mainloop = GObject.MainLoop()", "def event_loop():\n loop = asyncio.get_event_loop()\n yield loop\n loop.close()", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def startLoop():\n patchAsyncio()", "def mainloop(self):\n self.master.mainloop()", "def loop(event_loop: AbstractEventLoop) -> AbstractEventLoop:\n yield event_loop\n event_loop.close()", "def setup_test_loop():\n loop = asyncio.get_event_loop()\n # asyncio.set_event_loop(None)\n return loop", "def set_asyncio_event_loop_policy() -> None:\n _get_asyncio_event_loop_policy()", "def loop(self):\n pass", "def mainloop(self):\n 
self.root.mainloop()", "def mainloop(self):\n self.root.mainloop()", "def event_loop(self):\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.done = True\n elif event.type == pg.KEYDOWN:\n self.keys = pg.key.get_pressed()\n self.toggle_show_fps(event.key)\n elif event.type == pg.KEYUP:\n self.keys = pg.key.get_pressed()\n self.toggle_fullscreen(event.key)\n self._scene.get_event(event)", "def event_loop(self) -> None:\n logging.info(\"Initializing default window opacity...\")\n self._set_all_window_opacity_to_default()\n try:\n logging.info(\"Initializing threads...\")\n for producer in self.producers:\n producer.start()\n for producer in self.producers:\n while not producer.ready:\n pass\n self.ready = True\n logging.info(\"Threads initialized, waiting for events...\")\n while self.keep_going:\n self._flash_queued_window()\n except (KeyboardInterrupt, SystemExit):\n logging.warn(\"Interrupt received, shutting down...\")\n self.shutdown()", "def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()", "def pre_loop(self, event):\n self.do_sync()", "def game_loop(self):\n self.interface.game_loop(self)", "def mainloop(self):\n\t\tself.root.after(100, self.tkloop)\n\t\tself.root.mainloop()", "def startLoop(self):\n if(self.loop is not None):\n raise Exception(\"Event loop is already started!\")\n self.loop = asyncio.new_event_loop()\n self.thread = Thread(target=start_thread_loop, args=(self.loop,))\n self.thread.setDaemon(True)\n self.thread.start()", "def event_loop(self):\n for event in pygame.event.get():\n self.scene.get_event(event)", "def blocking_input_loop(figure, event_names, timeout, handler):\n if figure.canvas.manager:\n figure.show() # Ensure that the figure is shown if we are managing it.\n # Connect the events to the on_event function call.\n cids = [figure.canvas.mpl_connect(name, handler) for name in event_names]\n try:\n figure.canvas.start_event_loop(timeout) # Start event loop.\n finally: # Run even on exception like ctrl-c.\n # Disconnect the callbacks.\n for cid in cids:\n figure.canvas.mpl_disconnect(cid)", "def _run(self):\n while(self._loop):\n pass", "def loop_run(self):\n self.log_debug(\"Running loop\")\n import cothread\n self.cothread = cothread\n self._loop_state = LState.Running\n if self.loop_event:\n # Call unbound function with a weak reference to self so that\n # garbage collector will call __del__ when we finish\n event_loop = weak_method(self.event_loop)\n loop_event = weak_method(self.loop_event)\n self.event_loop_proc = cothread.Spawn(event_loop, loop_event)\n else:\n self.event_loop_proc = cothread.Pulse()", "def main():\n PanelDemo().mainloop()", "def takeControl(self):\n mainloop()", "def takeControl(self):\n mainloop()", "def rfactor_event_loop():\n if RfactorLiveEvent.event.is_set():\n is_live = RfactorLiveEvent.get_nowait()\n # -- Update rFactor live state to front end\n if is_live is not None:\n eel.rfactor_live(is_live)\n\n if RfactorStatusEvent.event.is_set():\n status = RfactorStatusEvent.get_nowait()\n # -- Update rFactor status message in front end\n if status is not None:\n logging.debug('Updating rf2 status message: %s', status)\n eel.rfactor_status(status)\n\n RfactorStatusEvent.reset()", "def get_test_event_loop():\n if not _IS_XOS_ASYNC:\n loop = _asynclib.get_event_loop()\n else:\n loop = _get_xos_async_test_event_loop()\n return loop", "def 
set_main(self, main_loop):\n self.main_loop = main_loop", "def start_call_back_loop(loop: asyncio.AbstractEventLoop) -> None:\n asyncio.set_event_loop(loop)\n loop.run_forever()", "def _start_io_event_loop(self):\r\n self._event_loop_started = threading.Lock()\r\n self._event_loop_started.acquire()\r\n threading.Thread(None, self._io_event_loop_thread).start()\r\n self._event_loop_started.acquire()", "def loop(self):\n raise NotImplementedError()", "def main_loop(self):\n dt = 0\n self.clock.tick(FPS)\n while not self.done:\n self.event_loop()\n self.update(dt)\n self.render()\n dt = self.clock.tick(FPS) / 1000.0", "def _preloop_hook(self) -> None:\n self.selectorloop = SelectorThread(daemon=True)\n self.selectorloop.start()", "def test_eventloop_api(self):\n from twisted.python.log import startLoggingWithObserver\n from crochet import _shutdown\n self.assertIsInstance(_main, EventLoop)\n self.assertEqual(_main.setup, setup_crochet)\n self.assertEqual(_main.no_setup, no_setup)\n self.assertEqual(_main.run_in_reactor, run_in_reactor)\n self.assertEqual(_main.wait_for, wait_for)\n self.assertIdentical(_main._atexit_register, _shutdown.register)\n self.assertIdentical(\n _main._startLoggingWithObserver, startLoggingWithObserver)\n self.assertIdentical(_main._watchdog_thread, _shutdown._watchdog)", "def loop(self):\n return self.caller.location.ndb.event_line_loop", "def _spin_wx(self):\n import wx\n app = wx.GetApp()\n if app is not None and wx.Thread_IsMain():\n evtloop = wx.EventLoop()\n ea = wx.EventLoopActivator(evtloop)\n while evtloop.Pending():\n evtloop.Dispatch()\n app.ProcessIdle()\n del ea", "def set_asyncio_event_loop(event_loop_path: Optional[str]) -> AbstractEventLoop:\n if event_loop_path is not None:\n event_loop_class: Type[AbstractEventLoop] = load_object(event_loop_path)\n event_loop = event_loop_class()\n asyncio.set_event_loop(event_loop)\n else:\n try:\n with catch_warnings():\n # In Python 3.10.9, 3.11.1, 3.12 and 3.13, a DeprecationWarning\n # is emitted about the lack of a current event loop, because in\n # Python 3.14 and later `get_event_loop` will raise a\n # RuntimeError in that event. 
Because our code is already\n # prepared for that future behavior, we ignore the deprecation\n # warning.\n filterwarnings(\n \"ignore\",\n message=\"There is no current event loop\",\n category=DeprecationWarning,\n )\n event_loop = asyncio.get_event_loop()\n except RuntimeError:\n # `get_event_loop` raises RuntimeError when called with no asyncio\n # event loop yet installed in the following scenarios:\n # - Previsibly on Python 3.14 and later.\n # https://github.com/python/cpython/issues/100160#issuecomment-1345581902\n event_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(event_loop)\n return event_loop", "async def _main(self):\n while True:\n time.sleep(1)", "def get_event_loop():\n try:\n return asyncio.get_running_loop()\n except RuntimeError:\n return asyncio.new_event_loop()", "def run(self):\n self.cmdloop()", "def run():\n gui = GUI()\n gui.mainloop()", "def run(self):\n if self.okay:\n ExtLoopWin32.run()", "def reactor_loop():\n def on_running():\n \"\"\"\n called when the twisted reactor is running\n \"\"\"\n log.msg('reactor_loop Starting')\n try:\n conn = client.connect(reactor)\n si446x_do = Si446xComponent(conn)\n conn.addCallback(si446x_do.start)\n conn.addErrback(si446x_do.on_error)\n except error.DBusException, e:\n log.msg('reactor_loop Setup Error: {}'.format(e))\n reactor.stop()\n\n signal.signal(signal.SIGINT, SIGINT_CustomEventHandler)\n signal.signal(signal.SIGHUP, SIGINT_CustomEventHandler)\n reactor.callWhenRunning(on_running)\n reactor.run()", "def run(self):\n\n while not self.done:\n\n self.event_loop()\n\n self.update()", "def event_loop(self, index):\n logger.debug(\"Starting event loop \"+str(index))\n self.stop_flag = Event()\n stop_flag = self.stop_flag # Saving a reference.\n # stop_flag is an object that will signal the current input thread to exit or not exit once it's done processing a callback.\n # It'll be called just before self.stop_flag will be overwritten. 
However, we've got a reference to it and now can check the exact flag this thread itself constructed.\n # Praise the holy garbage collector.\n stop_flag.clear()\n while not stop_flag.isSet():\n if self.get_current_proxy() is not None:\n try:\n key = self.queue.get(False, 0.1)\n except Queue.Empty:\n # here an active event_loop spends most of the time\n sleep(0.1)\n except AttributeError:\n # typically happens upon program termination\n pass\n else:\n # here event_loop is usually busy\n self.process_key(key)\n else:\n # No current proxy set yet, not processing anything\n sleep(0.1)\n logger.debug(\"Stopping event loop \"+str(index))", "def run_event_loop(async_loop, wait_tasks):\n while True:\n try:\n async_loop.run_until_complete(wait_tasks)\n break\n except RuntimeError:\n time.sleep(3)", "async def test_events_handled_on_event_loop(self):\n session = _create_test_session(asyncio.get_running_loop())\n\n handle_event_spy = MagicMock(\n side_effect=session._handle_scriptrunner_event_on_event_loop\n )\n session._handle_scriptrunner_event_on_event_loop = handle_event_spy\n\n # Send a ScriptRunner event from another thread\n thread = threading.Thread(\n target=lambda: session._on_scriptrunner_event(\n sender=MagicMock(), event=ScriptRunnerEvent.SCRIPT_STARTED\n )\n )\n thread.start()\n thread.join()\n\n # _handle_scriptrunner_event_on_event_loop won't have been called\n # yet, because we haven't yielded the eventloop.\n handle_event_spy.assert_not_called()\n\n # Yield to let the AppSession's callbacks run.\n # _handle_scriptrunner_event_on_event_loop will be called here.\n await asyncio.sleep(0)\n\n handle_event_spy.assert_called_once()", "def Gameloop():", "def start_loop(self, loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()", "def update(self):\n asyncio.set_event_loop(asyncio.new_event_loop())\n self.listen(self.port)\n self.loop = IOLoop.instance()\n self.loop.start()", "def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. 
Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise", "def mock_event_loop() -> Generator[AbstractEventLoop, Any, None]:\n loop = get_event_loop()\n yield loop\n loop.close()", "def main():\n BouncyGUI().mainloop()", "def postprocess(self, loop):\n return loop", "def use_twisted(app):\n activity.EventLoop <<= activity.TwistedEventLoop\n REACTOR_INIT.notify(app)", "def event_loop(self):\n for event in pg.event.get():\n self.keys = pg.key.get_pressed()\n if event.type == pg.QUIT or self.keys[pg.K_ESCAPE]:\n self.done = True\n self.cannon.get_event(event, self.objects)", "def main_loop(self):\n try:\n self.state_machine.set_state('wait')\n\n while True:\n events = list(reversed(pygame.event.get())) # Take all events, most recent first\n\n if self.find_quit_event(events):\n break\n\n if self.find_fullscreen_event(events):\n self.window.toggle_fullscreen()\n\n event = self.find_resize_event(events)\n if event:\n self.window.resize(event.size)\n\n self.state_machine.process(events)\n\n finally:\n self.led_picture.quit()\n self.led_print.quit()\n GPIO.cleanup()\n self.camera.quit()\n self.printer.quit()\n pygame.quit()", "def maybe_patch_ioloop():\n if (\n sys.platform.startswith(\"win\")\n and tornado.version_info < (6, 1)\n and sys.version_info >= (3, 8)\n ):\n try:\n from asyncio import WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy\n except ImportError:\n pass\n # not affected\n else:\n from asyncio import get_event_loop_policy, set_event_loop_policy\n\n if type(get_event_loop_policy()) is WindowsProactorEventLoopPolicy:\n # WindowsProactorEventLoopPolicy is not compatible with tornado 6\n # fallback to the pre-3.8 default of Selector\n set_event_loop_policy(WindowsSelectorEventLoopPolicy())", "def event_loop(self):\n while self.ack is False:\n gevent.sleep(self.loop_interval)\n output_service = self.get_directory_service_proxy().get_service(\"mock-output-service\")\n output_service.put(\"test-worker-work-result\")\n self.ack = True", "def _event_loop_thread(self):\n _logger.debug(f'Started DataSource event loop thread {self._thread}.')\n self._event_loop.set_debug(True)\n asyncio.set_event_loop(self._event_loop)\n try:\n self._event_loop.call_soon(self._main_helper)\n self._event_loop.run_forever()\n finally:\n self._event_loop.close()\n _logger.info(f'Source [{(self._addr, self._port)}] closed.')", "def main(self):\n self.root.mainloop()", "def _get_loop(self, *args: typing.Any, **kwargs: typing.Any) -> typing.Optional[asyncio.AbstractEventLoop]:\n if callable(self.loop_getter):\n if self.loop_getter_need_context:\n return self.loop_getter(*args, **kwargs) # pylint: disable=not-callable\n return self.loop_getter() # pylint: disable=not-callable\n return self.loop_getter", "def run(self):\n self.window.mainloop()", "def _hijack_tk(self):\n import Tkinter\n orig_mainloop = gtk.main\n dumb_ml = _DummyMainloop(orig_mainloop, self, GUI_TK)\n Tkinter.Misc.mainloop = dumb_ml\n Tkinter.mainloop = dumb_ml", "def mainloop(duration=1):\n\n _triggered.clear()\n NSApp = _NSApp()\n _stop_after(duration)\n msg(NSApp, n(\"run\"))\n if not _triggered.is_set():\n # app closed without firing callback,\n # probably due to last window being closed.\n # Run the loop manually in this case,\n # since there may be events still to process (ipython/ipython#9734)\n CoreFoundation.CFRunLoopRun()", "def loop(self):\n\n print(self.id + \" needs an overridden loop method\")", "def run(self):\n self.loop.spawn_callback(self.main)\n 
self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "async def checkNewLoop(self):\n pass", "def __loop(self):\n\n self.__update_table()\n self.__update_labels()\n if self.remote_stop:\n self.__stop(\"remote telegram admin\")\n else:\n self.__main_window.after(1000, self.__loop)", "def __exit__(self, exc_type, exc_val, exc_tb):\n if self.event_loop:\n self.event_loop.stop()", "def postloop(self):\n print 'Bye!'", "async def main(logic):\n await GLOBE.connect()\n await asyncio.wait([GLOBE.run_loop(), logic()])", "def run(self):\n try:\n self.event_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.event_loop)\n template_system = LegionTemplateEngine(self.template_path, self.output_path)\n template_system.render_loop()\n\n except Exception as exception:\n self.raised_exception = exception\n raise self.raised_exception", "def __init__(self):\n self.gameloop()" ]
[ "0.81281793", "0.7433964", "0.71915084", "0.71593684", "0.7092627", "0.70529735", "0.70341283", "0.70070803", "0.69741666", "0.69741666", "0.69633144", "0.69001657", "0.6889111", "0.6859079", "0.6846811", "0.6844431", "0.68335426", "0.68281376", "0.6722399", "0.6682619", "0.66650563", "0.6625124", "0.661982", "0.66001344", "0.65723383", "0.65689635", "0.6408248", "0.6408248", "0.6408248", "0.6408248", "0.63876116", "0.63644767", "0.63459575", "0.6328928", "0.6306496", "0.63052034", "0.6303287", "0.6303287", "0.62774456", "0.6238053", "0.61861664", "0.61696285", "0.6160671", "0.61584836", "0.6114627", "0.61127836", "0.6074787", "0.6070557", "0.6065858", "0.606554", "0.60187423", "0.60187423", "0.59994286", "0.5994193", "0.5973154", "0.5955565", "0.5932075", "0.5930467", "0.5928529", "0.59139395", "0.591257", "0.5911105", "0.58966964", "0.58927506", "0.5888072", "0.58858407", "0.58642143", "0.58580196", "0.5851221", "0.5846065", "0.583578", "0.5828538", "0.58175385", "0.5812145", "0.58116597", "0.5810035", "0.5808205", "0.5807698", "0.58030474", "0.5792686", "0.5784407", "0.57761276", "0.57671946", "0.5756962", "0.57500523", "0.57380444", "0.5736097", "0.57273394", "0.57204276", "0.57167107", "0.5716562", "0.5708114", "0.56995285", "0.5698656", "0.5695172", "0.56891704", "0.56885856", "0.5688358", "0.5678936", "0.56783384", "0.56728864" ]
0.0
-1
Updates x, y (memory-shared) coordinates with actual mouse position with a given frequency.
def stream(bus, address, frequency, x, y, stop_trigger): mouse = Mouse.list_connected(bus=bus, address=address)[0] delay = 1./frequency while not stop_trigger: x1, y1 = mouse.get_position_change() x.value += x1 y.value += y1 time.sleep(delay)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()", "def mouse_position_event(self, x: int, y: int):\n pass", "def update_pointer(self):\n pointer_length = -self.pointer_frac * self.radius\n # Add pi/2 to the angle because we consider 0 radians to be pi/2 in standard position.\n x = pointer_length * math.cos(self._radians + math.pi / 2)\n y = pointer_length * math.sin(self._radians + math.pi / 2)\n self.coords(self.pointer, 0, 0, x, y)", "def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n \n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass", "def update(self):\n # Get the current mouse position. This returns the position\n # as a list of two numbers.\n pos = pygame.mouse.get_pos()\n\n # Set the player x position to the mouse x position\n self.rect.x = pos[0]", "def update(self):\r\n self.x = games.mouse.x\r\n self.y = games.mouse.y\r\n self.check_collide()", "def update(self):\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()", "def set_position(self, updated):\n self.buff_x = updated[0]\n self.buff_y = updated[1]", "def update(self):\n self.x = games.mouse.x\n self.y = games.mouse.y\n self.check_collide()", "def handle_mouse(self, x, y):\n self.x = x\n self.y = y\n global _pending_handle_mouse\n if not _pending_handle_mouse:\n _pending_handle_mouse = True\n if self.fig.document is not None:\n self.fig.document.add_timeout_callback(self.handle_mouse_callback, 100)\n else:\n self.handle_mouse_callback()", "def mousePosition(self):", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def handle_mouse(self, x, y):\n pass", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n\n print(x)\n print(y)\n print(delta_x)\n print(delta_y)\n\n\n #self.manage_crosshair()\n \n \n\n #self.crosshair_sprite.center_x += delta_x\n #self.crosshair_sprite.center_y += delta_y\n\n\n self.crosshair_relative_xoffset += delta_x\n self.crosshair_relative_yoffset += delta_y", "def update(self):\n\n\t\tself.x = games.mouse.x\n\t\tself.y = games.mouse.y\n\t\tself.check_collide()", "def __master_cursor_pos_callback(self, glfw_window, xpos, ypos):\n # flip glfw window space to match OGL space(like texture that has bottom left origin)\n ypos = self.window.glyph.size[1] - ypos\n\n # update values\n self.__pos_instant = Vec(xpos, ypos, 0)\n self.__accel = self.__pos_instant - self.__pos_prev\n self.__pos_prev = self.__pos_instant\n\n # call registered callbacks\n self.call_cursor_pos_callback(glfw_window, *self.__pos_instant.xy, mouse=self)", "def update(self, delta_time):\r\n #for pixels in self.pixel:\r\n for line in self.cursor:\r\n line.draw()\r\n \r\n self.check_keys()", "def update(self):\r\n self.x = 60\r\n self.y = games.mouse.y\r\n self.check_collide()", "def on_mouse_motion(self, x, y, delta_x, delta_y):\n \n pass", "def on_mouse_move(self, event: PointEvent):\n self.x = event.x\n self.y = event.y\n self.handle_mouse(self.x, self.y)", "def xy(self, xy_position):\n print(f\"xy: {xy_position}\")\n self.device_control.xy = xy_position\n yield", "def OnMouseMotion(self, evt):\n if evt.Dragging() and evt.LeftIsDown():\n self.lastx, self.lasty = self.x, self.y\n self.x, self.y = evt.GetPosition()\n self.Refresh(False)", "def handle_mouse(self, x, y):\n # we are in aperture mode\n if self.aperture_id:\n if self.aperture_id not in 
self.aperture_model.aperture_models.keys():\n pass\n model = self.aperture_model.aperture_models[self.aperture_id]\n location = model.source.data['location'][0]\n\n if self.mode == 'width':\n width = abs(location - x)\n model.update_values(start=location - width,\n end=location + width)\n elif self.mode == 'left':\n if x < location:\n model.update_values(start=x)\n elif self.mode == 'right':\n if x > location:\n model.update_values(end=x)\n elif self.mode == 'location':\n diff = x - location\n model.update_values(location=x,\n start=model.source.data['start'][0] + diff,\n end=model.source.data['end'][0] + diff)\n\n self.last_x = x\n self.last_y = y\n return False", "def on_mouse_move(self, event):\n self.mouse = [event.xdata, event.ydata]\n\n # Update pan view on mouse move\n if self.panning is True:\n for a in self.pan_axes:\n a.drag_pan(1, event.key, event.x, event.y)\n\n # Async re-draw (redraws only on thread idle state, uses timer on backend)\n self.canvas.draw_idle()\n\n ##### Temporary place-holder for cached update #####\n self.update_screen_request.emit([0, 0, 0, 0, 0])", "def update_tempLists(self):\n self.current_position = self.mediaPlayer.position()\n\n # I add the current value, calculates its index, and removes it. This method is used to know which index the pointer is at.\n bisect.insort(self.xValues,self.current_position)\n self.position_index = self.xValues.index(self.current_position)\n self.xValues.remove(self.current_position)\n\n n = 120\n if self.position_index < n: \n self.tempXList = self.xValues[:self.position_index + n]\n self.tempYList = self.yValues[:self.position_index + n]\n self.tempCList = self.colors[:self.position_index + n]\n else:\n self.tempXList = self.xValues[self.position_index - n :self.position_index + n]\n self.tempYList = self.yValues[self.position_index - n :self.position_index + n]\n self.tempCList = self.colors[self.position_index - n :self.position_index + n]", "def handle_motion(self, x, y):\n if self.pressed_flag:\n self.last_point = (x, y)\n\n # trigger canvas to redraw itself\n self.redraw()", "def move_to_point(self, destination, frequency):\r\n # index 0 represents x, index 1 represents y\r\n if frequency > 1:\r\n x_towards = int(round((destination[0] - current_position[0]) / frequency))\r\n if x_towards > 1:\r\n x_towards -= 1\r\n y_towards = int(round((destination[1] - current_position[1]) / frequency))\r\n if y_towards > 1:\r\n y_towards -= 1\r\n current_position = list(self.current_position)\r\n if current_position[0] < destination[0]: # if x is west\r\n current_position[0] = current_position[0] + x_towards\r\n x_change = int(frequency)\r\n if current_position[0] >= destination[0]: # if this overshoots:\r\n current_position[0] = destination[0]\r\n elif current_position[0] == destination[0]:\r\n pass # don't move\r\n else:\r\n current_position[0] = current_position[0] - x_towards\r\n x_change = 0 - int(frequency)\r\n if current_position[0] <= destination[0]: # if this overshoots:\r\n current_position[0] = destination[0]\r\n if current_position[1] < destination[1]:\r\n current_position[1] = current_position[1] + y_towards\r\n y_change = int(frequency)\r\n if current_position[1] >= destination[1]: # if this overshoots:\r\n current_position[1] = destination[1]\r\n elif current_position[1] == destination[1]:\r\n pass\r\n else:\r\n current_position[1] = current_position[1] - y_towards\r\n y_change = 0 - int(frequency)\r\n if current_position[1] <= destination[1]: # if this overshoots:\r\n current_position[1] = destination[1]\r\n\r\n 
current_position = tuple(current_position)\r\n self.move_to(current_position)\r\n for i in list(range(1, int(frequency))):\r\n human_avoidance_list.append((current_position[0] + x_change, current_position[1] + y_change))\r\n self.current_position = current_position\r\n if current_position[0] == destination[0] and current_position[1] == destination[1]:\r\n self.resource_check = 1\r\n else:\r\n self.resource_frequency += self.resource_frequency / 6 # 6 * 5 days in a step = 30 days in a month\r\n # resource frequency is listed monthly in the source file\r\n # this indicates time passing each step until the gatherer can move\r", "def track_point(self, flags, x, y):\n ret_val, x.value, y.value = self._track_point(flags, x.value, y.value)\n return ret_val", "def _updateOnMouseState(self, state):\n x = state.X.abs\n y = state.Y.abs\n \n mscale = self.mouse_icon.getScale() \n \n if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width:\n x = x - mscale[0] - 10\n else:\n x += self.mouse_offset\n \n if (y + mscale[1] + self.mouse_offset) > render_engine.Window.height:\n y = y - mscale[1] - 10\n else:\n y += self.mouse_offset\n \n self.mouse_icon.setPosition((x, y))", "def click_action(event, ax):\n global newcoords, oldcoords, count\n\n if count % 2 == 0:\n newcoords.append((event.xdata, event.ydata))\n print('NEW', event.xdata, event.ydata)\n else:\n oldcoords.append((event.xdata, event.ydata))\n print('OLD', event.xdata, event.ydata)\n # update count\n count += 1", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def update(self):\n self.mousePos = pygame.mouse.get_pos()\n self.update_button_hover_status()", "def update(self):\n self.bpos_x += 3", "def _motion(self, event):\n if self.current:\n # modify the current line by changing the end coordinates\n # to be the current mouse position\n coords = event.widget.coords(self.current)\n coords[2] = event.x\n coords[3] = event.y\n\n event.widget.coords(self.current, *coords)", "def cmdMouse(self, dev):\n self.hitsMouses[dev] = False\n f = open(self.inputPath + dev, 'rb')\n while self.live:\n f.read(500) # 144 kan eigenlijk alles zijn, behalve absurbt hoge waarden..\n self.hitsMouses[dev] = True\n time.sleep(0.1)", "def on_mousemove(event, x, y, flags, userparam):\n global mouse_pos\n global source_img, source_msk, display_img\n global DRAW_MODE\n\n if event == cv.EVENT_MOUSEMOVE:\n mouse_pos = (x, y)\n\n if flags & cv.EVENT_FLAG_SHIFTKEY:\n current_label = LABEL_BACKGROUND\n else:\n current_label = CURRENT_LABEL\n\n if DRAW_MODE == \"point\":\n if flags & cv.EVENT_FLAG_CTRLKEY:\n cv.circle(source_msk, (x, y), SHAPE_SIZE, current_label, -1)\n elif DRAW_MODE == \"line\":\n # line drawing is done in the line-mode keypress handler (keydown())\n pass", "def update(self, xdata=[], ydata=[]):\n for i in range(4):\n self.lines[i].set_data(xdata[i], ydata[i])\n\n self.blit()", "def set_new_location(self, xPos, yPos):", "def update_position_and_spike_frame(self, step=0):\n if self._spk_pos_ax is not None:\n # print(self._pos_x)\n # print(self._pos_y)\n # TODO: Add colors based on which cluster the spikes are coming from\n self._spk_pos_frame[0].set_data((self._spk_pos_x, self._spk_pos_y))\n # self._spk_pos_frame[0].set_color(self._spk_clusters)\n self._spk_pos_frame[1].set_data((self._pos_x, self._pos_y))\n if len(self._speed) > 0:\n self._spk_pos_frame[2].set_text('speed = %.2fcm/s'%self._speed[-1])\n return self._spk_pos_frame", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += 
200", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def changespeed(self, x, y):\n self.change_x += x\n self.change_y += y", "def update(self, i):\n\n self.current_position = self.mediaPlayer.position()\n \t\n \n\n \"\"\"\n \"Record mode\" and \"wide x-axis mode\" shouls not work together. Wide mode is only for reading data, not writing data. \n The user is not allowed to write data when 16 000 points are displayed (wide mode) on tha diagram. If he does so, the frequency of the graph points decreases with time. \n \"\"\"\n \n if self.checkbox.isChecked():\n self.wideRadio.setEnabled(False)\n if not self.checkbox.isChecked():\n self.wideRadio.setEnabled(True)\n if self.wideRadio.isChecked():\n self.checkbox.setEnabled(False)\n if not self.wideRadio.isChecked():\n self.checkbox.setEnabled(True)\n \n\n\n if self.checkbox.isChecked() and self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n \n self.savedRecently = False\n\n\n self.current_position = self.mediaPlayer.position()\n\n \n if self.xValues == []:\n # \"If the list of xValues is empty\". This happens only in the start of the plotting process.\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n #self.position_index = self.xValues.index(self.current_position)\n \n\n if self.xValues != []:\n\n if self.current_position > max(self.xValues):\n # \"If the point is bigger than the last point\". I.e if the point will be plotted in the end of the current graph.\n\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n\n if self.current_position < max(self.xValues):\n # \"If the point is smaller than the last point\". I.e if the point will be plotted in the middle of the current graph.\n\n \n if self.mediaPlayer.position() < 100:\n # The program has a problem of removing a point if x=0. This if-statement solves the problem.\n self.xValues.pop(0)\n self.yValues.pop(0)\n self.colors.pop(0)\n \n\n\n # Clearing all the points that are 100 ms (or less) in front of the current position. 
\n for number in range(self.current_position, self.current_position + 100):\n if number in self.xValues:\n self.yValues.pop(self.xValues.index(number))\n self.colors.pop(self.xValues.index(number))\n self.xValues.remove(number)\n \n \n \n # Plot new points\n bisect.insort(self.xValues,self.current_position) # Through this method, the element is inserted in order.\n self.yValues.insert(self.xValues.index(self.current_position), self.mouseY)\n self.colors.insert(self.xValues.index(self.current_position), self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n \n\n\n # View modes: zoom or wide.\n\n if self.zoomRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n self.canvas.axes.set_xlim(self.current_position-5000, self.current_position+5000)\n\n self.update_tempLists()\n\n self.curve = self.canvas.axes.scatter(self.tempXList, self.tempYList, s=10 , c=self.tempCList)\n\n\n\n if self.wideRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n\n if self.mediaPlayer.duration() != 0:\n self.canvas.axes.set_xlim(0, self.mediaPlayer.duration())\n elif self.xValues != []:\n self.canvas.axes.set_xlim(0, max(self.xValues))\n\n self.curve = self.canvas.axes.scatter(self.xValues, self.yValues, s=10 , c=self.colors)\n\n \n\n # I remove the previous vertical and horizontal lines. If I do not remove them, the program gets slower and slower, and the frequency of the points decreases with time.\n self.hline.remove()\n self.vline.remove()\n \n # New vertical and horizontal lines are created and updated to the correct values.\n self.vline = self.canvas.axes.axvline(x=self.mediaPlayer.position(), color='gray',linestyle=\":\")\n self.hline = self.canvas.axes.axhline(y=self.mouseY, color='gray',linestyle=\":\")\n\n\n\n return [self.curve] + [self.vline] + [self.hline]", "def ev_mousemotion(self, event: MouseMotion) -> None:", "def update(self):\n self.t = time()\n self.frame += 1\n self.loop(self)\n self.draw_bg()\n self.draw_C()\n if self.cursor:\n self.draw_rect(*self.pos, RED, 2)\n self.draw_grid()\n self.draw_T()\n self.show_info()\n for (surf, rect) in self.surf_list:\n self.screen.blit(surf, rect)\n pygame.display.update()\n self.clock.tick(self.fps)", "def normal_mouse_move(self, event):\n plot = self.component\n if plot is not None:\n if isinstance(plot, BaseXYPlot):\n ndx = plot.map_index((event.x, event.y), index_only = True)\n x = plot.index.get_data()[ndx]\n y = plot.value.get_data()[ndx]\n print self.format % (x,y)\n else:\n print \"dataprinter: don't know how to handle plots of type\",\n print plot.__class__.__name__\n return", "def ev_MOUSEMOTION(self, event):", "def mouse_move(self, pos):\n if (self.setup_type == \"position\"):\n x, y = pos\n self.canvas.move(x, y)", "def report_mouse_position(x_pos=0, y_pos=0):\n print('x-axis:', x_pos, ' Y-axis: ', y_pos, flush=True)", "def update():\n\n # Get last new x value as last x value + 1\n x_n0 = data_source.data['x'][-1]\n x_n1 = x_n0 + 0.1\n\n # Assign a new y value\n y_n1 = param_source.data['amp_sine'][0] * np.sin(x_n1) +\\\n param_source.data['amp_rand'][0] * np.random.rand(1)\n\n # Get old last average and use to calculate new average\n avg_n1 = _get_new_avg(data_source,\n y_n1,\n param_source.data['rollover'][0])\n\n # Make a dict of data to add on to the end of the source\n additional_data = dict(x=[x_n1], y=[y_n1], avg=[avg_n1])\n\n # Stream the new data with a rollover value of 10\n data_source.stream(additional_data,\n rollover=param_source.data['rollover'][0])\n\n # 
logger.debug(param_source.data['update_delay'][0])", "def update():\n global dragon, x, y, position, angle_left, angle_right, size, new\n x, y, position, angle_left, angle_right, new = modify_pos(x, y, position,\n angle_left,\n angle_right,\n size, new)\n dragon.setData(x, y) # update plot", "def pointer_notify_motion(\n self, time_msec: int, surface_x: float, surface_y: float\n ) -> None:\n lib.wlr_seat_pointer_notify_motion(self._ptr, time_msec, surface_x, surface_y)", "def handle_mouse(obj, event):\n if event:\n x = event.globalX()\n y = event.globalY()\n x_w = obj.offset.x()\n y_w = obj.offset.y()\n obj.move(x - x_w, y - y_w)", "def follow_mouse(self, mouse):\n half_width = self.width() / 2\n self.left = mouse.get_x() - half_width\n self.right = mouse.get_x() + half_width", "def onMove(self, event):\n\t\tif (event.xdata != None and event.ydata != None and event.xdata != self.xdata and event.ydata != self.ydata):\n\n\t\t\tself.xdata = event.xdata\n\t\t\tself.ydata = event.ydata\n\n\t\t\tfor loop in range(4):\n\t\t\t\tself.stokesFig.canvas.restore_region(self.background[loop])\n\t\t\t\tself.obsStokes[loop].set_ydata(self.stokes[loop][event.ydata, event.xdata, :])\n\t\t\t\tself.axStokes[loop].draw_artist(self.obsStokes[loop])\n\t\t\t\tself.axStokes[loop].draw_artist(self.axStokes[loop].get_yaxis())\n\t\t\t\tself.stokesFig.canvas.blit(self.axStokes[loop].bbox.expanded(1.4, 1.1))", "def trackMouse(self, mouse_event):\n if self.kinect.DepthFrameRaw.any() != 0:\n u = mouse_event.pos().x()\n v = mouse_event.pos().y()\n d = self.kinect.DepthFrameRaw[v,u]\n self.ui.rdoutMousePixels.setText(\"(\"+str(u)+\",\"+str(v)+\",\"+str(d)+\")\")\n worldCoords = self.kinect.pix2Glob(np.array([u,v,1])) * 1000 #1000 for mm\n self.ui.rdoutMouseWorld.setText(f\"({np.round(worldCoords[0])},{np.round(worldCoords[1])},{np.round(worldCoords[2])})\")", "def cambiovelocidad(self,x,y):\n self.change_x += x\n self.change_y += y", "def move_mouse(kf_x, m, img): \n exponent = 1.6\n x, y, x_vel, y_vel = (int(kf_x[0]), int(kf_x[1]), kf_x[2], kf_x[3])\n mx, my = m.position()\n win_height, win_width, channel = img.shape\n x_screen, y_screen = m.screen_size()\n min_x, max_x = 0, x_screen\n min_y, max_y = 0, y_screen \n\n #Calculations\n speed = np.sqrt(x_vel**2 + y_vel**2) \n power = math.pow(speed, exponent) \n ratio = speed / power\n theta = math.atan2(y_vel, x_vel) \n x_comp = power * math.cos(theta) \n y_comp = power * math.sin(theta) \n xf, yf = mx + x_comp, my + y_comp\n\n if xf < min_x: \n xf = min_x\n elif xf > max_x: \n xf = max_x\n elif yf < min_y: \n yf = min_y\n elif yf > max_y: \n yf = max_y\n m.move(xf, yf)\n return speed", "def mouseReleaseEvent(self, event):\n width = self.frameGeometry().width()\n height = self.frameGeometry().height()\n cursor = QtGui.QCursor()\n new_pos = self.mapFromGlobal(cursor.pos())\n x = new_pos.x()\n y = new_pos.y()\n self.__selector_y = y/float(height) # normalized value of the y position\n \tself.__selector_x = x/float(width) #normalised value of the x position\n self.updatePixelColor()\n self.repaint()", "def mouseDragged(self, point, delta):\n pass", "def update_position(self, time_step):\n if not self.gpu:\n self.psi_hat[...] = fft.fftn(self.psi)\n update_position(self.psi_hat, time_step, self.m)\n self.psi[...] 
= fft.ifftn(self.psi_hat)\n else:\n cufft.cufftExecZ2Z(self.psi_plan, self.g_psi.ptr, self.g_psi_hat.ptr, cufft.CUFFT_FORWARD)\n self.g_psi_hat /= self.N\n self.g_mom_func(self.g_psi_hat, np.float64(time_step), np.int64(self.psi_hat.shape[0]), np.int64(self.psi_hat.shape[1]), np.int64(self.psi_hat.shape[2]), block=(8,8,8), grid=tuple([(i+7)/8 for i in self.psi_hat.shape]))\n cufft.cufftExecZ2Z(self.psi_plan, self.g_psi_hat.ptr, self.g_psi.ptr, cufft.CUFFT_INVERSE)", "def seek(self,event):\r\n if self.app.controlLock.locked():\r\n return\r\n self.app.controlLock.acquire()\r\n x = event.x\r\n scalex,_ = self.getScale()\r\n scalex_secs = [scalex[0]/self.samplerate,scalex[1]/self.samplerate]# Get x scale in seconds\r\n seekTo = (x/self.w) * (scalex_secs[1]-scalex_secs[0]) + scalex_secs[0]# Transform pixel coordinates to represented time\r\n self.app.videoPlayer.pause()\r\n self.app.videoPlayer.seek(seekTo-self.app.dataOffset)\r\n self.app.videoPlayer.pause()# Restart audio to sync\r\n self.update(self.app.videoPlayer.startTimestamp)\r\n self.draw()\r\n self.app.videoPlayer.play()\r\n self.app.controlLock.release()", "def updateAccelList():\n # Get new sensor update/s\n x, y, z = getSensorAccel()\n \n # Pop out old values (idx 0)\n listAccelX.pop(0) # ignore outgoing value\n listAccelY.pop(0) # ignore outgoing value\n listAccelZ.pop(0) # ignore outgoing value\n \n # Append the new values\n listAccelX.append(x)\n listAccelY.append(y)\n listAccelZ.append(z)", "def getPoslbFFmpeg(self, event):\n # if self.measurement_w:\n\n if hasattr(self, \"measurement_w\") and self.measurement_w is not None and self.measurement_w.isVisible():\n x = event.pos().x()\n y = event.pos().y()\n\n # distance\n if self.measurement_w.rbDistance.isChecked():\n if event.button() == 1: # left\n self.draw_point(x, y, \"lime\")\n self.memx, self.memy = x, y\n\n if event.button() == 2 and self.memx != -1 and self.memy != -1:\n self.draw_point(x, y, \"lime\")\n self.draw_line(self.memx, self.memy, x, y, \"lime\")\n\n if self.FFmpegGlobalFrame in self.measurement_w.draw_mem:\n self.measurement_w.draw_mem[self.FFmpegGlobalFrame].append([\"line\", self.memx, self.memy, x, y])\n else:\n self.measurement_w.draw_mem[self.FFmpegGlobalFrame] = [[\"line\", self.memx, self.memy, x, y]]\n\n d = ((x - self.memx) ** 2 + (y - self.memy) ** 2) ** 0.5\n try:\n d = d / float(self.measurement_w.lePx.text()) * float(self.measurement_w.leRef.text())\n except:\n QMessageBox.critical(self, programName,\n \"Check reference and pixel values! 
Values must be numeric.\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n self.measurement_w.pte.appendPlainText(\"Time: {}\\tFrame: {}\\tDistance: {}\".format(self.getLaps(),\n self.FFmpegGlobalFrame,\n round(d, 1)))\n self.measurement_w.flagSaved = False\n self.memx, self.memy = -1, -1\n\n # angle 1st clic -> vertex\n if self.measurement_w.rbAngle.isChecked():\n if event.button() == 1: # left for vertex\n self.draw_point(x, y, \"lime\")\n self.memPoints = [(x, y)]\n\n if event.button() == 2 and len(self.memPoints):\n self.draw_point(x, y, \"lime\")\n self.draw_line(self.memPoints[0][0], self.memPoints[0][1], x, y, \"lime\")\n\n self.memPoints.append((x, y))\n\n if len(self.memPoints) == 3:\n self.measurement_w.pte.appendPlainText(\"Time: {}\\tFrame: {}\\tAngle: {}\".format(self.getLaps(),\n self.FFmpegGlobalFrame,\n round(angle(\n self.memPoints[\n 0],\n self.memPoints[\n 1],\n self.memPoints[\n 2]),\n 1)\n ))\n self.measurement_w.flagSaved = False\n if self.FFmpegGlobalFrame in self.measurement_w.draw_mem:\n self.measurement_w.draw_mem[self.FFmpegGlobalFrame].append([\"angle\", self.memPoints])\n else:\n self.measurement_w.draw_mem[self.FFmpegGlobalFrame] = [[\"angle\", self.memPoints]]\n\n self.memPoints = []\n\n # Area\n if self.measurement_w.rbArea.isChecked():\n if event.button() == 1: # left\n self.draw_point(x, y, \"lime\")\n if len(self.memPoints):\n self.draw_line(self.memPoints[-1][0], self.memPoints[-1][1], x, y, \"lime\")\n self.memPoints.append((x, y))\n\n if event.button() == 2 and len(self.memPoints) >= 2:\n self.draw_point(x, y, \"lime\")\n self.draw_line(self.memPoints[-1][0], self.memPoints[-1][1], x, y, \"lime\")\n self.memPoints.append((x, y))\n # close polygon\n self.draw_line(self.memPoints[-1][0], self.memPoints[-1][1], self.memPoints[0][0],\n self.memPoints[0][1], \"lime\")\n a = polygon_area(self.memPoints)\n\n if self.FFmpegGlobalFrame in self.measurement_w.draw_mem:\n self.measurement_w.draw_mem[self.FFmpegGlobalFrame].append([\"polygon\", self.memPoints])\n else:\n self.measurement_w.draw_mem[self.FFmpegGlobalFrame] = [[\"polygon\", self.memPoints]]\n try:\n a = a / (float(self.measurement_w.lePx.text()) ** 2) * float(\n self.measurement_w.leRef.text()) ** 2\n except:\n QMessageBox.critical(self, programName,\n \"Check reference and pixel values! Values must be numeric.\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n self.measurement_w.pte.appendPlainText(\"Time: {}\\tFrame: {}\\tArea: {}\".format(self.getLaps(),\n self.FFmpegGlobalFrame,\n round(a, 1)))\n\n self.memPoints = []", "def emulate_mouse(self, key_code, x_val, y_val, data):\n # Once again ignore Windows' relative time (since system\n # startup) and use the absolute time (since epoch i.e. 
1st Jan\n # 1970).\n self.update_timeval()\n\n events = []\n\n if key_code == 0x0200:\n # We have a mouse move alone.\n # So just pass through to below\n pass\n elif key_code == 0x020A:\n # We have a vertical mouse wheel turn\n events.append(self.emulate_wheel(data, 'y', self.timeval))\n elif key_code == 0x020E:\n # We have a horizontal mouse wheel turn\n # https://msdn.microsoft.com/en-us/library/windows/desktop/\n # ms645614%28v=vs.85%29.aspx\n events.append(self.emulate_wheel(data, 'x', self.timeval))\n else:\n # We have a button press.\n\n # Distinguish the second extra button\n if key_code == 0x020B and data == 2:\n key_code = 0x020B2\n elif key_code == 0x020C and data == 2:\n key_code = 0x020C2\n\n # Get the mouse codes\n code, value, scan_code = self.mouse_codes[key_code]\n # Add in the press events\n scan_event, key_event = self.emulate_press(\n code, scan_code, value, self.timeval)\n events.append(scan_event)\n events.append(key_event)\n\n # Add in the absolute position of the mouse cursor\n x_event, y_event = self.emulate_abs(x_val, y_val, self.timeval)\n events.append(x_event)\n events.append(y_event)\n\n # End with a sync marker\n events.append(self.sync_marker(self.timeval))\n\n # We are done\n self.write_to_pipe(events)", "def update(self, *args):\n self.last_chunk += 1\n if self.last_chunk >= len(self.chunk0):\n # self.queue.put(\"Stop\")\n self.last_chunk = 0\n\n i = self.last_chunk\n i0, i1 = self.chunk0[i], self.chunk1[i]\n self.queue.put(self.audio_dat[i0:i1])\n t0, t1 = i0*self.to_t, i1*self.to_t\n print(t0, t1)\n for line_artist in args:\n line_artist.set_xdata([t1, t1])\n args[0].figure.canvas.draw()", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def on_mouse_move(self, win, xpos, ypos):\n old = self.mouse\n self.mouse = (xpos, glfw.get_window_size(win)[1] - ypos)\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_LEFT):\n self.drag(old, self.mouse, glfw.get_window_size(win))\n if glfw.get_mouse_button(win, glfw.MOUSE_BUTTON_RIGHT):\n self.pan(old, self.mouse)", "def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y", "def update(self):\n pygame.event.pump()\n self.pos_x += 0\n if (pygame.key.get_pressed()[pygame.K_w]) and self.pos_y > 0:\n self.pos_y -= 1\n if (pygame.key.get_pressed()[pygame.K_a]) and self.pos_x > 0:\n self.pos_x -= 1\n if (pygame.key.get_pressed()[pygame.K_d]) and self.pos_x < 1080:\n self.pos_x += 1\n if (pygame.key.get_pressed()[pygame.K_s]) and self.pos_y < 360:\n self.pos_y += 1", "def track_user_inputs(\n self,\n window,\n movement_speed: float = 1.0,\n yaw_speed: float = 2.0,\n pitch_speed: float = 2.0,\n hold_key=None,\n ):\n front = (self.curr_lookat - self.curr_position).normalized()\n position_change = Vector([0.0, 0.0, 0.0])\n left = self.curr_up.cross(front)\n up = self.curr_up\n\n if self.last_time is None:\n self.last_time = time.perf_counter_ns()\n time_elapsed = (time.perf_counter_ns() - self.last_time) * 1e-9\n self.last_time = time.perf_counter_ns()\n\n movement_speed *= time_elapsed * 60.0\n if window.is_pressed(\"w\"):\n position_change += front * movement_speed\n if window.is_pressed(\"s\"):\n position_change -= front * movement_speed\n if window.is_pressed(\"a\"):\n position_change += left * movement_speed\n if 
window.is_pressed(\"d\"):\n position_change -= left * movement_speed\n if window.is_pressed(\"e\"):\n position_change += up * movement_speed\n if window.is_pressed(\"q\"):\n position_change -= up * movement_speed\n self.position(*(self.curr_position + position_change))\n\n curr_mouse_x, curr_mouse_y = window.get_cursor_pos()\n\n if (hold_key is None) or window.is_pressed(hold_key):\n if (self.last_mouse_x is None) or (self.last_mouse_y is None):\n self.last_mouse_x, self.last_mouse_y = curr_mouse_x, curr_mouse_y\n dx = curr_mouse_x - self.last_mouse_x\n dy = curr_mouse_y - self.last_mouse_y\n\n yaw, pitch = vec_to_euler(front)\n\n yaw -= dx * yaw_speed * time_elapsed * 60.0\n pitch += dy * pitch_speed * time_elapsed * 60.0\n\n pitch_limit = pi / 2 * 0.99\n if pitch > pitch_limit:\n pitch = pitch_limit\n elif pitch < -pitch_limit:\n pitch = -pitch_limit\n\n front = euler_to_vec(yaw, pitch)\n\n self.lookat(*(self.curr_position + front))\n self.last_mouse_x, self.last_mouse_y = curr_mouse_x, curr_mouse_y", "def apply_changes(self):\n self.x = self.buff_x\n self.y = self.buff_y\n self.buff_x = None\n self.buff_y = None", "def update(self):\n pygame.event.pump()\n self.pos_x -= 1.5", "def set_speed_x(self, new_speed):\n self.__speed_x = new_speed", "def OnMouseDown(self, evt):\n self.CaptureMouse()\n self.x, self.y = self.lastx, self.lasty = evt.GetPosition()", "def changespeed(self, x1, y1):\n self.change_x += x1\n self.change_y += y1", "def tick(self, dt):\n self.x += dt * self.x_speed\n self.y += dt * self.y_speed", "def update_gps(\n self,\n x_m,\n y_m,\n x_accuracy_m,\n y_accuracy_m,\n heading_d,\n speed_m_s\n ):\n self.GPS_MEASUREMENT_NOISE[0].itemset(0, x_accuracy_m)\n self.GPS_MEASUREMENT_NOISE[1].itemset(1, y_accuracy_m)\n\n now = time.time()\n time_diff_s = now - self._last_observation_s\n self._last_observation_s = now\n\n if heading_d is None:\n heading_d = 0.0\n if speed_m_s is None:\n matrix = self.GPS_NO_HEADING_SPEED_OBSERVER_MATRIX\n speed_m_s = 0.0\n else:\n matrix = self.GPS_NO_HEADING_OBSERVER_MATRIX\n elif speed_m_s is None:\n matrix = self.GPS_NO_SPEED_OBSERVER_MATRIX\n speed_m_s = 0.0\n else:\n matrix = self.GPS_OBSERVER_MATRIX\n\n measurements = numpy.matrix(\n [x_m, y_m, heading_d, speed_m_s]\n ).transpose() # z\n\n self._update(\n measurements,\n matrix,\n self.GPS_MEASUREMENT_NOISE,\n time_diff_s\n )", "def on_mouse_motion(self, x, y, dx, dy):\n # hazlo aparecer donde este mi jugador en el mouse\n self.player_sprite.center_x = x\n self.player_sprite.center_y = y", "def getMouse(self):\n self.mouseX = None\n self.mouseY = None\n while self.mouseX == None or self.mouseY == None:\n #self.update()\n _tkCall(self.update)\n if self.isClosed(): raise GraphicsError, \"getMouse in closed window\"\n time.sleep(.1) # give up thread\n x,y = self.toWorld(self.mouseX, self.mouseY)\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)", "def setXY(self, x, y):\n self.x = x\n self.y = y", "def pointer_notify_axis(\n self,\n time_msec: int,\n orientation: AxisOrientation,\n value: float,\n value_discrete: int,\n source: AxisSource,\n ) -> None:\n lib.wlr_seat_pointer_notify_axis(\n self._ptr, time_msec, orientation.value, value, value_discrete, source.value\n )", "def update(self, *args):\n\n # change picture every 100 milliseconds\n now = pygame.time.get_ticks()\n if now - self.last_update > 100:\n self.index = self.index ^ 1\n self.image = self.images[self.index]\n self.last_update = now\n prom = self.rect.center\n self.rect = self.image.get_rect()\n self.rect.center = 
prom\n\n self.rect.x -= args[0]", "def emit_mouse(self, report):\n for name, attr in self.layout.mouse.items():\n # If the attr is a tuple like (left_analog_y, \"-\")\n # then set the attr to just be the first item\n attr, modifier = attr\n\n if attr.startswith(\"trackpad_touch\"):\n active_attr = attr[:16] + \"active\"\n if not getattr(report, active_attr):\n self.mouse_pos.pop(name, None)\n continue\n\n pos = getattr(report, attr)\n if name not in self.mouse_pos:\n self.mouse_pos[name] = pos\n\n sensitivity = 0.5\n self.mouse_rel[name] += (pos - self.mouse_pos[name]) * sensitivity\n self.mouse_pos[name] = pos\n\n elif \"analog\" in attr:\n pos = getattr(report, attr)\n if (pos > (128 + self.mouse_analog_deadzone)\n or pos < (128 - self.mouse_analog_deadzone)):\n accel = (pos - 128) / 10\n else:\n continue\n\n # If a minus modifier has been given then minus the acceleration\n # to invert the direction.\n if (modifier and modifier == \"-\"):\n accel = -accel\n\n sensitivity = self.mouse_analog_sensitivity\n self.mouse_rel[name] += accel * sensitivity\n\n # Emulate mouse wheel (needs special handling)\n if name in (ecodes.REL_WHEELUP, ecodes.REL_WHEELDOWN):\n ecode = ecodes.REL_WHEEL # The real event we need to emit\n write = False\n if getattr(report, attr):\n self._scroll_details['direction'] = name\n now = time.time()\n last_write = self._scroll_details.get('last_write')\n if not last_write:\n # No delay for the first button press for fast feedback\n write = True\n self._scroll_details['count'] = 0\n if name == ecodes.REL_WHEELUP:\n value = 1\n elif name == ecodes.REL_WHEELDOWN:\n value = -1\n if last_write:\n # Delay at least one cycle before continual scrolling\n if self._scroll_details['count'] > 1:\n if now - last_write > self.scroll_delay:\n write = True\n elif now - last_write > self.scroll_repeat_delay:\n write = True\n if write:\n self.device.write(ecodes.EV_REL, ecode, value)\n self._scroll_details['last_write'] = now\n self._scroll_details['count'] += 1\n continue # No need to proceed further\n else:\n # Reset so you can quickly tap the button to scroll\n if self._scroll_details.get('direction') == name:\n self._scroll_details['last_write'] = 0\n self._scroll_details['count'] = 0\n\n rel = int(self.mouse_rel[name])\n self.mouse_rel[name] = self.mouse_rel[name] - rel\n self.device.write(ecodes.EV_REL, name, rel)\n\n self.device.syn()", "def show_mouse_position_with_px(self):\n self.main_menu_greets_fonts = pygame.font.Font(os.path.join(PATH_TO_RESOURCE, 'font_forever.ttf'), 10)\n self.positiontext(f'Mouse position {pygame.mouse.get_pos()}', (770, 20))\n self.mouse = pygame.mouse.get_pos()\n return self.mouse", "def Update(self, ticks=0):", "def handleMousePositionCallback(self, xy):\n\n if self.mouse_position_callback:\n (x, y) = xy\n posn = self.convertView2Geo(x, y)\n self.mouse_position_callback(posn)", "def set_global_coordinates(self):\r\n\r\n # alias:\r\n F = Turbine.F\r\n t = Turbine.t\r\n \r\n self.x = -F*np.sin(t) + self.x0\r\n self.y = +F*np.cos(t) + self.y0", "def mc_update_xy(self):\n i = random.randint(0,self.N-1)\n return self.mc_update_fixed(i,xy = True)", "def update_points(self, *args):\n points = [Window.width / 2, Window.height / 2, .5, .5]\n i = 0\n while i < 2 * pi:\n i += 0.01 * pi\n points.extend([\n Window.width / 2 + cos(i) * (self.radius + self.sin_wobble *\n sin(i * self.sin_wobble_speed)),\n Window.height / 2 + sin(i) * (self.radius + self.sin_wobble *\n sin(i * self.sin_wobble_speed)),\n self.offset_x + sin(i),\n self.offset_y + cos(i)])\n\n 
self.mesh_points = points", "def update(self):\n pos = pygame.mouse.get_pos()\n self.rect.midtop = pos\n if self.punching:\n self.rect.move_ip(5, 10) # move fist position in place", "def mouse_move_callback(self, event):\n # TODO drag and drop figuriek\n print(\"moving at \", event.x + self.offset_x, event.y + self.offset_y)", "def mouse_motion_current_mouse_position() -> EventType:\n x, y = pygame.mouse.get_pos()\n return pygame.event.Event(pygame.MOUSEMOTION, {'pos': (int(x), int(y))})", "def tick(self, dt: float):\n self.x_pos += dt * self.x_velocity\n self.y_pos += dt * self.y_velocity\n\n self.x_velocity += dt * self.x_acceleration\n self.y_velocity += dt * self.y_velocity", "def start(self, x, y):\n self.last_x = x\n self.last_y = y\n self.aperture_id = None", "def update(self):\r\n # Get where the mouse is\r\n pos = pygame.mouse.get_pos()\r\n # Set the left side of the player bar to the mouse position\r\n self.rect.x = pos[0]\r\n # Make sure we don't push the player paddle\r\n # off the right side of the screen\r\n if self.rect.x > self.screenwidth - self.width:\r\n self.rect.x = self.screenwidth - self.width", "def pos(self, x, y):\n\n if isinstance(x, float):\n x = int(x)\n\n self.screen.write(colorama.Cursor.POS(x, y), ansi=True)\n self.x = x\n self.y = y\n\n return x, y" ]
[ "0.5839249", "0.57032055", "0.5696799", "0.56878513", "0.5680848", "0.56477594", "0.5643513", "0.56162447", "0.5613641", "0.56051666", "0.558602", "0.55699193", "0.5541605", "0.5541605", "0.5534065", "0.5533534", "0.5528136", "0.5512954", "0.5495194", "0.54931223", "0.548661", "0.5484529", "0.5472189", "0.5459849", "0.54550296", "0.54440284", "0.5431041", "0.5381656", "0.53808516", "0.53771603", "0.5350924", "0.53425384", "0.5342128", "0.5341712", "0.53349406", "0.5310174", "0.5307245", "0.53021187", "0.5278707", "0.52759093", "0.52631426", "0.52097136", "0.5205697", "0.5205697", "0.5205697", "0.5205302", "0.5198206", "0.5188511", "0.51882607", "0.51860654", "0.515826", "0.515426", "0.5144423", "0.5104018", "0.5103439", "0.50818425", "0.5070548", "0.507045", "0.5064747", "0.50631535", "0.5052534", "0.5046333", "0.5016433", "0.5001794", "0.49904716", "0.49771985", "0.4976581", "0.49676645", "0.49652517", "0.49640688", "0.49640688", "0.49429524", "0.49325722", "0.49303797", "0.49284643", "0.49273118", "0.4922813", "0.49207786", "0.4919838", "0.49190274", "0.49133068", "0.49034247", "0.4900723", "0.48952404", "0.48894283", "0.48868567", "0.48865172", "0.4879298", "0.4869098", "0.48689216", "0.4855089", "0.48512068", "0.4846988", "0.4841186", "0.48409376", "0.48408723", "0.4833692", "0.48321015", "0.48240182", "0.48214075" ]
0.6736627
0
Returns the focal length of the telescope.
def focal_length(self):
    return self.f * self.diameter
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def estimate_focal_length(self):\n fl = (self.fiber_diameter / 2) / np.tan(np.deg2rad(self.fov / 2))\n\n return fl", "def length(self) -> ir.FloatingValue:\n return ops.GeoLength(self).to_expr()", "def bspb_focalLength():\n shotCam = pm.PyNode('shot_cam').getShape()\n return str(shotCam.focalLength.get())", "def getLength(self):\n return self.geometry.length", "def getLength(self) -> float:\n return self.length", "def get_length(self) -> int:\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()", "def focallengthFromFOV(self, view_x=None, view_y=None): # pragma: no cover\n # to be overloaded by the child class.\n return 0", "def focal_point(self):\n return self._focal_point", "def length(self):\n return _property_op(arctern.ST_Length, self)", "def length(self) -> float:\n n = self.geodesic.extrinsicDimension()\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = self.geodesic.integrate(cp0,vectorops.mul(x[n:],third))\n cp3 = y[:n]\n cp2 = self.geodesic.integrate(cp3,vectorops.mul(y[n:],-third))\n return self.geodesic.distance(cp0,cp1) + self.geodesic.distance(cp1,cp2) + self.geodesic.distance(cp2,cp3)\n return Trajectory.length(self,distance)", "def obs_length(self):\n return self.lc.time[-1] - self.lc.time[0]", "def getVocalized(self,):\n\t\treturn self.vocalized;", "def get_length(self):\n\n return self.length", "def auxiliary_trail_length(self):\n return self.attributes[\"_aux_length\"]", "def _get_length(self):\n return self._length", "def total_length(self):\n # YOUR CODE HERE\n return abs(self.radius*self.angle)", "def getLength(self):\n return self.length", "def getLength(self):\n return self.sideLength", "def time_length(self):\n return self._time_length", "def get_length(self):\n return self._length", "def get_length(self):\n return self._length", "def __len__(self):\n return len(self.focals)", "def getLength(self):\n flength = 0\n for quad in self._quadrilaterals:\n flength = flength + get_quad_length(quad)\n return flength", "def get_length(self):\n\n return self._length", "def _get_length(self):\n from math import sqrt\n\n if self._length is None:\n sum1 = 0\n for a in self.diff:\n sum1 += a * a\n self._length = sqrt(sum1)\n return self._length", "def length(self):\n self.convert_window(\"Length\", \"meters\", [\"Scandinavian mile\", \"angstroms\", \"au\", \"barleycorns\", \"cables\", \"centimeters\", \"chains\", \"decimeters\", \"ells\", \"ems\", \"fathoms\", \"feet(UK & US)\", \"feet(US survey)\", \"furlongs\", \"hands\", \"hectometers\", \"inches\", \"kilometers\", \"links\", \"light years\", \"meters\", \"micrometers\", \"mil\", \"miles(UK & US)\", \"miles(nautical, UK)\", \"miles(nautical, international)\", \"millimeters\", \"nanometers\", \"parsecs\", \"pica\", \"picometers\", \"rods\", \"spans\", \"thou\", \"yards\"])", "def length(self):\n return self.get_delta_value(self.Z_INDEX)", "def length(self) -> float:\n return pos.distance(self.start, self.end)", "def get_length(self):\n if(type(self._length) != float):\n self._logger.write(\"Error! length must be of type float\")\n elif(self._length == None):\n self._logger.write(\"Error! length contains no value\")\n else:\n try:\n return self._length\n except Exception as e:\n self._logger.write(\"Error! 
Could not fetch the value of length: \\n %s\" % e)", "def length(self):\n return math.sqrt(self.x**2 + self.y**2 + self.z**2)", "def get_length(self):\n return math.sqrt(self.x**2 + self.y**2)", "def lfn(self):\n if self.precision:\n return self.evaluations.exposedWing.edges[1].point1.z - self.evaluations.chordIntersected.edges[1].length\n else:\n return (self.acW + self.longPosW) / 2 # first guess for a faster evaluation", "def length(self):\n return self.length2 ** 0.5", "def get_unstr_length(self):\n if self.unstr_length is None:\n return self.length()\n\n elif isinstance(self.unstr_length, str):\n return self.length() + float(self.unstr_length)\n\n return self.unstr_length", "def get_road_length(self):\r\n if self.road_length is None:\r\n return self.container.get_road_length()\r\n \r\n return self.road_length", "def get_length(self, ak_tpl: BKT) -> Optional[float]:\n ...", "def total_length(self):\n return abs(self.length)", "def length(self):\n return pyvista.Box(self.bounds).length", "def Length(self):\n return math.sqrt(self.x**2 + self.y**2 + self.z**2)", "def get_list_length(self):\r\n s = self.query('LIST:FREQ:POIN?')\r\n if s == None: return None\r\n return int(s)", "def length(self) -> float:\n n = len(self.milestones[0])//2\n third = 1.0/3.0\n def distance(x,y):\n cp0 = x[:n]\n cp1 = vectorops.madd(cp0,x[n:],third)\n cp3 = y[:n]\n cp2 = vectorops.madd(cp3,y[n:],-third)\n return third*vectorops.norm(x[n:]) + vectorops.distance(cp1,cp2) + third*vectorops.norm(y[n:])\n return Trajectory.length(self,distance)", "def maximumORFLength(self):\n return max(len(orf) for orf in self.ORFs())", "def length(self):\n return self.endpoints[0].distance_to(self.endpoints[1])", "def filmHeight(self):\r\n cls = mxs.classof(self._nativePointer)\r\n height = None\r\n if cls == mxs.VRayPhysicalCamera:\r\n\r\n # TODO: Why is that wrapped in a try except?\r\n try:\r\n height = self._nativePointer.film_height\r\n except AttributeError:\r\n pass\r\n\r\n elif cls == mxs.Physical:\r\n height = self._nativePointer.film_height_mm\r\n\r\n if not height:\r\n # If we failed to get a width from a camera, return the scene aperture setting.\r\n height = self.filmWidth() * (mxs.renderPixelAspect / mxs.getRendImageAspect())\r\n\r\n return height", "def getLength(self):\n return None", "def getLength(self):\n return None", "def TF_wavelength(self):\n return int(self.ask(self.headStr('TF')+'TWL?'))", "def edge_length(self):\n if self.edge_length_l is not None:\n return self.edge_length_l\n else:\n self.edge_length_l = (2 * self.radius * math.sin(math.pi / self.vert_count))\n return self.edge_length_l", "def length(self):\n if self._length_cache is None:\n cls = type(self)\n func = cls._length_extraction_fn()\n preprocessed_func = cls.preprocess_func(func)\n self._length_cache = self.apply(preprocessed_func)\n return self._length_cache", "def length(self):\n return self.length", "def get_path_length(self) :\n return self.path_length", "def length(self):\n if self.running:\n return ZERO_TIME\n else:\n return self.end - self.start", "def getFar(self):\n return self.light.node().getLens().getFar()", "def filmWidth(self):\r\n cls = mxs.classof(self._nativePointer)\r\n width = None\r\n if cls == mxs.VRayPhysicalCamera:\r\n width = self._nativePointer.film_width\r\n\r\n elif cls == mxs.Physical:\r\n width = self._nativePointer.film_width_mm\r\n\r\n if not width:\r\n \r\n # If we failed to get a width from a camera, return the scene aperture setting.\r\n width = mxs.getRendApertureWidth()\r\n\r\n return width", "def 
length(self):\n return float(np.max([self.width(),self.height()]))", "def calculate_length(self):\n raise NotImplementedError", "def get_length(self):\r\n return len(self.tweets)", "def TL_wavelength(self):\n return int(self.ask(self.headStr('TL')+'TWL?'))", "def get_length(self, ak_spec: Union[str, BKT]) -> Optional[float]:\n ...", "def get_angle_of_view(focal_length, crop_factor=FULL_FRAME):\n d = 36 # mm, full-frame\n d /= crop_factor\n alpha = 2 * math.atan(d/(2*focal_length))\n return alpha", "def length(self):\n return math.sqrt(self.x * self.x + self.y * self.y)", "def length(self):\n return self._length", "def length(self):\n return self._length", "def get_list_length(self):\r\n s = self.query('SOUR1:LIST:FREQ:POIN?')\r\n if s == None: return None\r\n return int(s)", "def get_list_length(self):\r\n s = self.query('SOUR1:LIST:FREQ:POIN?')\r\n if s == None: return None\r\n return int(s)", "def get_length(self):\n return self.run_command('get_length')[0]", "def _set_length(self):\n if self.nb_points <= 1:\n self.length = 0\n else:\n ldiff_degree = self.coord_list[1:] - self.coord_list[:-1]\n ldiff_meter = ldiff_degree * np.pi * EQUATORIAL_EARTH_RADIUS / 180\n ldiff_meter[:, 0] *= np.cos(self.mean_pos[1] * np.pi / 180)\n self.length = np.sum(\n np.sqrt(ldiff_meter[:, 0] ** 2 + ldiff_meter[:, 1] ** 2)\n )", "def Length(self):\n xyza = self.ga_ref.get_position() + self.wa\n xyzb = self.gb_ref.get_position() + self.wb\n if self.gc is not None:\n xyzc = self.gc_ref.get_position() + self.wc\n xa, ya, za = xyza\n length = self._integrate(\n xyza - xa,\n xyzb - ya,\n xyzc - za,\n )\n else:\n length = np.linalg.norm(xyzb - xyza)\n return length", "def length(self):\n return self._info.length # pylint: disable=E1101", "def getLengthAlien(self):\n return len(self._aliens)", "def length(self):\r\n\r\n return math.sqrt(self*self)", "def getLength(self):\n return self.vector.norm", "def length(self):\n return self.__length", "def length(self):\n return self.__length", "def zone_width(self):\n return self._zone_width", "def length(self):\n\n return self._length", "def getLength(self):\n return _libsbml.XMLAttributes_getLength(self)", "def getLen(self):\n return self.len", "def bspb_focalLengthUpdate():\n if pm.windows.headsUpDisplay('focalLengthUpdateEXP', q=True, ex=True):\n pm.delete('focalLengthUpdateEXP')\n pm.expression(s='headsUpDisplay -r \"focalLengthHUD\";', n='focalLengthUpdateEXP', ae=1, uc='all')", "def getFov(self):\n return self.light.node().getLens().getFov()", "def length(self):\n if not hasattr(self, '_length'):\n lengths = np.linalg.norm(np.diff(self.points, axis=0), axis=1)\n self._segment_lengths = lengths\n self._length = lengths.sum()\n return self._length", "def calc_length(self):\n return AtomMath.length(self.atom1.position - self.atom2.position)", "def length(self):\n return math.sqrt(\n (self.endpoint_a.northing - self.endpoint_b.northing) ** 2 +\n (self.endpoint_a.easting - self.endpoint_b.easting) ** 2\n )", "def computeDirLength(self):\n self.length , uv = dirAndLength(self.pointN, self.point1)\n self.unitv = uv\n return uv", "def getTotalLength(self):\n return self.length", "def full_frame_length(self):\n return self.height * self.width * 3", "def namelength(self):\n return self[\"namelength\"]", "def get_track_length(track_path):\n track_extension = os.path.splitext(track_path)[1]\n if track_extension:\n try:\n mutagen_track = File(track_path)\n track_total_length = mutagen_track.info.length\n except:\n track_total_length = 0\n tkinter.messagebox.showwarning(\n 
title=\"Warning!\", message=f\"Audio file incorrect : {track_path}\")\n finally:\n track_length_formated = strftime(\n '%M:%S', gmtime(track_total_length))\n track_length_label.configure(text=track_length_formated)\n track_pos_slider.configure(to=track_total_length)\n return track_total_length", "def get_length(self):\n return self._select_interface(self._rc_get_length,\n self._http_get_length)", "def ft(self):\n return self._ft", "def total_length(self):\n return self.length", "def get_list_length(self):\r\n _debug('simq03b_api.get_list_length')\r\n \r\n s = self.query('SOUR1:LIST:FREQ:POIN?')\r\n if s == None: return None\r\n return int(s)", "def length(self):\n return self.n * self.t.length()", "def length(self) -> ir.IntegerValue:\n return ops.MapLength(self).to_expr()", "def __abs__(self):\n return self.length", "def get_length(self, px, py):\n idx = px + py * self.width\n return len(self._rotations_buffer[idx])", "def Length(self) -> int:", "def Length(self) -> int:", "def getWindowLength(f0=10e3, fs=2.5e6, windfunc='blackman', error=0.1):\n # lowest detectable frequency by window\n ldf = f0 * error\n\n if windfunc == 'Rectangular':\n M = int(fs / ldf)\n elif windfunc in ('Bartlett', 'Hanning', 'Hamming'):\n M = int(4 * (fs / ldf))\n elif windfunc == 'blackman':\n M = int(6 * (fs / ldf))\n else:\n raise ValueError('Not a valid windowing function.')\n\n return M", "def getLength(self):\n return self.n" ]
[ "0.7237808", "0.6786899", "0.6465912", "0.63727885", "0.6352939", "0.62659967", "0.61646885", "0.616444", "0.61160403", "0.6020924", "0.5934602", "0.5932981", "0.59197545", "0.5891779", "0.588042", "0.58679926", "0.584611", "0.58384174", "0.58230686", "0.5822641", "0.5822641", "0.5792508", "0.5785025", "0.57838994", "0.5760595", "0.5746466", "0.57397634", "0.57205796", "0.571677", "0.5704886", "0.5702491", "0.56714946", "0.5665451", "0.5645467", "0.56437665", "0.56348485", "0.56137687", "0.55941147", "0.5590532", "0.5571991", "0.55626273", "0.55620104", "0.55570215", "0.5546112", "0.5532551", "0.5532551", "0.55304664", "0.5523874", "0.552371", "0.5508641", "0.54942715", "0.54661185", "0.54656863", "0.54496455", "0.5430939", "0.5427295", "0.54236674", "0.5417234", "0.5402647", "0.5394534", "0.5393492", "0.5383069", "0.5383069", "0.5382589", "0.5382589", "0.5382035", "0.5356461", "0.535187", "0.5348069", "0.5338397", "0.5337886", "0.5317383", "0.53170526", "0.53170526", "0.53149885", "0.53026015", "0.5295603", "0.52830654", "0.5276201", "0.52734417", "0.527062", "0.52702105", "0.52632165", "0.52553546", "0.52446485", "0.5241493", "0.52375567", "0.5209648", "0.5202467", "0.51955056", "0.5193148", "0.5179871", "0.5177486", "0.51762414", "0.51704115", "0.5165728", "0.51638186", "0.51638186", "0.5162487", "0.5161589" ]
0.8241378
0