Search is not available for this dataset
text
stringlengths 75
104k
|
---|
def wav2complex(filename):
"""
Return a complex signal vector from a wav file that was used to store
the real (I) and imaginary (Q) values of a complex signal ndarray.
The rate is included as means of recalling the original signal sample
rate.
fs,x = wav2complex(filename)
Mark Wickert April 2014
"""
fs, x_LR_cols = ss.from_wav(filename)
x = x_LR_cols[:,0] + 1j*x_LR_cols[:,1]
return fs,x
|
def FIR_header(fname_out, h):
"""
Write FIR Filter Header Files
Mark Wickert February 2015
"""
M = len(h)
N = 3 # Coefficients per line
f = open(fname_out, 'wt')
f.write('//define a FIR coefficient Array\n\n')
f.write('#include <stdint.h>\n\n')
f.write('#ifndef M_FIR\n')
f.write('#define M_FIR %d\n' % M)
f.write('#endif\n')
f.write('/************************************************************************/\n');
f.write('/* FIR Filter Coefficients */\n');
f.write('float32_t h_FIR[M_FIR] = {')
kk = 0;
for k in range(M):
# k_mod = k % M
if (kk < N - 1) and (k < M - 1):
f.write('%15.12f,' % h[k])
kk += 1
elif (kk == N - 1) & (k < M - 1):
f.write('%15.12f,\n' % h[k])
if k < M:
f.write(' ')
kk = 0
else:
f.write('%15.12f' % h[k])
f.write('};\n')
f.write('/************************************************************************/\n')
f.close()
|
def FIR_fix_header(fname_out, h):
"""
Write FIR Fixed-Point Filter Header Files
Mark Wickert February 2015
"""
M = len(h)
hq = int16(rint(h * 2 ** 15))
N = 8 # Coefficients per line
f = open(fname_out, 'wt')
f.write('//define a FIR coefficient Array\n\n')
f.write('#include <stdint.h>\n\n')
f.write('#ifndef M_FIR\n')
f.write('#define M_FIR %d\n' % M)
f.write('#endif\n')
f.write('/************************************************************************/\n');
f.write('/* FIR Filter Coefficients */\n');
f.write('int16_t h_FIR[M_FIR] = {')
kk = 0;
for k in range(M):
# k_mod = k % M
if (kk < N - 1) and (k < M - 1):
f.write('%5d,' % hq[k])
kk += 1
elif (kk == N - 1) & (k < M - 1):
f.write('%5d,\n' % hq[k])
if k < M:
f.write(' ')
kk = 0
else:
f.write('%5d' % hq[k])
f.write('};\n')
f.write('/************************************************************************/\n')
f.close()
|
def IIR_sos_header(fname_out, SOS_mat):
"""
Write IIR SOS Header Files
File format is compatible with CMSIS-DSP IIR
Directform II Filter Functions
Mark Wickert March 2015-October 2016
"""
Ns, Mcol = SOS_mat.shape
f = open(fname_out, 'wt')
f.write('//define a IIR SOS CMSIS-DSP coefficient array\n\n')
f.write('#include <stdint.h>\n\n')
f.write('#ifndef STAGES\n')
f.write('#define STAGES %d\n' % Ns)
f.write('#endif\n')
f.write('/*********************************************************/\n');
f.write('/* IIR SOS Filter Coefficients */\n');
f.write('float32_t ba_coeff[%d] = { //b0,b1,b2,a1,a2,... by stage\n' % (5 * Ns))
for k in range(Ns):
if (k < Ns - 1):
f.write(' %+-13e, %+-13e, %+-13e,\n' % \
(SOS_mat[k, 0], SOS_mat[k, 1], SOS_mat[k, 2]))
f.write(' %+-13e, %+-13e,\n' % \
(-SOS_mat[k, 4], -SOS_mat[k, 5]))
else:
f.write(' %+-13e, %+-13e, %+-13e,\n' % \
(SOS_mat[k, 0], SOS_mat[k, 1], SOS_mat[k, 2]))
f.write(' %+-13e, %+-13e\n' % \
(-SOS_mat[k, 4], -SOS_mat[k, 5]))
# for k in range(Ns):
# if (k < Ns-1):
# f.write(' %15.12f, %15.12f, %15.12f,\n' % \
# (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))
# f.write(' %15.12f, %15.12f,\n' % \
# (-SOS_mat[k,4],-SOS_mat[k,5]))
# else:
# f.write(' %15.12f, %15.12f, %15.12f,\n' % \
# (SOS_mat[k,0],SOS_mat[k,1],SOS_mat[k,2]))
# f.write(' %15.12f, %15.12f\n' % \
# (-SOS_mat[k,4],-SOS_mat[k,5]))
f.write('};\n')
f.write('/*********************************************************/\n')
f.close()
|
def CA_code_header(fname_out, Nca):
"""
Write 1023 bit CA (Gold) Code Header Files
Mark Wickert February 2015
"""
dir_path = os.path.dirname(os.path.realpath(__file__))
ca = loadtxt(dir_path + '/ca1thru37.txt', dtype=int16, usecols=(Nca - 1,), unpack=True)
M = 1023 # code period
N = 23 # code bits per line
Sca = 'ca' + str(Nca)
f = open(fname_out, 'wt')
f.write('//define a CA code\n\n')
f.write('#include <stdint.h>\n\n')
f.write('#ifndef N_CA\n')
f.write('#define N_CA %d\n' % M)
f.write('#endif\n')
f.write('/*******************************************************************/\n');
f.write('/* 1023 Bit CA Gold Code %2d */\n' \
% Nca);
f.write('int8_t ca%d[N_CA] = {' % Nca)
kk = 0;
for k in range(M):
# k_mod = k % M
if (kk < N - 1) and (k < M - 1):
f.write('%d,' % ca[k])
kk += 1
elif (kk == N - 1) & (k < M - 1):
f.write('%d,\n' % ca[k])
if k < M:
if Nca < 10:
f.write(' ')
else:
f.write(' ')
kk = 0
else:
f.write('%d' % ca[k])
f.write('};\n')
f.write('/*******************************************************************/\n')
f.close()
|
def farrow_resample(x, fs_old, fs_new):
"""
Parameters
----------
x : Input list representing a signal vector needing resampling.
fs_old : Starting/old sampling frequency.
fs_new : New sampling frequency.
Returns
-------
y : List representing the signal vector resampled at the new frequency.
Notes
-----
A cubic interpolator using a Farrow structure is used resample the
input data at a new sampling rate that may be an irrational multiple of
the input sampling rate.
Time alignment can be found for a integer value M, found with the following:
.. math:: f_{s,out} = f_{s,in} (M - 1) / M
The filter coefficients used here and a more comprehensive listing can be
found in H. Meyr, M. Moeneclaey, & S. Fechtel, "Digital Communication
Receivers," Wiley, 1998, Chapter 9, pp. 521-523.
Another good paper on variable interpolators is: L. Erup, F. Gardner, &
R. Harris, "Interpolation in Digital Modems--Part II: Implementation
and Performance," IEEE Comm. Trans., June 1993, pp. 998-1008.
A founding paper on the subject of interpolators is: C. W. Farrow, "A
Continuously variable Digital Delay Element," Proceedings of the IEEE
Intern. Symp. on Circuits Syst., pp. 2641-2645, June 1988.
Mark Wickert April 2003, recoded to Python November 2013
Examples
--------
The following example uses a QPSK signal with rc pulse shaping, and time alignment at M = 15.
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm import digitalcom as dc
>>> Ns = 8
>>> Rs = 1.
>>> fsin = Ns*Rs
>>> Tsin = 1 / fsin
>>> N = 200
>>> ts = 1
>>> x, b, data = dc.MPSK_bb(N+12, Ns, 4, 'rc')
>>> x = x[12*Ns:]
>>> xxI = x.real
>>> M = 15
>>> fsout = fsin * (M-1) / M
>>> Tsout = 1. / fsout
>>> xI = dc.farrow_resample(xxI, fsin, fsin)
>>> tx = arange(0, len(xI)) / fsin
>>> yI = dc.farrow_resample(xxI, fsin, fsout)
>>> ty = arange(0, len(yI)) / fsout
>>> plt.plot(tx - Tsin, xI)
>>> plt.plot(tx[ts::Ns] - Tsin, xI[ts::Ns], 'r.')
>>> plt.plot(ty[ts::Ns] - Tsout, yI[ts::Ns], 'g.')
>>> plt.title(r'Impact of Asynchronous Sampling')
>>> plt.ylabel(r'Real Signal Amplitude')
>>> plt.xlabel(r'Symbol Rate Normalized Time')
>>> plt.xlim([0, 20])
>>> plt.grid()
>>> plt.show()
"""
#Cubic interpolator over 4 samples.
#The base point receives a two sample delay.
v3 = signal.lfilter([1/6., -1/2., 1/2., -1/6.],[1],x)
v2 = signal.lfilter([0, 1/2., -1, 1/2.],[1],x)
v1 = signal.lfilter([-1/6., 1, -1/2., -1/3.],[1],x)
v0 = signal.lfilter([0, 0, 1],[1],x)
Ts_old = 1/float(fs_old)
Ts_new = 1/float(fs_new)
T_end = Ts_old*(len(x)-3)
t_new = np.arange(0,T_end+Ts_old,Ts_new)
if x.dtype == np.dtype('complex128') or x.dtype == np.dtype('complex64'):
y = np.zeros(len(t_new)) + 1j*np.zeros(len(t_new))
else:
y = np.zeros(len(t_new))
for n in range(len(t_new)):
n_old = int(np.floor(n*Ts_new/Ts_old))
mu = (n*Ts_new - n_old*Ts_old)/Ts_old
# Combine outputs
y[n] = ((v3[n_old+1]*mu + v2[n_old+1])*mu
+ v1[n_old+1])*mu + v0[n_old+1]
return y
|
def eye_plot(x,L,S=0):
"""
Eye pattern plot of a baseband digital communications waveform.
The signal must be real, but can be multivalued in terms of the underlying
modulation scheme. Used for BPSK eye plots in the Case Study article.
Parameters
----------
x : ndarray of the real input data vector/array
L : display length in samples (usually two symbols)
S : start index
Returns
-------
None : A plot window opens containing the eye plot
Notes
-----
Increase S to eliminate filter transients.
Examples
--------
1000 bits at 10 samples per bit with 'rc' shaping.
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import digitalcom as dc
>>> x,b, data = dc.NRZ_bits(1000,10,'rc')
>>> dc.eye_plot(x,20,60)
>>> plt.show()
"""
plt.figure(figsize=(6,4))
idx = np.arange(0,L+1)
plt.plot(idx,x[S:S+L+1],'b')
k_max = int((len(x) - S)/L)-1
for k in range(1,k_max):
plt.plot(idx,x[S+k*L:S+L+1+k*L],'b')
plt.grid()
plt.xlabel('Time Index - n')
plt.ylabel('Amplitude')
plt.title('Eye Plot')
return 0
|
def scatter(x,Ns,start):
"""
Sample a baseband digital communications waveform at the symbol spacing.
Parameters
----------
x : ndarray of the input digital comm signal
Ns : number of samples per symbol (bit)
start : the array index to start the sampling
Returns
-------
xI : ndarray of the real part of x following sampling
xQ : ndarray of the imaginary part of x following sampling
Notes
-----
Normally the signal is complex, so the scatter plot contains
clusters at point in the complex plane. For a binary signal
such as BPSK, the point centers are nominally +/-1 on the real
axis. Start is used to eliminate transients from the FIR
pulse shaping filters from appearing in the scatter plot.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import digitalcom as dc
>>> x,b, data = dc.NRZ_bits(1000,10,'rc')
Add some noise so points are now scattered about +/-1.
>>> y = dc.cpx_AWGN(x,20,10)
>>> yI,yQ = dc.scatter(y,10,60)
>>> plt.plot(yI,yQ,'.')
>>> plt.grid()
>>> plt.xlabel('In-Phase')
>>> plt.ylabel('Quadrature')
>>> plt.axis('equal')
>>> plt.show()
"""
xI = np.real(x[start::Ns])
xQ = np.imag(x[start::Ns])
return xI, xQ
|
def strips(x,Nx,fig_size=(6,4)):
"""
Plots the contents of real ndarray x as a vertical stacking of
strips, each of length Nx. The default figure size is (6,4) inches.
The yaxis tick labels are the starting index of each strip. The red
dashed lines correspond to zero amplitude in each strip.
strips(x,Nx,my_figsize=(6,4))
Mark Wickert April 2014
"""
plt.figure(figsize=fig_size)
#ax = fig.add_subplot(111)
N = len(x)
Mx = int(np.ceil(N/float(Nx)))
x_max = np.max(np.abs(x))
for kk in range(Mx):
plt.plot(np.array([0,Nx]),-kk*Nx*np.array([1,1]),'r-.')
plt.plot(x[kk*Nx:(kk+1)*Nx]/x_max*0.4*Nx-kk*Nx,'b')
plt.axis([0,Nx,-Nx*(Mx-0.5),Nx*0.5])
plt.yticks(np.arange(0,-Nx*Mx,-Nx),np.arange(0,Nx*Mx,Nx))
plt.xlabel('Index')
plt.ylabel('Strip Amplitude and Starting Index')
return 0
|
def bit_errors(tx_data,rx_data,Ncorr = 1024,Ntransient = 0):
"""
Count bit errors between a transmitted and received BPSK signal.
Time delay between streams is detected as well as ambiquity resolution
due to carrier phase lock offsets of :math:`k*\\pi`, k=0,1.
The ndarray tx_data is Tx 0/1 bits as real numbers I.
The ndarray rx_data is Rx 0/1 bits as real numbers I.
Note: Ncorr needs to be even
"""
# Remove Ntransient symbols and level shift to {-1,+1}
tx_data = 2*tx_data[Ntransient:]-1
rx_data = 2*rx_data[Ntransient:]-1
# Correlate the first Ncorr symbols at four possible phase rotations
R0 = np.fft.ifft(np.fft.fft(rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
R1 = np.fft.ifft(np.fft.fft(-1*rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
#Place the zero lag value in the center of the array
R0 = np.fft.fftshift(R0)
R1 = np.fft.fftshift(R1)
R0max = np.max(R0.real)
R1max = np.max(R1.real)
R = np.array([R0max,R1max])
Rmax = np.max(R)
kphase_max = np.where(R == Rmax)[0]
kmax = kphase_max[0]
# Correlation lag value is zero at the center of the array
if kmax == 0:
lagmax = np.where(R0.real == Rmax)[0] - Ncorr/2
elif kmax == 1:
lagmax = np.where(R1.real == Rmax)[0] - Ncorr/2
taumax = lagmax[0]
print('kmax = %d, taumax = %d' % (kmax, taumax))
# Count bit and symbol errors over the entire input ndarrays
# Begin by making tx and rx length equal and apply phase rotation to rx
if taumax < 0:
tx_data = tx_data[int(-taumax):]
tx_data = tx_data[:min(len(tx_data),len(rx_data))]
rx_data = (-1)**kmax*rx_data[:len(tx_data)]
else:
rx_data = (-1)**kmax * rx_data[int(taumax):]
rx_data = rx_data[:min(len(tx_data),len(rx_data))]
tx_data = tx_data[:len(rx_data)]
# Convert to 0's and 1's
Bit_count = len(tx_data)
tx_I = np.int16((tx_data.real + 1)/2)
rx_I = np.int16((rx_data.real + 1)/2)
Bit_errors = tx_I ^ rx_I
return Bit_count,np.sum(Bit_errors)
|
def QAM_bb(N_symb,Ns,mod_type='16qam',pulse='rect',alpha=0.35):
"""
QAM_BB_TX: A complex baseband transmitter
x,b,tx_data = QAM_bb(K,Ns,M)
//////////// Inputs //////////////////////////////////////////////////
N_symb = the number of symbols to process
Ns = number of samples per symbol
mod_type = modulation type: qpsk, 16qam, 64qam, or 256qam
alpha = squareroot raised codine pulse shape bandwidth factor.
For DOCSIS alpha = 0.12 to 0.18. In general alpha can
range over 0 < alpha < 1.
SRC = pulse shape: 0-> rect, 1-> SRC
//////////// Outputs /////////////////////////////////////////////////
x = complex baseband digital modulation
b = transmitter shaping filter, rectangle or SRC
tx_data = xI+1j*xQ = inphase symbol sequence +
1j*quadrature symbol sequence
Mark Wickert November 2014
"""
# Filter the impulse train waveform with a square root raised
# cosine pulse shape designed as follows:
# Design the filter to be of duration 12 symbols and
# fix the excess bandwidth factor at alpha = 0.35
# If SRC = 0 use a simple rectangle pulse shape
if pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,6)
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,6)
elif pulse.lower() == 'rect':
b = np.ones(int(Ns)) #alt. rect. pulse shape
else:
raise ValueError('pulse shape must be src, rc, or rect')
if mod_type.lower() == 'qpsk':
M = 2 # bits per symbol
elif mod_type.lower() == '16qam':
M = 4
elif mod_type.lower() == '64qam':
M = 8
elif mod_type.lower() == '256qam':
M = 16
else:
raise ValueError('Unknown mod_type')
# Create random symbols for the I & Q channels
xI = np.random.randint(0,M,N_symb)
xI = 2*xI - (M-1)
xQ = np.random.randint(0,M,N_symb)
xQ = 2*xQ - (M-1)
# Employ differential encoding to counter phase ambiquities
# Create a zero padded (interpolated by Ns) symbol sequence.
# This prepares the symbol sequence for arbitrary pulse shaping.
symbI = np.hstack((xI.reshape(N_symb,1),np.zeros((N_symb,int(Ns)-1))))
symbI = symbI.flatten()
symbQ = np.hstack((xQ.reshape(N_symb,1),np.zeros((N_symb,int(Ns)-1))))
symbQ = symbQ.flatten()
symb = symbI + 1j*symbQ
if M > 2:
symb /= (M-1)
# The impulse train waveform contains one pulse per Ns (or Ts) samples
# imp_train = [ones(K,1) zeros(K,Ns-1)]';
# imp_train = reshape(imp_train,Ns*K,1);
# Filter the impulse train signal
x = signal.lfilter(b,1,symb)
x = x.flatten() # out is a 1D vector
# Scale shaping filter to have unity DC gain
b = b/sum(b)
return x, b, xI+1j*xQ
|
def QAM_SEP(tx_data,rx_data,mod_type,Ncorr = 1024,Ntransient = 0,SEP_disp=True):
"""
Nsymb, Nerr, SEP_hat =
QAM_symb_errors(tx_data,rx_data,mod_type,Ncorr = 1024,Ntransient = 0)
Count symbol errors between a transmitted and received QAM signal.
The received symbols are assumed to be soft values on a unit square.
Time delay between streams is detected.
The ndarray tx_data is Tx complex symbols.
The ndarray rx_data is Rx complex symbols.
Note: Ncorr needs to be even
"""
#Remove Ntransient symbols and makes lengths equal
tx_data = tx_data[Ntransient:]
rx_data = rx_data[Ntransient:]
Nmin = min([len(tx_data),len(rx_data)])
tx_data = tx_data[:Nmin]
rx_data = rx_data[:Nmin]
# Perform level translation and quantize the soft symbol values
if mod_type.lower() == 'qpsk':
M = 2 # bits per symbol
elif mod_type.lower() == '16qam':
M = 4
elif mod_type.lower() == '64qam':
M = 8
elif mod_type.lower() == '256qam':
M = 16
else:
raise ValueError('Unknown mod_type')
rx_data = np.rint((M-1)*(rx_data + (1+1j))/2.)
# Fix-up edge points real part
s1r = np.nonzero(np.ravel(rx_data.real > M - 1))[0]
s2r = np.nonzero(np.ravel(rx_data.real < 0))[0]
rx_data.real[s1r] = (M - 1)*np.ones(len(s1r))
rx_data.real[s2r] = np.zeros(len(s2r))
# Fix-up edge points imag part
s1i = np.nonzero(np.ravel(rx_data.imag > M - 1))[0]
s2i = np.nonzero(np.ravel(rx_data.imag < 0))[0]
rx_data.imag[s1i] = (M - 1)*np.ones(len(s1i))
rx_data.imag[s2i] = np.zeros(len(s2i))
rx_data = 2*rx_data - (M - 1)*(1 + 1j)
#Correlate the first Ncorr symbols at four possible phase rotations
R0,lags = xcorr(rx_data,tx_data,Ncorr)
R1,lags = xcorr(rx_data*(1j)**1,tx_data,Ncorr)
R2,lags = xcorr(rx_data*(1j)**2,tx_data,Ncorr)
R3,lags = xcorr(rx_data*(1j)**3,tx_data,Ncorr)
#Place the zero lag value in the center of the array
R0max = np.max(R0.real)
R1max = np.max(R1.real)
R2max = np.max(R2.real)
R3max = np.max(R3.real)
R = np.array([R0max,R1max,R2max,R3max])
Rmax = np.max(R)
kphase_max = np.where(R == Rmax)[0]
kmax = kphase_max[0]
#Find correlation lag value is zero at the center of the array
if kmax == 0:
lagmax = lags[np.where(R0.real == Rmax)[0]]
elif kmax == 1:
lagmax = lags[np.where(R1.real == Rmax)[0]]
elif kmax == 2:
lagmax = lags[np.where(R2.real == Rmax)[0]]
elif kmax == 3:
lagmax = lags[np.where(R3.real == Rmax)[0]]
taumax = lagmax[0]
if SEP_disp:
print('Phase ambiquity = (1j)**%d, taumax = %d' % (kmax, taumax))
#Count symbol errors over the entire input ndarrays
#Begin by making tx and rx length equal and apply
#phase rotation to rx_data
if taumax < 0:
tx_data = tx_data[-taumax:]
tx_data = tx_data[:min(len(tx_data),len(rx_data))]
rx_data = (1j)**kmax*rx_data[:len(tx_data)]
else:
rx_data = (1j)**kmax*rx_data[taumax:]
rx_data = rx_data[:min(len(tx_data),len(rx_data))]
tx_data = tx_data[:len(rx_data)]
#Convert QAM symbol difference to symbol errors
errors = np.int16(abs(rx_data-tx_data))
# Detect symbols errors
# Could decode bit errors from symbol index difference
idx = np.nonzero(np.ravel(errors != 0))[0]
if SEP_disp:
print('Symbols = %d, Errors %d, SEP = %1.2e' \
% (len(errors), len(idx), len(idx)/float(len(errors))))
return len(errors), len(idx), len(idx)/float(len(errors))
|
def GMSK_bb(N_bits, Ns, MSK = 0,BT = 0.35):
"""
MSK/GMSK Complex Baseband Modulation
x,data = gmsk(N_bits, Ns, BT = 0.35, MSK = 0)
Parameters
----------
N_bits : number of symbols processed
Ns : the number of samples per bit
MSK : 0 for no shaping which is standard MSK, MSK <> 0 --> GMSK is generated.
BT : premodulation Bb*T product which sets the bandwidth of the Gaussian lowpass filter
Mark Wickert Python version November 2014
"""
x, b, data = NRZ_bits(N_bits,Ns)
# pulse length 2*M*Ns
M = 4
n = np.arange(-M*Ns,M*Ns+1)
p = np.exp(-2*np.pi**2*BT**2/np.log(2)*(n/float(Ns))**2);
p = p/np.sum(p);
# Gaussian pulse shape if MSK not zero
if MSK != 0:
x = signal.lfilter(p,1,x)
y = np.exp(1j*np.pi/2*np.cumsum(x)/Ns)
return y, data
|
def MPSK_bb(N_symb,Ns,M,pulse='rect',alpha = 0.25,MM=6):
"""
Generate a complex baseband MPSK signal with pulse shaping.
Parameters
----------
N_symb : number of MPSK symbols to produce
Ns : the number of samples per bit,
M : MPSK modulation order, e.g., 4, 8, 16, ...
pulse_type : 'rect' , 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
MM : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the MPSK signal values
b : ndarray of the pulse shape
data : ndarray of the underlying data bits
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
This function is used by BPSK_tx in the Case Study article.
Examples
--------
>>> from sk_dsp_comm import digitalcom as dc
>>> import scipy.signal as signal
>>> import matplotlib.pyplot as plt
>>> x,b,data = dc.MPSK_bb(500,10,8,'src',0.35)
>>> # Matched filter received signal x
>>> y = signal.lfilter(b,1,x)
>>> plt.plot(y.real[12*10:],y.imag[12*10:])
>>> plt.xlabel('In-Phase')
>>> plt.ylabel('Quadrature')
>>> plt.axis('equal')
>>> # Sample once per symbol
>>> plt.plot(y.real[12*10::10],y.imag[12*10::10],'r.')
>>> plt.show()
"""
data = np.random.randint(0,M,N_symb)
xs = np.exp(1j*2*np.pi/M*data)
x = np.hstack((xs.reshape(N_symb,1),np.zeros((N_symb,int(Ns)-1))))
x =x.flatten()
if pulse.lower() == 'rect':
b = np.ones(int(Ns))
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,MM)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,MM)
else:
raise ValueError('pulse type must be rec, rc, or src')
x = signal.lfilter(b,1,x)
if M == 4:
x = x*np.exp(1j*np.pi/4); # For QPSK points in quadrants
return x,b/float(Ns),data
|
def QPSK_rx(fc,N_symb,Rs,EsN0=100,fs=125,lfsr_len=10,phase=0,pulse='src'):
"""
This function generates
"""
Ns = int(np.round(fs/Rs))
print('Ns = ', Ns)
print('Rs = ', fs/float(Ns))
print('EsN0 = ', EsN0, 'dB')
print('phase = ', phase, 'degrees')
print('pulse = ', pulse)
x, b, data = QPSK_bb(N_symb,Ns,lfsr_len,pulse)
# Add AWGN to x
x = cpx_AWGN(x,EsN0,Ns)
n = np.arange(len(x))
xc = x*np.exp(1j*2*np.pi*fc/float(fs)*n) * np.exp(1j*phase)
return xc, b, data
|
def QPSK_BEP(tx_data,rx_data,Ncorr = 1024,Ntransient = 0):
"""
Count bit errors between a transmitted and received QPSK signal.
Time delay between streams is detected as well as ambiquity resolution
due to carrier phase lock offsets of :math:`k*\\frac{\\pi}{4}`, k=0,1,2,3.
The ndarray sdata is Tx +/-1 symbols as complex numbers I + j*Q.
The ndarray data is Rx +/-1 symbols as complex numbers I + j*Q.
Note: Ncorr needs to be even
"""
#Remove Ntransient symbols
tx_data = tx_data[Ntransient:]
rx_data = rx_data[Ntransient:]
#Correlate the first Ncorr symbols at four possible phase rotations
R0 = np.fft.ifft(np.fft.fft(rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
R1 = np.fft.ifft(np.fft.fft(1j*rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
R2 = np.fft.ifft(np.fft.fft(-1*rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
R3 = np.fft.ifft(np.fft.fft(-1j*rx_data,Ncorr)*
np.conj(np.fft.fft(tx_data,Ncorr)))
#Place the zero lag value in the center of the array
R0 = np.fft.fftshift(R0)
R1 = np.fft.fftshift(R1)
R2 = np.fft.fftshift(R2)
R3 = np.fft.fftshift(R3)
R0max = np.max(R0.real)
R1max = np.max(R1.real)
R2max = np.max(R2.real)
R3max = np.max(R3.real)
R = np.array([R0max,R1max,R2max,R3max])
Rmax = np.max(R)
kphase_max = np.where(R == Rmax)[0]
kmax = kphase_max[0]
#Correlation lag value is zero at the center of the array
if kmax == 0:
lagmax = np.where(R0.real == Rmax)[0] - Ncorr/2
elif kmax == 1:
lagmax = np.where(R1.real == Rmax)[0] - Ncorr/2
elif kmax == 2:
lagmax = np.where(R2.real == Rmax)[0] - Ncorr/2
elif kmax == 3:
lagmax = np.where(R3.real == Rmax)[0] - Ncorr/2
taumax = lagmax[0]
print('kmax = %d, taumax = %d' % (kmax, taumax))
# Count bit and symbol errors over the entire input ndarrays
# Begin by making tx and rx length equal and apply phase rotation to rx
if taumax < 0:
tx_data = tx_data[-taumax:]
tx_data = tx_data[:min(len(tx_data),len(rx_data))]
rx_data = 1j**kmax*rx_data[:len(tx_data)]
else:
rx_data = 1j**kmax*rx_data[taumax:]
rx_data = rx_data[:min(len(tx_data),len(rx_data))]
tx_data = tx_data[:len(rx_data)]
#Convert to 0's and 1's
S_count = len(tx_data)
tx_I = np.int16((tx_data.real + 1)/2)
tx_Q = np.int16((tx_data.imag + 1)/2)
rx_I = np.int16((rx_data.real + 1)/2)
rx_Q = np.int16((rx_data.imag + 1)/2)
I_errors = tx_I ^ rx_I
Q_errors = tx_Q ^ rx_Q
#A symbol errors occurs when I or Q or both are in error
S_errors = I_errors | Q_errors
#return 0
return S_count,np.sum(I_errors),np.sum(Q_errors),np.sum(S_errors)
|
def BPSK_tx(N_bits,Ns,ach_fc=2.0,ach_lvl_dB=-100,pulse='rect',alpha = 0.25,M=6):
"""
Generates biphase shift keyed (BPSK) transmitter with adjacent channel interference.
Generates three BPSK signals with rectangular or square root raised cosine (SRC)
pulse shaping of duration N_bits and Ns samples per bit. The desired signal is
centered on f = 0, which the adjacent channel signals to the left and right
are also generated at dB level relative to the desired signal. Used in the
digital communications Case Study supplement.
Parameters
----------
N_bits : the number of bits to simulate
Ns : the number of samples per bit
ach_fc : the frequency offset of the adjacent channel signals (default 2.0)
ach_lvl_dB : the level of the adjacent channel signals in dB (default -100)
pulse : the pulse shape 'rect' or 'src'
alpha : square root raised cosine pulse shape factor (default = 0.25)
M : square root raised cosine pulse truncation factor (default = 6)
Returns
-------
x : ndarray of the composite signal x0 + ach_lvl*(x1p + x1m)
b : the transmit pulse shape
data0 : the data bits used to form the desired signal; used for error checking
Notes
-----
Examples
--------
>>> x,b,data0 = BPSK_tx(1000,10,'src')
"""
x0,b,data0 = NRZ_bits(N_bits,Ns,pulse,alpha,M)
x1p,b,data1p = NRZ_bits(N_bits,Ns,pulse,alpha,M)
x1m,b,data1m = NRZ_bits(N_bits,Ns,pulse,alpha,M)
n = np.arange(len(x0))
x1p = x1p*np.exp(1j*2*np.pi*ach_fc/float(Ns)*n)
x1m = x1m*np.exp(-1j*2*np.pi*ach_fc/float(Ns)*n)
ach_lvl = 10**(ach_lvl_dB/20.)
return x0 + ach_lvl*(x1p + x1m), b, data0
|
def rc_imp(Ns,alpha,M=6):
"""
A truncated raised cosine pulse used in digital communications.
The pulse shaping factor :math:`0 < \\alpha < 1` is required as well as the
truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
See Also
--------
sqrt_rc_imp
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform.
Examples
--------
Ten samples per symbol and :math:`\\alpha = 0.35`.
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.digitalcom import rc_imp
>>> from numpy import arange
>>> b = rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> plt.stem(n,b)
>>> plt.show()
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
a = alpha
Ns *= 1.0
for i in range(len(n)):
if (1 - 4*(a*n[i]/Ns)**2) == 0:
b[i] = np.pi/4*np.sinc(1/(2.*a))
else:
b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(1 - 4*(a*n[i]/Ns)**2)
return b
|
def sqrt_rc_imp(Ns,alpha,M=6):
"""
A truncated square root raised cosine pulse used in digital communications.
The pulse shaping factor :math:`0 < \\alpha < 1` is required as well as the
truncation factor M which sets the pulse duration to be :math:`2*M*T_{symbol}`.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform. When
square root raised cosine (SRC) pulse is used to generate Tx signals and
at the receiver used as a matched filter (receiver FIR filter), the
received signal is now raised cosine shaped, thus having zero
intersymbol interference and the optimum removal of additive white
noise if present at the receiver input.
Examples
--------
Ten samples per symbol and :math:`\\alpha = 0.35`.
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.digitalcom import sqrt_rc_imp
>>> b = sqrt_rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> plt.stem(n,b)
>>> plt.show()
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
Ns *= 1.0
a = alpha
for i in range(len(n)):
if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(np.float).eps/2:
b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a)))
else:
b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2))
b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a))
return b
|
def RZ_bits(N_bits,Ns,pulse='rect',alpha = 0.25,M=6):
"""
Generate return-to-zero (RZ) data bits with pulse shaping.
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping.
Parameters
----------
N_bits : number of RZ {0,1} data bits to produce
Ns : the number of samples per bit,
pulse_type : 'rect' , 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the RZ signal values
b : ndarray of the pulse shape
data : ndarray of the underlying data bits
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
This function is used by BPSK_tx in the Case Study article.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.digitalcom import RZ_bits
>>> x,b,data = RZ_bits(100,10)
>>> t = arange(len(x))
>>> plt.plot(t,x)
>>> plt.ylim([-0.01, 1.01])
>>> plt.show()
"""
data = np.random.randint(0,2,N_bits)
x = np.hstack((data.reshape(N_bits,1),np.zeros((N_bits,int(Ns)-1))))
x =x.flatten()
if pulse.lower() == 'rect':
b = np.ones(int(Ns))
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
else:
print('pulse type must be rec, rc, or src')
x = signal.lfilter(b,1,x)
return x,b/float(Ns),data
|
def my_psd(x,NFFT=2**10,Fs=1):
"""
A local version of NumPy's PSD function that returns the plot arrays.
A mlab.psd wrapper function that returns two ndarrays;
makes no attempt to auto plot anything.
Parameters
----------
x : ndarray input signal
NFFT : a power of two, e.g., 2**10 = 1024
Fs : the sampling rate in Hz
Returns
-------
Px : ndarray of the power spectrum estimate
f : ndarray of frequency values
Notes
-----
This function makes it easier to overlay spectrum plots because
you have better control over the axis scaling than when using psd()
in the autoscale mode.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import digitalcom as dc
>>> from numpy import log10
>>> x,b, data = dc.NRZ_bits(10000,10)
>>> Px,f = dc.my_psd(x,2**10,10)
>>> plt.plot(f, 10*log10(Px))
>>> plt.show()
"""
Px,f = pylab.mlab.psd(x,NFFT,Fs)
return Px.flatten(), f
|
def time_delay(x,D,N=4):
"""
A time varying time delay which takes advantage of the Farrow structure
for cubic interpolation:
y = time_delay(x,D,N = 3)
Note that D is an array of the same length as the input signal x. This
allows you to make the delay a function of time. If you want a constant
delay just use D*zeros(len(x)). The minimum delay allowable is one sample
or D = 1.0. This is due to the causal system nature of the Farrow
structure.
A founding paper on the subject of interpolators is: C. W. Farrow, "A
Continuously variable Digital Delay Element," Proceedings of the IEEE
Intern. Symp. on Circuits Syst., pp. 2641-2645, June 1988.
Mark Wickert, February 2014
"""
if type(D) == float or type(D) == int:
#Make sure D stays with in the tapped delay line bounds
if int(np.fix(D)) < 1:
print('D has integer part less than one')
exit(1)
if int(np.fix(D)) > N-2:
print('D has integer part greater than N - 2')
exit(1)
# Filter 4-tap input with four Farrow FIR filters
# Since the time delay is a constant, the LTI filter
# function from scipy.signal is convenient.
D_frac = D - np.fix(D)
Nd = int(np.fix(D))
b = np.zeros(Nd + 4)
# Load Lagrange coefficients into the last four FIR taps
b[Nd] = -(D_frac-1)*(D_frac-2)*(D_frac-3)/6.
b[Nd + 1] = D_frac*(D_frac-2)*(D_frac-3)/2.
b[Nd + 2] = -D_frac*(D_frac-1)*(D_frac-3)/2.
b[Nd + 3] = D_frac*(D_frac-1)*(D_frac-2)/6.
# Do all of the filtering in one step for this special case
# of a fixed delay.
y = signal.lfilter(b,[1],x)
else:
# Make sure D stays with in the tapped delay line bounds
if np.fix(np.min(D)) < 1:
print('D has integer part less than one')
exit(1)
if np.fix(np.max(D)) > N-2:
print('D has integer part greater than N - 2')
exit(1)
y = np.zeros(len(x))
X = np.zeros(N+1)
# Farrow filter tap weights
W3 = np.array([[1./6, -1./2, 1./2, -1./6]])
W2 = np.array([[0, 1./2, -1., 1./2]])
W1 = np.array([[-1./6, 1., -1./2, -1./3]])
W0 = np.array([[0, 0, 1., 0]])
for k in range(len(x)):
Nd = int(np.fix(D[k]))
mu = 1 - (D[k]-np.fix(D[k]))
# Form a row vector of signal samples, present and past values
X = np.hstack((np.array(x[k]), X[:-1]))
# Filter 4-tap input with four Farrow FIR filters
# Here numpy dot(A,B) performs the matrix multiply
# since the filter has time-varying coefficients
v3 = np.dot(W3,np.array(X[Nd-1:Nd+3]).T)
v2 = np.dot(W2,np.array(X[Nd-1:Nd+3]).T)
v1 = np.dot(W1,np.array(X[Nd-1:Nd+3]).T)
v0 = np.dot(W0,np.array(X[Nd-1:Nd+3]).T)
#Combine sub-filter outputs using mu = 1 - d
y[k] = ((v3[0]*mu + v2[0])*mu + v1[0])*mu + v0[0]
return y
|
def xcorr(x1,x2,Nlags):
"""
r12, k = xcorr(x1,x2,Nlags), r12 and k are ndarray's
Compute the energy normalized cross correlation between the sequences
x1 and x2. If x1 = x2 the cross correlation is the autocorrelation.
The number of lags sets how many lags to return centered about zero
"""
K = 2*(int(np.floor(len(x1)/2)))
X1 = fft.fft(x1[:K])
X2 = fft.fft(x2[:K])
E1 = sum(abs(x1[:K])**2)
E2 = sum(abs(x2[:K])**2)
r12 = np.fft.ifft(X1*np.conj(X2))/np.sqrt(E1*E2)
k = np.arange(K) - int(np.floor(K/2))
r12 = np.fft.fftshift(r12)
idx = np.nonzero(np.ravel(abs(k) <= Nlags))
return r12[idx], k[idx]
|
def PCM_encode(x,N_bits):
"""
Parameters
----------
x : signal samples to be PCM encoded
N_bits ; bit precision of PCM samples
Returns
-------
x_bits = encoded serial bit stream of 0/1 values. MSB first.
Mark Wickert, Mark 2015
"""
xq = np.int16(np.rint(x*2**(N_bits-1)))
x_bits = np.zeros((N_bits,len(xq)))
for k, xk in enumerate(xq):
x_bits[:,k] = to_bin(xk,N_bits)
# Reshape into a serial bit stream
x_bits = np.reshape(x_bits,(1,len(x)*N_bits),'F')
return np.int16(x_bits.flatten())
|
def to_bin(data, width):
"""
Convert an unsigned integer to a numpy binary array with the first
element the MSB and the last element the LSB.
"""
data_str = bin(data & (2**width-1))[2:].zfill(width)
return [int(x) for x in tuple(data_str)]
|
def from_bin(bin_array):
"""
Convert binary array back a nonnegative integer. The array length is
the bit width. The first input index holds the MSB and the last holds the LSB.
"""
width = len(bin_array)
bin_wgts = 2**np.arange(width-1,-1,-1)
return int(np.dot(bin_array,bin_wgts))
|
def PCM_decode(x_bits,N_bits):
"""
Parameters
----------
x_bits : serial bit stream of 0/1 values. The length of
x_bits must be a multiple of N_bits
N_bits : bit precision of PCM samples
Returns
-------
xhat : decoded PCM signal samples
Mark Wickert, March 2015
"""
N_samples = len(x_bits)//N_bits
# Convert serial bit stream into parallel words with each
# column holdingthe N_bits binary sample value
xrs_bits = x_bits.copy()
xrs_bits = np.reshape(xrs_bits,(N_bits,N_samples),'F')
# Convert N_bits binary words into signed integer values
xq = np.zeros(N_samples)
w = 2**np.arange(N_bits-1,-1,-1) # binary weights for bin
# to dec conversion
for k in range(N_samples):
xq[k] = np.dot(xrs_bits[:,k],w) - xrs_bits[0,k]*2**N_bits
return xq/2**(N_bits-1)
|
def mux_pilot_blocks(IQ_data, Np):
"""
Parameters
----------
IQ_data : a 2D array of input QAM symbols with the columns
representing the NF carrier frequencies and each
row the QAM symbols used to form an OFDM symbol
Np : the period of the pilot blocks; e.g., a pilot block is
inserted every Np OFDM symbols (Np-1 OFDM data symbols
of width Nf are inserted in between the pilot blocks.
Returns
-------
IQ_datap : IQ_data with pilot blocks inserted
See Also
--------
OFDM_tx
Notes
-----
A helper function called by :func:`OFDM_tx` that inserts pilot block for use
in channel estimation when a delay spread channel is present.
"""
N_OFDM = IQ_data.shape[0]
Npb = N_OFDM // (Np - 1)
N_OFDM_rem = N_OFDM - Npb * (Np - 1)
Nf = IQ_data.shape[1]
IQ_datap = np.zeros((N_OFDM + Npb + 1, Nf), dtype=np.complex128)
pilots = np.ones(Nf) # The pilot symbol is simply 1 + j0
for k in range(Npb):
IQ_datap[Np * k:Np * (k + 1), :] = np.vstack((pilots,
IQ_data[(Np - 1) * k:(Np - 1) * (k + 1), :]))
IQ_datap[Np * Npb:Np * (Npb + N_OFDM_rem), :] = np.vstack((pilots,
IQ_data[(Np - 1) * Npb:, :]))
return IQ_datap
|
def NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3):
"""
zz,e_tau = NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3)
z = complex baseband input signal at nominally Ns samples
per symbol
Ns = Nominal number of samples per symbol (Ts/T) in the symbol
tracking loop, often 4
BnTs = time bandwidth product of loop bandwidth and the symbol period,
thus the loop bandwidth as a fraction of the symbol rate.
zeta = loop damping factor
I_ord = interpolator order, 1, 2, or 3
e_tau = the timing error e(k) input to the loop filter
Kp = The phase detector gain in the symbol tracking loop; for the
NDA algoithm used here always 1
Mark Wickert July 2014
Motivated by code found in M. Rice, Digital Communications A Discrete-Time
Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
"""
# Loop filter parameters
K0 = -1.0 # The modulo 1 counter counts down so a sign change in loop
Kp = 1.0
K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0
K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0
zz = np.zeros(len(z),dtype=np.complex128)
#zz = np.zeros(int(np.floor(len(z)/float(Ns))),dtype=np.complex128)
e_tau = np.zeros(len(z))
#e_tau = np.zeros(int(np.floor(len(z)/float(Ns))))
#z_TED_buff = np.zeros(Ns)
c1_buff = np.zeros(2*L+1)
vi = 0
CNT_next = 0
mu_next = 0
underflow = 0
epsilon = 0
mm = 1
z = np.hstack(([0], z))
for nn in range(1,Ns*int(np.floor(len(z)/float(Ns)-(Ns-1)))):
# Define variables used in linear interpolator control
CNT = CNT_next
mu = mu_next
if underflow == 1:
if I_ord == 1:
# Decimated interpolator output (piecewise linear)
z_interp = mu*z[nn] + (1 - mu)*z[nn-1]
elif I_ord == 2:
# Decimated interpolator output (piecewise parabolic)
# in Farrow form with alpha = 1/2
v2 = 1/2.*np.sum(z[nn+2:nn-1-1:-1]*[1, -1, -1, 1])
v1 = 1/2.*np.sum(z[nn+2:nn-1-1:-1]*[-1, 3, -1, -1])
v0 = z[nn]
z_interp = (mu*v2 + v1)*mu + v0
elif I_ord == 3:
# Decimated interpolator output (piecewise cubic)
# in Farrow form
v3 = np.sum(z[nn+2:nn-1-1:-1]*[1/6., -1/2., 1/2., -1/6.])
v2 = np.sum(z[nn+2:nn-1-1:-1]*[0, 1/2., -1, 1/2.])
v1 = np.sum(z[nn+2:nn-1-1:-1]*[-1/6., 1, -1/2., -1/3.])
v0 = z[nn]
z_interp = ((mu*v3 + v2)*mu + v1)*mu + v0
else:
print('Error: I_ord must 1, 2, or 3')
# Form TED output that is smoothed using 2*L+1 samples
# We need Ns interpolants for this TED: 0:Ns-1
c1 = 0
for kk in range(Ns):
if I_ord == 1:
# piecewise linear interp over Ns samples for TED
z_TED_interp = mu*z[nn+kk] + (1 - mu)*z[nn-1+kk]
elif I_ord == 2:
# piecewise parabolic in Farrow form with alpha = 1/2
v2 = 1/2.*np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[1, -1, -1, 1])
v1 = 1/2.*np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[-1, 3, -1, -1])
v0 = z[nn+kk]
z_TED_interp = (mu*v2 + v1)*mu + v0
elif I_ord == 3:
# piecewise cubic in Farrow form
v3 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[1/6., -1/2., 1/2., -1/6.])
v2 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[0, 1/2., -1, 1/2.])
v1 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[-1/6., 1, -1/2., -1/3.])
v0 = z[nn+kk]
z_TED_interp = ((mu*v3 + v2)*mu + v1)*mu + v0
else:
print('Error: I_ord must 1, 2, or 3')
c1 = c1 + np.abs(z_TED_interp)**2 * np.exp(-1j*2*np.pi/Ns*kk)
c1 = c1/Ns
# Update 2*L+1 length buffer for TED output smoothing
c1_buff = np.hstack(([c1], c1_buff[:-1]))
# Form the smoothed TED output
epsilon = -1/(2*np.pi)*np.angle(np.sum(c1_buff)/(2*L+1))
# Save symbol spaced (decimated to symbol rate) interpolants in zz
zz[mm] = z_interp
e_tau[mm] = epsilon # log the error to the output vector e
mm += 1
else:
# Simple zezo-order hold interpolation between symbol samples
# we just coast using the old value
#epsilon = 0
pass
vp = K1*epsilon # proportional component of loop filter
vi = vi + K2*epsilon # integrator component of loop filter
v = vp + vi # loop filter output
W = 1/float(Ns) + v # counter control word
# update registers
CNT_next = CNT - W # Update counter value for next cycle
if CNT_next < 0: # Test to see if underflow has occured
CNT_next = 1 + CNT_next # Reduce counter value modulo-1 if underflow
underflow = 1 # Set the underflow flag
mu_next = CNT/W # update mu
else:
underflow = 0
mu_next = mu
# Remove zero samples at end
zz = zz[:-(len(zz)-mm+1)]
# Normalize so symbol values have a unity magnitude
zz /=np.std(zz)
e_tau = e_tau[:-(len(e_tau)-mm+1)]
return zz, e_tau
|
def DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0):
"""
z_prime,a_hat,e_phi = DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0)
Decision directed carrier phase tracking
z = complex baseband PSK signal at one sample per symbol
M = The PSK modulation order, i.e., 2, 8, or 8.
BnTs = time bandwidth product of loop bandwidth and the symbol period,
thus the loop bandwidth as a fraction of the symbol rate.
zeta = loop damping factor
type = Phase error detector type: 0 <> ML, 1 <> heuristic
z_prime = phase rotation output (like soft symbol values)
a_hat = the hard decision symbol values landing at the constellation
values
e_phi = the phase error e(k) into the loop filter
Ns = Nominal number of samples per symbol (Ts/T) in the carrier
phase tracking loop, almost always 1
Kp = The phase detector gain in the carrier phase tracking loop;
This value depends upon the algorithm type. For the ML scheme
described at the end of notes Chapter 9, A = 1, K 1/sqrt(2),
so Kp = sqrt(2).
Mark Wickert July 2014
Motivated by code found in M. Rice, Digital Communications A Discrete-Time
Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
"""
Ns = 1
Kp = np.sqrt(2.) # for type 0
z_prime = np.zeros_like(z)
a_hat = np.zeros_like(z)
e_phi = np.zeros(len(z))
theta_h = np.zeros(len(z))
theta_hat = 0
# Tracking loop constants
K0 = 1;
K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0;
K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0;
# Initial condition
vi = 0
for nn in range(len(z)):
# Multiply by the phase estimate exp(-j*theta_hat[n])
z_prime[nn] = z[nn]*np.exp(-1j*theta_hat)
if M == 2:
a_hat[nn] = np.sign(z_prime[nn].real) + 1j*0
elif M == 4:
a_hat[nn] = np.sign(z_prime[nn].real) + 1j*np.sign(z_prime[nn].imag)
elif M == 8:
a_hat[nn] = np.angle(z_prime[nn])/(2*np.pi/8.)
# round to the nearest integer and fold to nonnegative
# integers; detection into M-levels with thresholds at mid points.
a_hat[nn] = np.mod(round(a_hat[nn]),8)
a_hat[nn] = np.exp(1j*2*np.pi*a_hat[nn]/8)
else:
raise ValueError('M must be 2, 4, or 8')
if type == 0:
# Maximum likelihood (ML)
e_phi[nn] = z_prime[nn].imag * a_hat[nn].real - \
z_prime[nn].real * a_hat[nn].imag
elif type == 1:
# Heuristic
e_phi[nn] = np.angle(z_prime[nn]) - np.angle(a_hat[nn])
else:
raise ValueError('Type must be 0 or 1')
vp = K1*e_phi[nn] # proportional component of loop filter
vi = vi + K2*e_phi[nn] # integrator component of loop filter
v = vp + vi # loop filter output
theta_hat = np.mod(theta_hat + v,2*np.pi)
theta_h[nn] = theta_hat # phase track output array
#theta_hat = 0 # for open-loop testing
# Normalize outputs to have QPSK points at (+/-)1 + j(+/-)1
#if M == 4:
# z_prime = z_prime*np.sqrt(2)
return z_prime, a_hat, e_phi, theta_h
|
def time_step(z,Ns,t_step,Nstep):
"""
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param Ns: number of sample per symbol
:param t_step: in samples relative to Ns
:param Nstep: symbol sample location where the step turns on
:return: the one sample per symbol signal containing the phase step
Mark Wickert July 2014
"""
z_step = np.hstack((z[:Ns*Nstep], z[(Ns*Nstep+t_step):], np.zeros(t_step)))
return z_step
|
def phase_step(z,Ns,p_step,Nstep):
"""
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param Ns: number of sample per symbol
:param p_step: size in radians of the phase step
:param Nstep: symbol sample location where the step turns on
:return: the one sample symbol signal containing the phase step
Mark Wickert July 2014
"""
nn = np.arange(0,len(z[::Ns]))
theta = np.zeros(len(nn))
idx = np.where(nn >= Nstep)
theta[idx] = p_step*np.ones(len(idx))
z_rot = z[::Ns]*np.exp(1j*theta)
return z_rot
|
def PLL1(theta,fs,loop_type,Kv,fn,zeta,non_lin):
"""
Baseband Analog PLL Simulation Model
:param theta: input phase deviation in radians
:param fs: sampling rate in sample per second or Hz
:param loop_type: 1, first-order loop filter F(s)=K_LF; 2, integrator
with lead compensation F(s) = (1 + s tau2)/(s tau1),
i.e., a type II, or 3, lowpass with lead compensation
F(s) = (1 + s tau2)/(1 + s tau1)
:param Kv: VCO gain in Hz/v; note presently assume Kp = 1v/rad
and K_LF = 1; the user can easily change this
:param fn: Loop natural frequency (loops 2 & 3) or cutoff
frquency (loop 1)
:param zeta: Damping factor for loops 2 & 3
:param non_lin: 0, linear phase detector; 1, sinusoidal phase detector
:return: theta_hat = Output phase estimate of the input theta in radians,
ev = VCO control voltage,
phi = phase error = theta - theta_hat
Notes
-----
Alternate input in place of natural frequency, fn, in Hz is
the noise equivalent bandwidth Bn in Hz.
Mark Wickert, April 2007 for ECE 5625/4625
Modified February 2008 and July 2014 for ECE 5675/4675
Python version August 2014
"""
T = 1/float(fs)
Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v
if loop_type == 1:
# First-order loop parameters
# Note Bn = K/4 Hz but K has units of rad/s
#fn = 4*Bn/(2*pi);
K = 2*np.pi*fn # loop natural frequency in rad/s
elif loop_type == 2:
# Second-order loop parameters
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = 4 *np.pi*zeta*fn # loop natural frequency in rad/s
tau2 = zeta/(np.pi*fn)
elif loop_type == 3:
# Second-order loop parameters for one-pole lowpass with
# phase lead correction.
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = Kv # Essentially the VCO gain sets the single-sided
# hold-in range in Hz, as it is assumed that Kp = 1
# and KLF = 1.
tau1 = K/((2*np.pi*fn)**2)
tau2 = 2*zeta/(2*np.pi*fn)*(1 - 2*np.pi*fn/K*1/(2*zeta))
else:
print('Loop type must be 1, 2, or 3')
# Initialize integration approximation filters
filt_in_last = 0; filt_out_last = 0;
vco_in_last = 0; vco_out = 0; vco_out_last = 0;
# Initialize working and final output vectors
n = np.arange(len(theta))
theta_hat = np.zeros_like(theta)
ev = np.zeros_like(theta)
phi = np.zeros_like(theta)
# Begin the simulation loop
for k in range(len(n)):
phi[k] = theta[k] - vco_out
if non_lin == 1:
# sinusoidal phase detector
pd_out = np.sin(phi[k])
else:
# Linear phase detector
pd_out = phi[k]
# Loop gain
gain_out = K/Kv*pd_out # apply VCO gain at VCO
# Loop filter
if loop_type == 2:
filt_in = (1/tau2)*gain_out
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
filt_out = filt_out + gain_out
elif loop_type == 3:
filt_in = (tau2/tau1)*gain_out - (1/tau1)*filt_out_last
u3 = filt_in + (1/tau2)*filt_out_last
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
else:
filt_out = gain_out;
# VCO
vco_in = filt_out
if loop_type == 3:
vco_in = u3
vco_out = vco_out_last + T/2*(vco_in + vco_in_last)
vco_in_last = vco_in
vco_out_last = vco_out
vco_out = Kv*vco_out # apply Kv
# Measured loop signals
ev[k] = vco_in
theta_hat[k] = vco_out
return theta_hat, ev, phi
|
def PLL_cbb(x,fs,loop_type,Kv,fn,zeta):
"""
Baseband Analog PLL Simulation Model
:param x: input phase deviation in radians
:param fs: sampling rate in sample per second or Hz
:param loop_type: 1, first-order loop filter F(s)=K_LF; 2, integrator
with lead compensation F(s) = (1 + s tau2)/(s tau1),
i.e., a type II, or 3, lowpass with lead compensation
F(s) = (1 + s tau2)/(1 + s tau1)
:param Kv: VCO gain in Hz/v; note presently assume Kp = 1v/rad
and K_LF = 1; the user can easily change this
:param fn: Loop natural frequency (loops 2 & 3) or cutoff
frequency (loop 1)
:param zeta: Damping factor for loops 2 & 3
:return: theta_hat = Output phase estimate of the input theta in radians,
ev = VCO control voltage,
phi = phase error = theta - theta_hat
Mark Wickert, April 2007 for ECE 5625/4625
Modified February 2008 and July 2014 for ECE 5675/4675
Python version August 2014
"""
T = 1/float(fs)
Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v
if loop_type == 1:
# First-order loop parameters
# Note Bn = K/4 Hz but K has units of rad/s
#fn = 4*Bn/(2*pi);
K = 2*np.pi*fn # loop natural frequency in rad/s
elif loop_type == 2:
# Second-order loop parameters
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = 4 *np.pi*zeta*fn # loop natural frequency in rad/s
tau2 = zeta/(np.pi*fn)
elif loop_type == 3:
# Second-order loop parameters for one-pole lowpass with
# phase lead correction.
#fn = 1/(2*pi) * 2*Bn/(zeta + 1/(4*zeta));
K = Kv # Essentially the VCO gain sets the single-sided
# hold-in range in Hz, as it is assumed that Kp = 1
# and KLF = 1.
tau1 = K/((2*np.pi*fn)^2);
tau2 = 2*zeta/(2*np.pi*fn)*(1 - 2*np.pi*fn/K*1/(2*zeta))
else:
print('Loop type must be 1, 2, or 3')
# Initialize integration approximation filters
filt_in_last = 0; filt_out_last = 0;
vco_in_last = 0; vco_out = 0; vco_out_last = 0;
vco_out_cbb = 0
# Initialize working and final output vectors
n = np.arange(len(x))
theta_hat = np.zeros(len(x))
ev = np.zeros(len(x))
phi = np.zeros(len(x))
# Begin the simulation loop
for k in range(len(n)):
#phi[k] = theta[k] - vco_out
phi[k] = np.imag(x[k] * np.conj(vco_out_cbb))
pd_out = phi[k]
# Loop gain
gain_out = K/Kv*pd_out # apply VCO gain at VCO
# Loop filter
if loop_type == 2:
filt_in = (1/tau2)*gain_out
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
filt_out = filt_out + gain_out
elif loop_type == 3:
filt_in = (tau2/tau1)*gain_out - (1/tau1)*filt_out_last
u3 = filt_in + (1/tau2)*filt_out_last
filt_out = filt_out_last + T/2*(filt_in + filt_in_last)
filt_in_last = filt_in
filt_out_last = filt_out
else:
filt_out = gain_out;
# VCO
vco_in = filt_out
if loop_type == 3:
vco_in = u3
vco_out = vco_out_last + T/2*(vco_in + vco_in_last)
vco_in_last = vco_in
vco_out_last = vco_out
vco_out = Kv*vco_out # apply Kv
vco_out_cbb = np.exp(1j*vco_out)
# Measured loop signals
ev[k] = vco_in
theta_hat[k] = vco_out
return theta_hat, ev, phi
|
def conv_Pb_bound(R,dfree,Ck,SNRdB,hard_soft,M=2):
"""
Coded bit error probabilty
Convolution coding bit error probability upper bound
according to Ziemer & Peterson 7-16, p. 507
Mark Wickert November 2014
Parameters
----------
R: Code rate
dfree: Free distance of the code
Ck: Weight coefficient
SNRdB: Signal to noise ratio in dB
hard_soft: 0 hard, 1 soft, 2 uncoded
M: M-ary
Examples
--------
>>> import numpy as np
>>> from sk_dsp_comm import fec_conv as fec
>>> import matplotlib.pyplot as plt
>>> SNRdB = np.arange(2,12,.1)
>>> Pb = fec.conv_Pb_bound(1./2,10,[36, 0, 211, 0, 1404, 0, 11633],SNRdB,2)
>>> Pb_1_2 = fec.conv_Pb_bound(1./2,10,[36, 0, 211, 0, 1404, 0, 11633],SNRdB,1)
>>> Pb_3_4 = fec.conv_Pb_bound(3./4,4,[164, 0, 5200, 0, 151211, 0, 3988108],SNRdB,1)
>>> plt.semilogy(SNRdB,Pb)
>>> plt.semilogy(SNRdB,Pb_1_2)
>>> plt.semilogy(SNRdB,Pb_3_4)
>>> plt.axis([2,12,1e-7,1e0])
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/2, K=7, Soft','R=3/4 (punc), K=7, Soft'),loc='best')
>>> plt.grid();
>>> plt.show()
Notes
-----
The code rate R is given by :math:`R_{s} = \\frac{k}{n}`.
Mark Wickert and Andrew Smit 2018
"""
Pb = np.zeros_like(SNRdB)
SNR = 10.**(SNRdB/10.)
for n,SNRn in enumerate(SNR):
for k in range(dfree,len(Ck)+dfree):
if hard_soft == 0: # Evaluate hard decision bound
Pb[n] += Ck[k-dfree]*hard_Pk(k,R,SNRn,M)
elif hard_soft == 1: # Evaluate soft decision bound
Pb[n] += Ck[k-dfree]*soft_Pk(k,R,SNRn,M)
else: # Compute Uncoded Pe
if M == 2:
Pb[n] = Q_fctn(np.sqrt(2.*SNRn))
else:
Pb[n] = 4./np.log2(M)*(1 - 1/np.sqrt(M))*\
np.gaussQ(np.sqrt(3*np.log2(M)/(M-1)*SNRn));
return Pb
|
def hard_Pk(k,R,SNR,M=2):
"""
Pk = hard_Pk(k,R,SNR)
Calculates Pk as found in Ziemer & Peterson eq. 7-12, p.505
Mark Wickert and Andrew Smit 2018
"""
k = int(k)
if M == 2:
p = Q_fctn(np.sqrt(2.*R*SNR))
else:
p = 4./np.log2(M)*(1 - 1./np.sqrt(M))*\
Q_fctn(np.sqrt(3*R*np.log2(M)/float(M-1)*SNR))
Pk = 0
#if 2*k//2 == k:
if np.mod(k,2) == 0:
for e in range(int(k/2+1),int(k+1)):
Pk += float(factorial(k))/(factorial(e)*factorial(k-e))*p**e*(1-p)**(k-e);
# Pk += 1./2*float(factorial(k))/(factorial(int(k/2))*factorial(int(k-k/2)))*\
# p**(k/2)*(1-p)**(k//2);
Pk += 1./2*float(factorial(k))/(factorial(int(k/2))*factorial(int(k-k/2)))*\
p**(k/2)*(1-p)**(k/2);
elif np.mod(k,2) == 1:
for e in range(int((k+1)//2),int(k+1)):
Pk += factorial(k)/(factorial(e)*factorial(k-e))*p**e*(1-p)**(k-e);
return Pk
|
def soft_Pk(k,R,SNR,M=2):
"""
Pk = soft_Pk(k,R,SNR)
Calculates Pk as found in Ziemer & Peterson eq. 7-13, p.505
Mark Wickert November 2014
"""
if M == 2:
Pk = Q_fctn(np.sqrt(2.*k*R*SNR))
else:
Pk = 4./np.log2(M)*(1 - 1./np.sqrt(M))*\
Q_fctn(np.sqrt(3*k*R*np.log2(M)/float(M-1)*SNR))
return Pk
|
def viterbi_decoder(self,x,metric_type='soft',quant_level=3):
"""
A method which performs Viterbi decoding of noisy bit stream,
taking as input soft bit values centered on +/-1 and returning
hard decision 0/1 bits.
Parameters
----------
x: Received noisy bit values centered on +/-1 at one sample per bit
metric_type:
'hard' - Hard decision metric. Expects binary or 0/1 input values.
'unquant' - unquantized soft decision decoding. Expects +/-1
input values.
'soft' - soft decision decoding.
quant_level: The quantization level for soft decoding. Expected
input values between 0 and 2^quant_level-1. 0 represents the most
confident 0 and 2^quant_level-1 represents the most confident 1.
Only used for 'soft' metric type.
Returns
-------
y: Decoded 0/1 bit stream
Examples
--------
>>> import numpy as np
>>> from numpy.random import randint
>>> import sk_dsp_comm.fec_conv as fec
>>> import sk_dsp_comm.digitalcom as dc
>>> import matplotlib.pyplot as plt
>>> # Soft decision rate 1/2 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 4
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc1 = fec.fec_conv(('11101','10011'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc1.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-3,1) # Channel SNR is 3 dB less for rate 1/2
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc1.viterbi_decoder(yn_hard,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/2 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 77, BEP = 7.72e-03
kmax = 0, taumax = 0
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
*****************************************************
Bits Received = 19952, Bit errors = 175, BEP = 8.77e-03
>>> # Consider the trellis traceback after the sim completes
>>> cc1.traceback_plot()
>>> plt.show()
>>> # Compare a collection of simulation results with soft decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3 = fec.conv_Pb_bound(1/3,8,[3, 0, 15],SNRdB,1)
>>> Pb_s_third_4 = fec.conv_Pb_bound(1/3,10,[6, 0, 6, 0],SNRdB,1)
>>> Pb_s_third_5 = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56],SNRdB,1)
>>> Pb_s_third_6 = fec.conv_Pb_bound(1/3,13,[1, 8, 26, 20, 19, 62],SNRdB,1)
>>> Pb_s_third_7 = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,1)
>>> Pb_s_third_8 = fec.conv_Pb_bound(1/3,16,[1, 0, 24, 0, 113, 0, 287, 0],SNRdB,1)
>>> Pb_s_half = fec.conv_Pb_bound(1/2,7,[4, 12, 20, 72, 225],SNRdB,1)
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_4,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_5,'g')
>>> plt.semilogy(SNRdB,Pb_s_third_6,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_7,'--')
>>> plt.semilogy(SNRdB,Pb_s_third_8,'--')
>>> plt.semilogy([0,1,2,3,4,5],[9.08e-02,2.73e-02,6.52e-03,\
8.94e-04,8.54e-05,5e-6],'gs')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Soft Decision Rate 1/2 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Soft',\
'R=1/3, K=4, Soft','R=1/3, K=5, Soft',\
'R=1/3, K=6, Soft','R=1/3, K=7, Soft',\
'R=1/3, K=8, Soft','R=1/3, K=5, Sim', \
'Simulation'),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Hard decision rate 1/3 simulation
>>> N_bits_per_frame = 10000
>>> EbN0 = 3
>>> total_bit_errors = 0
>>> total_bit_count = 0
>>> cc2 = fec.fec_conv(('11111','11011','10101'),25)
>>> # Encode with shift register starting state of '0000'
>>> state = '0000'
>>> while total_bit_errors < 100:
>>> # Create 100000 random 0/1 bits
>>> x = randint(0,2,N_bits_per_frame)
>>> y,state = cc2.conv_encoder(x,state)
>>> # Add channel noise to bits, include antipodal level shift to [-1,1]
>>> yn_soft = dc.cpx_AWGN(2*y-1,EbN0-10*np.log10(3),1) # Channel SNR is 10*log10(3) dB less
>>> yn_hard = ((np.sign(yn_soft.real)+1)/2).astype(int)
>>> z = cc2.viterbi_decoder(yn_hard.real,'hard')
>>> # Count bit errors
>>> bit_count, bit_errors = dc.bit_errors(x,z)
>>> total_bit_errors += bit_errors
>>> total_bit_count += bit_count
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
>>> print('*****************************************************')
>>> print('Bits Received = %d, Bit errors = %d, BEP = %1.2e' %\
(total_bit_count, total_bit_errors,\
total_bit_errors/total_bit_count))
Rate 1/3 Object
kmax = 0, taumax = 0
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
*****************************************************
Bits Received = 9976, Bit errors = 251, BEP = 2.52e-02
>>> # Compare a collection of simulation results with hard decision
>>> # bounds
>>> SNRdB = np.arange(0,12,.1)
>>> Pb_uc = fec.conv_Pb_bound(1/3,7,[4, 12, 20, 72, 225],SNRdB,2)
>>> Pb_s_third_3_hard = fec.conv_Pb_bound(1/3,8,[3, 0, 15, 0, 58, 0, 201, 0],SNRdB,0)
>>> Pb_s_third_5_hard = fec.conv_Pb_bound(1/3,12,[12, 0, 12, 0, 56, 0, 320, 0],SNRdB,0)
>>> Pb_s_third_7_hard = fec.conv_Pb_bound(1/3,14,[1, 0, 20, 0, 53, 0, 184],SNRdB,0)
>>> Pb_s_third_5_hard_sim = np.array([8.94e-04,1.11e-04,8.73e-06])
>>> plt.figure(figsize=(5,5))
>>> plt.semilogy(SNRdB,Pb_uc)
>>> plt.semilogy(SNRdB,Pb_s_third_3_hard,'r--')
>>> plt.semilogy(SNRdB,Pb_s_third_5_hard,'g--')
>>> plt.semilogy(SNRdB,Pb_s_third_7_hard,'k--')
>>> plt.semilogy(np.array([5,6,7]),Pb_s_third_5_hard_sim,'sg')
>>> plt.axis([0,12,1e-7,1e0])
>>> plt.title(r'Hard Decision Rate 1/3 Coding Measurements')
>>> plt.xlabel(r'$E_b/N_0$ (dB)')
>>> plt.ylabel(r'Symbol Error Probability')
>>> plt.legend(('Uncoded BPSK','R=1/3, K=3, Hard',\
'R=1/3, K=5, Hard', 'R=1/3, K=7, Hard',\
),loc='upper right')
>>> plt.grid();
>>> plt.show()
>>> # Show the traceback for the rate 1/3 hard decision case
>>> cc2.traceback_plot()
"""
if metric_type == 'hard':
# If hard decision must have 0/1 integers for input else float
if np.issubdtype(x.dtype, np.integer):
if x.max() > 1 or x.min() < 0:
raise ValueError('Integer bit values must be 0 or 1')
else:
raise ValueError('Decoder inputs must be integers on [0,1] for hard decisions')
# Initialize cumulative metrics array
cm_present = np.zeros((self.Nstates,1))
NS = len(x) # number of channel symbols to process;
# must be even for rate 1/2
# must be a multiple of 3 for rate 1/3
y = np.zeros(NS-self.decision_depth) # Decoded bit sequence
k = 0
symbolL = self.rate.denominator
# Calculate branch metrics and update traceback states and traceback bits
for n in range(0,NS,symbolL):
cm_past = self.paths.cumulative_metric[:,0]
tb_states_temp = self.paths.traceback_states[:,:-1].copy()
tb_bits_temp = self.paths.traceback_bits[:,:-1].copy()
for m in range(self.Nstates):
d1 = self.bm_calc(self.branches.bits1[m],
x[n:n+symbolL],metric_type,
quant_level)
d1 = d1 + cm_past[self.branches.states1[m]]
d2 = self.bm_calc(self.branches.bits2[m],
x[n:n+symbolL],metric_type,
quant_level)
d2 = d2 + cm_past[self.branches.states2[m]]
if d1 <= d2: # Find the survivor assuming minimum distance wins
cm_present[m] = d1
self.paths.traceback_states[m,:] = np.hstack((self.branches.states1[m],
tb_states_temp[int(self.branches.states1[m]),:]))
self.paths.traceback_bits[m,:] = np.hstack((self.branches.input1[m],
tb_bits_temp[int(self.branches.states1[m]),:]))
else:
cm_present[m] = d2
self.paths.traceback_states[m,:] = np.hstack((self.branches.states2[m],
tb_states_temp[int(self.branches.states2[m]),:]))
self.paths.traceback_bits[m,:] = np.hstack((self.branches.input2[m],
tb_bits_temp[int(self.branches.states2[m]),:]))
# Update cumulative metric history
self.paths.cumulative_metric = np.hstack((cm_present,
self.paths.cumulative_metric[:,:-1]))
# Obtain estimate of input bit sequence from the oldest bit in
# the traceback having the smallest (most likely) cumulative metric
min_metric = min(self.paths.cumulative_metric[:,0])
min_idx = np.where(self.paths.cumulative_metric[:,0] == min_metric)
if n >= symbolL*self.decision_depth-symbolL: # 2 since Rate = 1/2
y[k] = self.paths.traceback_bits[min_idx[0][0],-1]
k += 1
y = y[:k] # trim final length
return y
|
def bm_calc(self,ref_code_bits, rec_code_bits, metric_type, quant_level):
"""
distance = bm_calc(ref_code_bits, rec_code_bits, metric_type)
Branch metrics calculation
Mark Wickert and Andrew Smit October 2018
"""
distance = 0
if metric_type == 'soft': # squared distance metric
bits = binary(int(ref_code_bits),self.rate.denominator)
for k in range(len(bits)):
ref_bit = (2**quant_level-1)*int(bits[k],2)
distance += (int(rec_code_bits[k]) - ref_bit)**2
elif metric_type == 'hard': # hard decisions
bits = binary(int(ref_code_bits),self.rate.denominator)
for k in range(len(rec_code_bits)):
distance += abs(rec_code_bits[k] - int(bits[k]))
elif metric_type == 'unquant': # unquantized
bits = binary(int(ref_code_bits),self.rate.denominator)
for k in range(len(bits)):
distance += (float(rec_code_bits[k])-float(bits[k]))**2
else:
print('Invalid metric type specified')
raise ValueError('Invalid metric type specified. Use soft, hard, or unquant')
return distance
|
def conv_encoder(self,input,state):
"""
output, state = conv_encoder(input,state)
We get the 1/2 or 1/3 rate from self.rate
Polys G1 and G2 are entered as binary strings, e.g,
G1 = '111' and G2 = '101' for K = 3
G1 = '1011011' and G2 = '1111001' for K = 7
G3 is also included for rate 1/3
Input state as a binary string of length K-1, e.g., '00' or '0000000'
e.g., state = '00' for K = 3
e.g., state = '000000' for K = 7
Mark Wickert and Andrew Smit 2018
"""
output = []
if(self.rate == Fraction(1,2)):
for n in range(len(input)):
u1 = int(input[n])
u2 = int(input[n])
for m in range(1,self.constraint_length):
if int(self.G_polys[0][m]) == 1: # XOR if we have a connection
u1 = u1 ^ int(state[m-1])
if int(self.G_polys[1][m]) == 1: # XOR if we have a connection
u2 = u2 ^ int(state[m-1])
# G1 placed first, G2 placed second
output = np.hstack((output, [u1, u2]))
state = bin(int(input[n]))[-1] + state[:-1]
elif(self.rate == Fraction(1,3)):
for n in range(len(input)):
if(int(self.G_polys[0][0]) == 1):
u1 = int(input[n])
else:
u1 = 0
if(int(self.G_polys[1][0]) == 1):
u2 = int(input[n])
else:
u2 = 0
if(int(self.G_polys[2][0]) == 1):
u3 = int(input[n])
else:
u3 = 0
for m in range(1,self.constraint_length):
if int(self.G_polys[0][m]) == 1: # XOR if we have a connection
u1 = u1 ^ int(state[m-1])
if int(self.G_polys[1][m]) == 1: # XOR if we have a connection
u2 = u2 ^ int(state[m-1])
if int(self.G_polys[2][m]) == 1: # XOR if we have a connection
u3 = u3 ^ int(state[m-1])
# G1 placed first, G2 placed second, G3 placed third
output = np.hstack((output, [u1, u2, u3]))
state = bin(int(input[n]))[-1] + state[:-1]
return output, state
|
def puncture(self,code_bits,puncture_pattern = ('110','101')):
"""
Apply puncturing to the serial bits produced by convolutionally
encoding.
:param code_bits:
:param puncture_pattern:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` polynomial.
>>> import numpy as np
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv(('101','111'))
>>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
>>> state = '00'
>>> y, state = cc.conv_encoder(x, state)
>>> cc.puncture(y, ('110','101'))
array([ 0., 0., 0., 1., 1., 0., 0., 0., 1., 1., 0., 0.])
"""
# Check to see that the length of code_bits is consistent with a rate
# 1/2 code.
L_pp = len(puncture_pattern[0])
N_codewords = int(np.floor(len(code_bits)/float(2)))
if 2*N_codewords != len(code_bits):
warnings.warn('Number of code bits must be even!')
warnings.warn('Truncating bits to be compatible.')
code_bits = code_bits[:2*N_codewords]
# Extract the G1 and G2 encoded bits from the serial stream.
# Assume the stream is of the form [G1 G2 G1 G2 ... ]
x_G1 = code_bits.reshape(N_codewords,2).take([0],
axis=1).reshape(1,N_codewords).flatten()
x_G2 = code_bits.reshape(N_codewords,2).take([1],
axis=1).reshape(1,N_codewords).flatten()
# Check to see that the length of x_G1 and x_G2 is consistent with the
# length of the puncture pattern
N_punct_periods = int(np.floor(N_codewords/float(L_pp)))
if L_pp*N_punct_periods != N_codewords:
warnings.warn('Code bit length is not a multiple pp = %d!' % L_pp)
warnings.warn('Truncating bits to be compatible.')
x_G1 = x_G1[:L_pp*N_punct_periods]
x_G2 = x_G2[:L_pp*N_punct_periods]
#Puncture x_G1 and x_G1
g1_pp1 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '1']
g2_pp1 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '1']
N_pp = len(g1_pp1)
y_G1 = x_G1.reshape(N_punct_periods,L_pp).take(g1_pp1,
axis=1).reshape(N_pp*N_punct_periods,1)
y_G2 = x_G2.reshape(N_punct_periods,L_pp).take(g2_pp1,
axis=1).reshape(N_pp*N_punct_periods,1)
# Interleave y_G1 and y_G2 for modulation via a serial bit stream
y = np.hstack((y_G1,y_G2)).reshape(1,2*N_pp*N_punct_periods).flatten()
return y
|
def depuncture(self,soft_bits,puncture_pattern = ('110','101'),
erase_value = 3.5):
"""
Apply de-puncturing to the soft bits coming from the channel. Erasure bits
are inserted to return the soft bit values back to a form that can be
Viterbi decoded.
:param soft_bits:
:param puncture_pattern:
:param erase_value:
:return:
Examples
--------
This example uses the following puncture matrix:
.. math::
\\begin{align*}
\\mathbf{A} = \\begin{bmatrix}
1 & 1 & 0 \\\\
1 & 0 & 1
\\end{bmatrix}
\\end{align*}
The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of
the :math:`G_{2}` polynomial.
>>> import numpy as np
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv(('101','111'))
>>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
>>> state = '00'
>>> y, state = cc.conv_encoder(x, state)
>>> yp = cc.puncture(y, ('110','101'))
>>> cc.depuncture(yp, ('110', '101'), 1)
array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.]
"""
# Check to see that the length of soft_bits is consistent with a rate
# 1/2 code.
L_pp = len(puncture_pattern[0])
L_pp1 = len([g1 for g1 in puncture_pattern[0] if g1 == '1'])
L_pp0 = len([g1 for g1 in puncture_pattern[0] if g1 == '0'])
#L_pp0 = len([g1 for g1 in pp1 if g1 == '0'])
N_softwords = int(np.floor(len(soft_bits)/float(2)))
if 2*N_softwords != len(soft_bits):
warnings.warn('Number of soft bits must be even!')
warnings.warn('Truncating bits to be compatible.')
soft_bits = soft_bits[:2*N_softwords]
# Extract the G1p and G2p encoded bits from the serial stream.
# Assume the stream is of the form [G1p G2p G1p G2p ... ],
# which for QPSK may be of the form [Ip Qp Ip Qp Ip Qp ... ]
x_G1 = soft_bits.reshape(N_softwords,2).take([0],
axis=1).reshape(1,N_softwords).flatten()
x_G2 = soft_bits.reshape(N_softwords,2).take([1],
axis=1).reshape(1,N_softwords).flatten()
# Check to see that the length of x_G1 and x_G2 is consistent with the
# puncture length period of the soft bits
N_punct_periods = int(np.floor(N_softwords/float(L_pp1)))
if L_pp1*N_punct_periods != N_softwords:
warnings.warn('Number of soft bits per puncture period is %d' % L_pp1)
warnings.warn('The number of soft bits is not a multiple')
warnings.warn('Truncating soft bits to be compatible.')
x_G1 = x_G1[:L_pp1*N_punct_periods]
x_G2 = x_G2[:L_pp1*N_punct_periods]
x_G1 = x_G1.reshape(N_punct_periods,L_pp1)
x_G2 = x_G2.reshape(N_punct_periods,L_pp1)
#Depuncture x_G1 and x_G1
g1_pp1 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '1']
g1_pp0 = [k for k,g1 in enumerate(puncture_pattern[0]) if g1 == '0']
g2_pp1 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '1']
g2_pp0 = [k for k,g2 in enumerate(puncture_pattern[1]) if g2 == '0']
x_E = erase_value*np.ones((N_punct_periods,L_pp0))
y_G1 = np.hstack((x_G1,x_E))
y_G2 = np.hstack((x_G2,x_E))
[g1_pp1.append(val) for idx,val in enumerate(g1_pp0)]
g1_comp = list(zip(g1_pp1,list(range(L_pp))))
g1_comp.sort()
G1_col_permute = [g1_comp[idx][1] for idx in range(L_pp)]
[g2_pp1.append(val) for idx,val in enumerate(g2_pp0)]
g2_comp = list(zip(g2_pp1,list(range(L_pp))))
g2_comp.sort()
G2_col_permute = [g2_comp[idx][1] for idx in range(L_pp)]
#permute columns to place erasure bits in the correct position
y = np.hstack((y_G1[:,G1_col_permute].reshape(L_pp*N_punct_periods,1),
y_G2[:,G2_col_permute].reshape(L_pp*N_punct_periods,
1))).reshape(1,2*L_pp*N_punct_periods).flatten()
return y
|
def trellis_plot(self,fsize=(6,4)):
"""
Plots a trellis diagram of the possible state transitions.
Parameters
----------
fsize : Plot size for matplotlib.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> cc = fec_conv()
>>> cc.trellis_plot()
>>> plt.show()
"""
branches_from = self.branches
plt.figure(figsize=fsize)
plt.plot(0,0,'.')
plt.axis([-0.01, 1.01, -(self.Nstates-1)-0.05, 0.05])
for m in range(self.Nstates):
if branches_from.input1[m] == 0:
plt.plot([0, 1],[-branches_from.states1[m], -m],'b')
plt.plot([0, 1],[-branches_from.states1[m], -m],'r.')
if branches_from.input2[m] == 0:
plt.plot([0, 1],[-branches_from.states2[m], -m],'b')
plt.plot([0, 1],[-branches_from.states2[m], -m],'r.')
if branches_from.input1[m] == 1:
plt.plot([0, 1],[-branches_from.states1[m], -m],'g')
plt.plot([0, 1],[-branches_from.states1[m], -m],'r.')
if branches_from.input2[m] == 1:
plt.plot([0, 1],[-branches_from.states2[m], -m],'g')
plt.plot([0, 1],[-branches_from.states2[m], -m],'r.')
#plt.grid()
plt.xlabel('One Symbol Transition')
plt.ylabel('-State Index')
msg = 'Rate %s, K = %d Trellis' %(self.rate, int(np.ceil(np.log2(self.Nstates)+1)))
plt.title(msg)
|
def traceback_plot(self,fsize=(6,4)):
"""
Plots a path of the possible last 4 states.
Parameters
----------
fsize : Plot size for matplotlib.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.fec_conv import fec_conv
>>> from sk_dsp_comm import digitalcom as dc
>>> import numpy as np
>>> cc = fec_conv()
>>> x = np.random.randint(0,2,100)
>>> state = '00'
>>> y,state = cc.conv_encoder(x,state)
>>> # Add channel noise to bits translated to +1/-1
>>> yn = dc.cpx_AWGN(2*y-1,5,1) # SNR = 5 dB
>>> # Translate noisy +1/-1 bits to soft values on [0,7]
>>> yn = (yn.real+1)/2*7
>>> z = cc.viterbi_decoder(yn)
>>> cc.traceback_plot()
>>> plt.show()
"""
traceback_states = self.paths.traceback_states
plt.figure(figsize=fsize)
plt.axis([-self.decision_depth+1, 0,
-(self.Nstates-1)-0.5, 0.5])
M,N = traceback_states.shape
traceback_states = -traceback_states[:,::-1]
plt.plot(range(-(N-1),0+1),traceback_states.T)
plt.xlabel('Traceback Symbol Periods')
plt.ylabel('State Index $0$ to -$2^{(K-1)}$')
plt.title('Survivor Paths Traced Back From All %d States' % self.Nstates)
plt.grid()
|
def up(self,x):
"""
Upsample and filter the signal
"""
y = self.M*ssd.upsample(x,self.M)
y = signal.lfilter(self.b,self.a,y)
return y
|
def dn(self,x):
"""
Downsample and filter the signal
"""
y = signal.lfilter(self.b,self.a,x)
y = ssd.downsample(y,self.M)
return y
|
def filter(self,x):
"""
Filter the signal
"""
y = signal.lfilter(self.b,[1],x)
return y
|
def up(self,x,L_change = 12):
"""
Upsample and filter the signal
"""
y = L_change*ssd.upsample(x,L_change)
y = signal.lfilter(self.b,[1],y)
return y
|
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.lfilter(self.b,[1],x)
y = ssd.downsample(y,M_change)
return y
|
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
ssd.zplane(self.b,[1],auto_scale,size,tol)
|
def filter(self,x):
"""
Filter the signal using second-order sections
"""
y = signal.sosfilt(self.sos,x)
return y
|
def up(self,x,L_change = 12):
"""
Upsample and filter the signal
"""
y = L_change*ssd.upsample(x,L_change)
y = signal.sosfilt(self.sos,y)
return y
|
def dn(self,x,M_change = 12):
"""
Downsample and filter the signal
"""
y = signal.sosfilt(self.sos,x)
y = ssd.downsample(y,M_change)
return y
|
def freq_resp(self, mode= 'dB', fs = 8000, ylim = [-100,2]):
"""
Frequency response plot
"""
iir_d.freqz_resp_cas_list([self.sos],mode,fs=fs)
pylab.grid()
pylab.ylim(ylim)
|
def zplane(self,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Plot the poles and zeros of the FIR filter in the z-plane
"""
iir_d.sos_zplane(self.sos,auto_scale,size,tol)
|
def ser2ber(q,n,d,t,ps):
"""
Converts symbol error rate to bit error rate. Taken from Ziemer and
Tranter page 650. Necessary when comparing different types of block codes.
parameters
----------
q: size of the code alphabet for given modulation type (BPSK=2)
n: number of channel bits
d: distance (2e+1) where e is the number of correctable errors per code word.
For hamming codes, e=1, so d=3.
t: number of correctable errors per code word
ps: symbol error probability vector
returns
-------
ber: bit error rate
"""
lnps = len(ps) # len of error vector
ber = np.zeros(lnps) # inialize output vector
for k in range(0,lnps): # iterate error vector
ser = ps[k] # channel symbol error rate
sum1 = 0 # initialize sums
sum2 = 0
for i in range(t+1,d+1):
term = special.comb(n,i)*(ser**i)*((1-ser))**(n-i)
sum1 = sum1 + term
for i in range(d+1,n+1):
term = (i)*special.comb(n,i)*(ser**i)*((1-ser)**(n-i))
sum2 = sum2+term
ber[k] = (q/(2*(q-1)))*((d/n)*sum1+(1/n)*sum2)
return ber
|
def block_single_error_Pb_bound(j,SNRdB,coded=True,M=2):
"""
Finds the bit error probability bounds according to Ziemer and Tranter
page 656.
parameters:
-----------
j: number of parity bits used in single error correction block code
SNRdB: Eb/N0 values in dB
coded: Select single error correction code (True) or uncoded (False)
M: modulation order
returns:
--------
Pb: bit error probability bound
"""
Pb = np.zeros_like(SNRdB)
Ps = np.zeros_like(SNRdB)
SNR = 10.**(SNRdB/10.)
n = 2**j-1
k = n-j
for i,SNRn in enumerate(SNR):
if coded: # compute Hamming code Ps
if M == 2:
Ps[i] = Q_fctn(np.sqrt(k*2.*SNRn/n))
else:
Ps[i] = 4./np.log2(M)*(1 - 1/np.sqrt(M))*\
np.gaussQ(np.sqrt(3*np.log2(M)/(M-1)*SNRn))/k
else: # Compute Uncoded Pb
if M == 2:
Pb[i] = Q_fctn(np.sqrt(2.*SNRn))
else:
Pb[i] = 4./np.log2(M)*(1 - 1/np.sqrt(M))*\
np.gaussQ(np.sqrt(3*np.log2(M)/(M-1)*SNRn))
# Convert symbol error probability to bit error probability
if coded:
Pb = ser2ber(M,n,3,1,Ps)
return Pb
|
def hamm_gen(self,j):
"""
Generates parity check matrix (H) and generator
matrix (G).
Parameters
----------
j: Number of Hamming code parity bits with n = 2^j-1 and k = n-j
returns
-------
G: Systematic generator matrix with left-side identity matrix
H: Systematic parity-check matrix with right-side identity matrix
R: k x k identity matrix
n: number of total bits/block
k: number of source bits/block
Andrew Smit November 2018
"""
if(j < 3):
raise ValueError('j must be > 2')
# calculate codeword length
n = 2**j-1
# calculate source bit length
k = n-j
# Allocate memory for Matrices
G = np.zeros((k,n),dtype=int)
H = np.zeros((j,n),dtype=int)
P = np.zeros((j,k),dtype=int)
R = np.zeros((k,n),dtype=int)
# Encode parity-check matrix columns with binary 1-n
for i in range(1,n+1):
b = list(binary(i,j))
for m in range(0,len(b)):
b[m] = int(b[m])
H[:,i-1] = np.array(b)
# Reformat H to be systematic
H1 = np.zeros((1,j),dtype=int)
H2 = np.zeros((1,j),dtype=int)
for i in range(0,j):
idx1 = 2**i-1
idx2 = n-i-1
H1[0,:] = H[:,idx1]
H2[0,:] = H[:,idx2]
H[:,idx1] = H2
H[:,idx2] = H1
# Get parity matrix from H
P = H[:,:k]
# Use P to calcuate generator matrix P
G[:,:k] = np.diag(np.ones(k))
G[:,k:] = P.T
# Get k x k identity matrix
R[:,:k] = np.diag(np.ones(k))
return G, H, R, n, k
|
def hamm_encoder(self,x):
"""
Encodes input bit array x using hamming block code.
parameters
----------
x: array of source bits to be encoded by block encoder.
returns
-------
codewords: array of code words generated by generator
matrix G and input x.
Andrew Smit November 2018
"""
if(np.dtype(x[0]) != int):
raise ValueError('Error: Invalid data type. Input must be a vector of ints')
if(len(x) % self.k or len(x) < self.k):
raise ValueError('Error: Invalid input vector length. Length must be a multiple of %d' %self.k)
N_symbols = int(len(x)/self.k)
codewords = np.zeros(N_symbols*self.n)
x = np.reshape(x,(1,len(x)))
for i in range(0,N_symbols):
codewords[i*self.n:(i+1)*self.n] = np.matmul(x[:,i*self.k:(i+1)*self.k],self.G)%2
return codewords
|
def hamm_decoder(self,codewords):
"""
Decode hamming encoded codewords. Make sure code words are of
the appropriate length for the object.
parameters
---------
codewords: bit array of codewords
returns
-------
decoded_bits: bit array of decoded source bits
Andrew Smit November 2018
"""
if(np.dtype(codewords[0]) != int):
raise ValueError('Error: Invalid data type. Input must be a vector of ints')
if(len(codewords) % self.n or len(codewords) < self.n):
raise ValueError('Error: Invalid input vector length. Length must be a multiple of %d' %self.n)
# Calculate the number of symbols (codewords) in the input array
N_symbols = int(len(codewords)/self.n)
# Allocate memory for decoded sourcebits
decoded_bits = np.zeros(N_symbols*self.k)
# Loop through codewords to decode one block at a time
codewords = np.reshape(codewords,(1,len(codewords)))
for i in range(0,N_symbols):
# find the syndrome of each codeword
S = np.matmul(self.H,codewords[:,i*self.n:(i+1)*self.n].T) % 2
# convert binary syndrome to an integer
bits = ''
for m in range(0,len(S)):
bit = str(int(S[m,:]))
bits = bits + bit
error_pos = int(bits,2)
h_pos = self.H[:,error_pos-1]
# Use the syndrome to find the position of an error within the block
bits = ''
for m in range(0,len(S)):
bit = str(int(h_pos[m]))
bits = bits + bit
decoded_pos = int(bits,2)-1
# correct error if present
if(error_pos):
codewords[:,i*self.n+decoded_pos] = (codewords[:,i*self.n+decoded_pos] + 1) % 2
# Decode the corrected codeword
decoded_bits[i*self.k:(i+1)*self.k] = np.matmul(self.R,codewords[:,i*self.n:(i+1)*self.n].T).T % 2
return decoded_bits.astype(int)
|
def cyclic_encoder(self,x,G='1011'):
"""
Encodes input bit array x using cyclic block code.
parameters
----------
x: vector of source bits to be encoded by block encoder. Numpy array
of integers expected.
returns
-------
codewords: vector of code words generated from input vector
Andrew Smit November 2018
"""
# Check block length
if(len(x) % self.k or len(x) < self.k):
raise ValueError('Error: Incomplete block in input array. Make sure input array length is a multiple of %d' %self.k)
# Check data type of input vector
if(np.dtype(x[0]) != int):
raise ValueError('Error: Input array should be int data type')
# Calculate number of blocks
Num_blocks = int(len(x) / self.k)
codewords = np.zeros((Num_blocks,self.n),dtype=int)
x = np.reshape(x,(Num_blocks,self.k))
#print(x)
for p in range(Num_blocks):
S = np.zeros(len(self.G))
codeword = np.zeros(self.n)
current_block = x[p,:]
#print(current_block)
for i in range(0,self.n):
if(i < self.k):
S[0] = current_block[i]
S0temp = 0
for m in range(0,len(self.G)):
if(self.G[m] == '1'):
S0temp = S0temp + S[m]
#print(j,S0temp,S[j])
S0temp = S0temp % 2
S = np.roll(S,1)
codeword[i] = current_block[i]
S[1] = S0temp
else:
out = 0
for m in range(1,len(self.G)):
if(self.G[m] == '1'):
out = out + S[m]
codeword[i] = out % 2
S = np.roll(S,1)
S[1] = 0
codewords[p,:] = codeword
#print(codeword)
codewords = np.reshape(codewords,np.size(codewords))
return codewords.astype(int)
|
def cyclic_decoder(self,codewords):
"""
Decodes a vector of cyclic coded codewords.
parameters
----------
codewords: vector of codewords to be decoded. Numpy array of integers expected.
returns
-------
decoded_blocks: vector of decoded bits
Andrew Smit November 2018
"""
# Check block length
if(len(codewords) % self.n or len(codewords) < self.n):
raise ValueError('Error: Incomplete coded block in input array. Make sure coded input array length is a multiple of %d' %self.n)
# Check input data type
if(np.dtype(codewords[0]) != int):
raise ValueError('Error: Input array should be int data type')
# Calculate number of blocks
Num_blocks = int(len(codewords) / self.n)
decoded_blocks = np.zeros((Num_blocks,self.k),dtype=int)
codewords = np.reshape(codewords,(Num_blocks,self.n))
for p in range(Num_blocks):
codeword = codewords[p,:]
Ureg = np.zeros(self.n)
S = np.zeros(len(self.G))
decoded_bits = np.zeros(self.k)
output = np.zeros(self.n)
for i in range(0,self.n): # Switch A closed B open
Ureg = np.roll(Ureg,1)
Ureg[0] = codeword[i]
S0temp = 0
S[0] = codeword[i]
for m in range(len(self.G)):
if(self.G[m] == '1'):
S0temp = S0temp + S[m]
S0 = S
S = np.roll(S,1)
S[1] = S0temp % 2
for i in range(0,self.n): # Switch B closed A open
Stemp = 0
for m in range(1,len(self.G)):
if(self.G[m] == '1'):
Stemp = Stemp + S[m]
S = np.roll(S,1)
S[1] = Stemp % 2
and_out = 1
for m in range(1,len(self.G)):
if(m > 1):
and_out = and_out and ((S[m]+1) % 2)
else:
and_out = and_out and S[m]
output[i] = (and_out + Ureg[len(Ureg)-1]) % 2
Ureg = np.roll(Ureg,1)
Ureg[0] = 0
decoded_bits = output[0:self.k].astype(int)
decoded_blocks[p,:] = decoded_bits
return np.reshape(decoded_blocks,np.size(decoded_blocks)).astype(int)
|
def _select_manager(backend_name):
"""Select the proper LockManager based on the current backend used by Celery.
:raise NotImplementedError: If Celery is using an unsupported backend.
:param str backend_name: Class name of the current Celery backend. Usually value of
current_app.extensions['celery'].celery.backend.__class__.__name__.
:return: Class definition object (not instance). One of the _LockManager* classes.
"""
if backend_name == 'RedisBackend':
lock_manager = _LockManagerRedis
elif backend_name == 'DatabaseBackend':
lock_manager = _LockManagerDB
else:
raise NotImplementedError
return lock_manager
|
def single_instance(func=None, lock_timeout=None, include_args=False):
"""Celery task decorator. Forces the task to have only one running instance at a time.
Use with binded tasks (@celery.task(bind=True)).
Modeled after:
http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
http://blogs.it.ox.ac.uk/inapickle/2012/01/05/python-decorators-with-optional-arguments/
Written by @Robpol86.
:raise OtherInstanceError: If another instance is already running.
:param function func: The function to decorate, must be also decorated by @celery.task.
:param int lock_timeout: Lock timeout in seconds plus five more seconds, in-case the task crashes and fails to
release the lock. If not specified, the values of the task's soft/hard limits are used. If all else fails,
timeout will be 5 minutes.
:param bool include_args: Include the md5 checksum of the arguments passed to the task in the Redis key. This allows
the same task to run with different arguments, only stopping a task from running if another instance of it is
running with the same arguments.
"""
if func is None:
return partial(single_instance, lock_timeout=lock_timeout, include_args=include_args)
@wraps(func)
def wrapped(celery_self, *args, **kwargs):
"""Wrapped Celery task, for single_instance()."""
# Select the manager and get timeout.
timeout = (
lock_timeout or celery_self.soft_time_limit or celery_self.time_limit
or celery_self.app.conf.get('CELERYD_TASK_SOFT_TIME_LIMIT')
or celery_self.app.conf.get('CELERYD_TASK_TIME_LIMIT')
or (60 * 5)
)
manager_class = _select_manager(celery_self.backend.__class__.__name__)
lock_manager = manager_class(celery_self, timeout, include_args, args, kwargs)
# Lock and execute.
with lock_manager:
ret_value = func(*args, **kwargs)
return ret_value
return wrapped
|
def task_identifier(self):
"""Return the unique identifier (string) of a task instance."""
task_id = self.celery_self.name
if self.include_args:
merged_args = str(self.args) + str([(k, self.kwargs[k]) for k in sorted(self.kwargs)])
task_id += '.args.{0}'.format(hashlib.md5(merged_args.encode('utf-8')).hexdigest())
return task_id
|
def is_already_running(self):
"""Return True if lock exists and has not timed out."""
redis_key = self.CELERY_LOCK.format(task_id=self.task_identifier)
return self.celery_self.backend.client.exists(redis_key)
|
def reset_lock(self):
"""Removed the lock regardless of timeout."""
redis_key = self.CELERY_LOCK.format(task_id=self.task_identifier)
self.celery_self.backend.client.delete(redis_key)
|
def is_already_running(self):
"""Return True if lock exists and has not timed out."""
date_done = (self.restore_group(self.task_identifier) or dict()).get('date_done')
if not date_done:
return False
difference = datetime.utcnow() - date_done
return difference < timedelta(seconds=self.timeout)
|
def init_app(self, app):
"""Actual method to read celery settings from app configuration and initialize the celery instance.
:param app: Flask application instance.
"""
_state._register_app = self.original_register_app # Restore Celery app registration function.
if not hasattr(app, 'extensions'):
app.extensions = dict()
if 'celery' in app.extensions:
raise ValueError('Already registered extension CELERY.')
app.extensions['celery'] = _CeleryState(self, app)
# Instantiate celery and read config.
super(Celery, self).__init__(app.import_name, broker=app.config['CELERY_BROKER_URL'])
# Set result backend default.
if 'CELERY_RESULT_BACKEND' in app.config:
self._preconf['CELERY_RESULT_BACKEND'] = app.config['CELERY_RESULT_BACKEND']
self.conf.update(app.config)
task_base = self.Task
# Add Flask app context to celery instance.
class ContextTask(task_base):
def __call__(self, *_args, **_kwargs):
with app.app_context():
return task_base.__call__(self, *_args, **_kwargs)
setattr(ContextTask, 'abstract', True)
setattr(self, 'Task', ContextTask)
|
def iter_chunksize(num_samples, chunksize):
"""Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns `chunksize` except for the last iteration.
"""
last_chunksize = int(np.mod(num_samples, chunksize))
chunksize = int(chunksize)
for _ in range(int(num_samples) // chunksize):
yield chunksize
if last_chunksize > 0:
yield last_chunksize
|
def iter_chunk_slice(num_samples, chunksize):
"""Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns a slice of size `chunksize`. In the last
iteration the slice may be smaller.
"""
i = 0
for c_size in iter_chunksize(num_samples, chunksize):
yield slice(i, i + c_size)
i += c_size
|
def iter_chunk_index(num_samples, chunksize):
"""Iterator used to iterate in chunks over an array of size `num_samples`.
At each iteration returns a start and stop index for a slice of size
`chunksize`. In the last iteration the slice may be smaller.
"""
i = 0
for c_size in iter_chunksize(num_samples, chunksize):
yield i, i + c_size
i += c_size
|
def reduce_chunk(func, array):
"""Reduce with `func`, chunk by chunk, the passed pytable `array`.
"""
res = []
for slice in iter_chunk_slice(array.shape[-1], array.chunkshape[-1]):
res.append(func(array[..., slice]))
return func(res)
|
def map_chunk(func, array, out_array):
"""Map with `func`, chunk by chunk, the input pytable `array`.
The result is stored in the output pytable array `out_array`.
"""
for slice in iter_chunk_slice(array.shape[-1], array.chunkshape[-1]):
out_array.append(func(array[..., slice]))
return out_array
|
def parallel_gen_timestamps(dview, max_em_rate, bg_rate):
"""Generate timestamps from a set of remote simulations in `dview`.
Assumes that all the engines have an `S` object already containing
an emission trace (`S.em`). The "photons" timestamps are generated
from these emission traces and merged into a single array of timestamps.
`max_em_rate` and `bg_rate` are passed to `S.sim_timetrace()`.
"""
dview.execute('S.sim_timestamps_em_store(max_rate=%d, bg_rate=%d, '
'seed=S.EID, overwrite=True)' % (max_em_rate, bg_rate))
dview.execute('times = S.timestamps[:]')
dview.execute('times_par = S.timestamps_par[:]')
Times = dview['times']
Times_par = dview['times_par']
# Assuming all t_max equal, just take the first
t_max = dview['S.t_max'][0]
t_tot = np.sum(dview['S.t_max'])
dview.execute("sim_name = S.compact_name_core(t_max=False, hashdigit=0)")
# Core names contains no ID or t_max
sim_name = dview['sim_name'][0]
times_all, times_par_all = merge_ph_times(Times, Times_par,
time_block=t_max)
return times_all, times_par_all, t_tot, sim_name
|
def merge_ph_times(times_list, times_par_list, time_block):
"""Build an array of timestamps joining the arrays in `ph_times_list`.
`time_block` is the duration of each array of timestamps.
"""
offsets = np.arange(len(times_list)) * time_block
cum_sizes = np.cumsum([ts.size for ts in times_list])
times = np.zeros(cum_sizes[-1])
times_par = np.zeros(cum_sizes[-1], dtype='uint8')
i1 = 0
for i2, ts, ts_par, offset in zip(cum_sizes, times_list, times_par_list,
offsets):
times[i1:i2] = ts + offset
times_par[i1:i2] = ts_par
i1 = i2
return times, times_par
|
def merge_DA_ph_times(ph_times_d, ph_times_a):
"""Returns a merged timestamp array for Donor+Accept. and bool mask for A.
"""
ph_times = np.hstack([ph_times_d, ph_times_a])
a_em = np.hstack([np.zeros(ph_times_d.size, dtype=np.bool),
np.ones(ph_times_a.size, dtype=np.bool)])
index_sort = ph_times.argsort()
return ph_times[index_sort], a_em[index_sort]
|
def merge_particle_emission(SS):
"""Returns a sim object summing the emissions and particles in SS (list).
"""
# Merge all the particles
P = reduce(lambda x, y: x + y, [Si.particles for Si in SS])
s = SS[0]
S = ParticlesSimulation(t_step=s.t_step, t_max=s.t_max,
particles=P, box=s.box, psf=s.psf)
S.em = np.zeros(s.em.shape, dtype=np.float64)
for Si in SS:
S.em += Si.em
return S
|
def load_PSFLab_file(fname):
"""Load the array `data` in the .mat file `fname`."""
if os.path.exists(fname) or os.path.exists(fname + '.mat'):
return loadmat(fname)['data']
else:
raise IOError("Can't find PSF file '%s'" % fname)
|
def convert_PSFLab_xz(data, x_step=0.5, z_step=0.5, normalize=False):
"""Process a 2D array (from PSFLab .mat file) containing a x-z PSF slice.
The input data is the raw array saved by PSFLab. The returned array has
the x axis cut in half (only positive x) to take advantage of the
rotational symmetry around z. Pysical dimensions (`x_step` and `z_step)
are also assigned.
If `nomalize` is True the peak is normalized to 1.
Returns:
x, z: (1D array) the X and Z axis in pysical units
hdata: (2D array) the PSF intesity
izm: (float) the index of PSF max along z (axis 0) for x=0 (axis 1)
"""
z_len, x_len = data.shape
hdata = data[:, (x_len - 1) // 2:]
x = np.arange(hdata.shape[1]) * x_step
z = np.arange(-(z_len - 1) / 2, (z_len - 1) / 2 + 1) * z_step
if normalize:
hdata /= hdata.max() # normalize to 1 at peak
return x, z, hdata, hdata[:, 0].argmax()
|
def eval(self, x, y, z):
"""Evaluate the function in (x, y, z)."""
xc, yc, zc = self.rc
sx, sy, sz = self.s
## Method1: direct evaluation
#return exp(-(((x-xc)**2)/(2*sx**2) + ((y-yc)**2)/(2*sy**2) +\
# ((z-zc)**2)/(2*sz**2)))
## Method2: evaluation using numexpr
def arg(s):
return "((%s-%sc)**2)/(2*s%s**2)" % (s, s, s)
return NE.evaluate("exp(-(%s + %s + %s))" %
(arg("x"), arg("y"), arg("z")))
|
def eval(self, x, y, z):
"""Evaluate the function in (x, y, z).
The function is rotationally symmetric around z.
"""
ro = np.sqrt(x**2 + y**2)
zs, xs = ro.shape
v = self.eval_xz(ro.ravel(), z.ravel())
return v.reshape(zs, xs)
|
def to_hdf5(self, file_handle, parent_node='/'):
"""Store the PSF data in `file_handle` (pytables) in `parent_node`.
The raw PSF array name is stored with same name as the original fname.
Also, the following attribues are set: fname, dir_, x_step, z_step.
"""
tarray = file_handle.create_array(parent_node, name=self.fname,
obj=self.psflab_psf_raw,
title='PSF x-z slice (PSFLab array)')
for name in ['fname', 'dir_', 'x_step', 'z_step']:
file_handle.set_node_attr(tarray, name, getattr(self, name))
return tarray
|
def hash(self):
"""Return an hash string computed on the PSF data."""
hash_list = []
for key, value in sorted(self.__dict__.items()):
if not callable(value):
if isinstance(value, np.ndarray):
hash_list.append(value.tostring())
else:
hash_list.append(str(value))
return hashlib.md5(repr(hash_list).encode()).hexdigest()
|
def git_path_valid(git_path=None):
"""
Check whether the git executable is found.
"""
if git_path is None and GIT_PATH is None:
return False
if git_path is None: git_path = GIT_PATH
try:
call([git_path, '--version'])
return True
except OSError:
return False
|
def get_git_version(git_path=None):
"""
Get the Git version.
"""
if git_path is None: git_path = GIT_PATH
git_version = check_output([git_path, "--version"]).split()[2]
return git_version
|
def check_clean_status(git_path=None):
"""
Returns whether there are uncommitted changes in the working dir.
"""
output = get_status(git_path)
is_unmodified = (len(output.strip()) == 0)
return is_unmodified
|
def get_last_commit_line(git_path=None):
"""
Get one-line description of HEAD commit for repository in current dir.
"""
if git_path is None: git_path = GIT_PATH
output = check_output([git_path, "log", "--pretty=format:'%ad %h %s'",
"--date=short", "-n1"])
return output.strip()[1:-1]
|
def get_last_commit(git_path=None):
"""
Get the HEAD commit SHA1 of repository in current dir.
"""
if git_path is None: git_path = GIT_PATH
line = get_last_commit_line(git_path)
revision_id = line.split()[1]
return revision_id
|
def print_summary(string='Repository', git_path=None):
"""
Print the last commit line and eventual uncommitted changes.
"""
if git_path is None: git_path = GIT_PATH
# If git is available, check fretbursts version
if not git_path_valid():
print('\n%s revision unknown (git not found).' % string)
else:
last_commit = get_last_commit_line()
print('\n{} revision:\n {}\n'.format(string, last_commit))
if not check_clean_status():
print('\nWARNING -> Uncommitted changes:')
print(get_status())
|
def get_bromo_fnames_da(d_em_kHz, d_bg_kHz, a_em_kHz, a_bg_kHz,
ID='1+2+3+4+5+6', t_tot='480', num_p='30', pM='64',
t_step=0.5e-6, D=1.2e-11, dir_=''):
"""Get filenames for donor and acceptor timestamps for the given parameters
"""
clk_p = t_step/32. # with t_step=0.5us -> 156.25 ns
E_sim = 1.*a_em_kHz/(a_em_kHz + d_em_kHz)
FRET_val = 100.*E_sim
print("Simulated FRET value: %.1f%%" % FRET_val)
d_em_kHz_str = "%04d" % d_em_kHz
a_em_kHz_str = "%04d" % a_em_kHz
d_bg_kHz_str = "%04.1f" % d_bg_kHz
a_bg_kHz_str = "%04.1f" % a_bg_kHz
print("D: EM %s BG %s " % (d_em_kHz_str, d_bg_kHz_str))
print("A: EM %s BG %s " % (a_em_kHz_str, a_bg_kHz_str))
fname_d = ('ph_times_{t_tot}s_D{D}_{np}P_{pM}pM_'
'step{ts_us}us_ID{ID}_EM{em}kHz_BG{bg}kHz.npy').format(
em=d_em_kHz_str, bg=d_bg_kHz_str, t_tot=t_tot, pM=pM,
np=num_p, ID=ID, ts_us=t_step*1e6, D=D)
fname_a = ('ph_times_{t_tot}s_D{D}_{np}P_{pM}pM_'
'step{ts_us}us_ID{ID}_EM{em}kHz_BG{bg}kHz.npy').format(
em=a_em_kHz_str, bg=a_bg_kHz_str, t_tot=t_tot, pM=pM,
np=num_p, ID=ID, ts_us=t_step*1e6, D=D)
print(fname_d)
print(fname_a)
name = ('BroSim_E{:.1f}_dBG{:.1f}k_aBG{:.1f}k_'
'dEM{:.0f}k').format(FRET_val, d_bg_kHz, a_bg_kHz, d_em_kHz)
return dir_+fname_d, dir_+fname_a, name, clk_p, E_sim
|
def set_sim_params(self, nparams, attr_params):
"""Store parameters in `params` in `h5file.root.parameters`.
`nparams` (dict)
A dict as returned by `get_params()` in `ParticlesSimulation()`
The format is:
keys:
used as parameter name
values: (2-elements tuple)
first element is the parameter value
second element is a string used as "title" (description)
`attr_params` (dict)
A dict whole items are stored as attributes in '/parameters'
"""
for name, value in nparams.items():
val = value[0] if value[0] is not None else 'none'
self.h5file.create_array('/parameters', name, obj=val,
title=value[1])
for name, value in attr_params.items():
self.h5file.set_node_attr('/parameters', name, value)
|
def numeric_params(self):
"""Return a dict containing all (key, values) stored in '/parameters'
"""
nparams = dict()
for p in self.h5file.root.parameters:
nparams[p.name] = p.read()
return nparams
|
def numeric_params_meta(self):
"""Return a dict with all parameters and metadata in '/parameters'.
This returns the same dict format as returned by get_params() method
in ParticlesSimulation().
"""
nparams = dict()
for p in self.h5file.root.parameters:
nparams[p.name] = (p.read(), p.title)
return nparams
|
def add_trajectory(self, name, overwrite=False, shape=(0,), title='',
chunksize=2**19, comp_filter=default_compression,
atom=tables.Float64Atom(), params=dict(),
chunkslice='bytes'):
"""Add an trajectory array in '/trajectories'.
"""
group = self.h5file.root.trajectories
if name in group:
print("%s already exists ..." % name, end='')
if overwrite:
self.h5file.remove_node(group, name)
print(" deleted.")
else:
print(" old returned.")
return group.get_node(name)
nparams = self.numeric_params
num_t_steps = nparams['t_max'] / nparams['t_step']
chunkshape = self.calc_chunkshape(chunksize, shape, kind=chunkslice)
store_array = self.h5file.create_earray(
group, name, atom=atom,
shape = shape,
chunkshape = chunkshape,
expectedrows = num_t_steps,
filters = comp_filter,
title = title)
# Set the array parameters/attributes
for key, value in params.items():
store_array.set_attr(key, value)
store_array.set_attr('PyBroMo', __version__)
store_array.set_attr('creation_time', current_time())
return store_array
|
def add_emission_tot(self, chunksize=2**19, comp_filter=default_compression,
overwrite=False, params=dict(),
chunkslice='bytes'):
"""Add the `emission_tot` array in '/trajectories'.
"""
kwargs = dict(overwrite=overwrite, chunksize=chunksize, params=params,
comp_filter=comp_filter, atom=tables.Float32Atom(),
title='Summed emission trace of all the particles')
return self.add_trajectory('emission_tot', **kwargs)
|
def add_emission(self, chunksize=2**19, comp_filter=default_compression,
overwrite=False, params=dict(), chunkslice='bytes'):
"""Add the `emission` array in '/trajectories'.
"""
nparams = self.numeric_params
num_particles = nparams['np']
return self.add_trajectory('emission', shape=(num_particles, 0),
overwrite=overwrite, chunksize=chunksize,
comp_filter=comp_filter,
atom=tables.Float32Atom(),
title='Emission trace of each particle',
params=params)
|
def add_position(self, radial=False, chunksize=2**19, chunkslice='bytes',
comp_filter=default_compression, overwrite=False,
params=dict()):
"""Add the `position` array in '/trajectories'.
"""
nparams = self.numeric_params
num_particles = nparams['np']
name, ncoords, prefix = 'position', 3, 'X-Y-Z'
if radial:
name, ncoords, prefix = 'position_rz', 2, 'R-Z'
title = '%s position trace of each particle' % prefix
return self.add_trajectory(name, shape=(num_particles, ncoords, 0),
overwrite=overwrite, chunksize=chunksize,
comp_filter=comp_filter,
atom=tables.Float32Atom(),
title=title,
params=params)
|
def wrap_periodic(a, a1, a2):
"""Folds all the values of `a` outside [a1..a2] inside that interval.
This function is used to apply periodic boundary conditions.
"""
a -= a1
wrapped = np.mod(a, a2 - a1) + a1
return wrapped
|
def wrap_mirror(a, a1, a2):
"""Folds all the values of `a` outside [a1..a2] inside that interval.
This function is used to apply mirror-like boundary conditions.
"""
a[a > a2] = a2 - (a[a > a2] - a2)
a[a < a1] = a1 + (a1 - a[a < a1])
return a
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.