repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
z2n-periodogram | z2n-periodogram-master/z2n/stats.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Other libraries
import click
import numpy as np
from numba import jit
from tqdm import trange
from scipy import optimize
from scipy.stats import norm
import matplotlib.pyplot as plt
@jit(forceobj=True, parallel=True, fastmath=True)
def exposure(series) -> None:
"""
Calculate the period of exposure.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
last = series.time[-1]
first = series.time[0]
series.exposure = last - first
@jit(forceobj=True, parallel=True, fastmath=True)
def sampling(series) -> None:
"""
Calculate the sampling rate.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
series.sampling = (1 / series.exposure)
@jit(nopython=True, parallel=True, fastmath=True)
def phase(times: np.array, freq: float, harm: int) -> np.array:
"""
Calculate the phase values.
Parameters
----------
times : np.array
An array that represents the times.
freq : float
A float that represents the frequency.
harm : int
A int that represents the harmonics.
Returns
-------
values : np.array
An array that represents the phase values.
"""
values = times * freq
values = values - np.floor(values)
values = values * 2 * np.pi * harm
return values
@jit(nopython=True, parallel=True, fastmath=True)
def sine(phases: np.array) -> np.array:
"""
Calculate the sine values.
Parameters
----------
phases : np.array
An array that represents the phase values.
Returns
-------
values : np.array
An array that represents the sine values.
"""
values = np.sin(phases)
return values
@jit(nopython=True, parallel=True, fastmath=True)
def cosine(phases: np.array) -> np.array:
"""
Calculate the cosine values.
Parameters
----------
phases : np.array
An array that represents the phase values.
Returns
-------
values : np.array
An array that represents the cosine values.
"""
values = np.cos(phases)
return values
@jit(nopython=True, parallel=True, fastmath=True)
def summation(values: np.array) -> float:
"""
Calculate the summation value.
Parameters
----------
values : np.array
An array that represents the phase values.
Returns
-------
value : float
A float that represents the summation value.
"""
value = np.sum(values)
return value
@jit(nopython=True, parallel=False, fastmath=True)
def square(value: float) -> float:
"""
Calculate the square values.
Parameters
----------
value : float
A float that represents the summation value.
Returns
-------
value : float
A float that represents the square value.
"""
value = value ** 2
return value
@jit(nopython=True, parallel=False, fastmath=True)
def summ(sin: float, cos: float) -> float:
"""
Calculate the Z2n power value.
Parameters
----------
sin : float
A float that represents the sine value.
cos : float
A float that represents the cosine value.
Returns
-------
value : float
A float that represents the Z2n power.
"""
value = sin + cos
return value
@jit(nopython=True, parallel=False, fastmath=True)
def z2n(times: np.array, freq: float, harm: int) -> float:
"""
Calculate the Z2n power value.
times : np.array
An array that represents the times.
freq : float
A float that represents the frequency.
harm : int
A int that represents the harmonics.
Returns
-------
value : float
A float that represents the Z2n power.
"""
phases = phase(times, freq, harm)
sin = summation(sine(phases))
cos = summation(cosine(phases))
value = summ(square(sin), square(cos))
return value
@jit(nopython=True, parallel=True, fastmath=True)
def normalization(spectrum: np.array, normal: float) -> np.array:
"""
Calculate the normalization values.
Parameters
----------
spectrum : np.array
An array that represents the z2n values.
normal : float
A float that represents the normalization.
Returns
-------
values : np.array
An array that represents the normalized values.
"""
values = spectrum * normal
return values
@jit(nopython=True, parallel=True, fastmath=True)
def harmonics(time: np.array, freq: float, harm: int) -> np.array:
"""
Calculate the Z2n harmonics.
Parameters
----------
series : Series
A time series object.
harm : int
A int that represents the harmonics.
Returns
-------
None
"""
values = np.zeros(harm)
for harmonic in range(harm):
values[harmonic] = z2n(time, freq, harmonic + 1)
value = summation(values)
return value
@jit(forceobj=True, parallel=True, fastmath=True)
def periodogram(series) -> None:
"""
Calculate the Z2n statistics.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
if series.harmonics == 1:
for freq in trange(series.bins.size, desc=click.style(
'Calculating the periodogram', fg='yellow')):
series.z2n[freq] = z2n(
series.time, series.bins[freq], series.harmonics)
else:
for freq in trange(series.bins.size, desc=click.style(
'Calculating the periodogram', fg='yellow')):
series.z2n[freq] = harmonics(
series.time, series.bins[freq], series.harmonics)
series.z2n = normalization(series.z2n, (2 / series.time.size))
@jit(forceobj=True, parallel=True, fastmath=True)
def power(series) -> None:
"""
Calculate the global power.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
series.power = np.max(series.z2n)
@jit(forceobj=True, parallel=True, fastmath=True)
def frequency(series) -> None:
"""
Calculate the global frequency.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
index = np.argmax(series.z2n)
series.frequency = series.bins[index]
@jit(forceobj=True, parallel=True, fastmath=True)
def period(series) -> None:
"""
Calculate the global period.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
series.period = 1 / series.frequency
@jit(forceobj=True, parallel=True, fastmath=True)
def pfraction(series) -> None:
"""
Calculate the pulsed fraction.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
pfrac = (2 * series.power) / series.time.size
series.pulsed = pfrac ** 0.5
@jit(nopython=True, parallel=True, fastmath=True)
def gaussian(x, amplitude, mean, sigma):
"""Returns a Gaussian like function."""
return amplitude * np.exp(-((x - mean) ** 2) / (2 * sigma ** 2))
@jit(forceobj=True, parallel=True, fastmath=True)
def fitcurve(function, bins, powerspec, guess):
"""Fit a input curve function to the data."""
return optimize.curve_fit(function, bins, powerspec, guess)
@jit(forceobj=True, parallel=True, fastmath=True)
def equal(A, B, tol=1e-05):
"""Compare floating point numbers with tolerance."""
S = round(1/tol)
return np.in1d(np.around(A*S).astype(int), np.around(B*S).astype(int))
def error(series) -> None:
"""
Calculate the uncertainty.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
flag = 1
click.secho(
"Select the peak region to estimate uncertainty.", fg='yellow')
while flag:
if click.confirm("Is the peak region selected", prompt_suffix='? '):
try:
axis = plt.gca().get_xlim()
low = np.where(equal(series.bins, axis[0]))[0][0]
up = np.where(equal(series.bins, axis[1]))[0][-1]
bins = series.bins
powerspec = series.z2n
series.bins = series.bins[low:up]
series.z2n = series.z2n[low:up]
mean, sigma = norm.fit(series.bins)
power(series)
frequency(series)
period(series)
pfraction(series)
guess = [series.power, mean, sigma]
popt, _ = fitcurve(gaussian, series.bins, series.z2n, guess)
series.gauss.power = np.absolute(popt[0])
series.gauss.frequency = np.absolute(popt[1])
series.gauss.period = 1 / series.gauss.frequency
series.gauss.errorf = np.absolute(popt[2])
series.gauss.errorp = np.absolute(
(1 / (series.gauss.frequency + series.gauss.errorf))
- series.gauss.period)
pfrac = (2 * series.gauss.power) / series.time.size
series.gauss.pulsed = pfrac ** 0.5
series.gauss.z2n = gaussian(series.bins, *popt)
series.gauss.bins = series.bins
series.bins = bins
series.z2n = powerspec
flag = 0
except IndexError:
click.secho("Error on the selection.", fg='red')
| 9,648 | 22.824691 | 76 | py |
z2n-periodogram | z2n-periodogram-master/z2n/prompt.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Generic/Built-in
import psutil
import shelve
import pathlib
import threading
# Other Libraries
import click
import numpy as np
from click_shell import shell
import matplotlib.pyplot as mplt
# Owned Libraries
from z2n import file
from z2n import stats
from z2n import __docs__
from z2n import __version__
from z2n.plot import Plot
from z2n.series import Series
data = Series()
figure = Plot()
__z2n__ = f'''
Z2n Software ({__version__}), a python package for periodograms analysis.
Copyright (C) 2020, and MIT License, by Yohan Alexander [UFS].
Type "help" for more information or "docs" for documentation.
'''
__plt__ = f'''
Interactive plotting window of the Z2n Software ({__version__}).
Type "help" for more information.
'''
@click.version_option(prog_name='Z2n Software', version=__version__)
@click.option('--docs', 'docs_', is_flag=True, help='Open the documentation and exit.')
@click.option('--title', 'title_', type=str, help='Title of the image file.')
@click.option(
'--ylabel', 'ylabel_', type=str, show_default=True,
help='Y label of the image file.', default='Power')
@click.option(
'--xlabel', 'xlabel_', type=str, show_default=True,
help='X label of the image file.', default='Frequency (Hz)')
@click.option(
'--image', type=click.Choice(['png', 'pdf', 'ps', 'eps']),
help='Format of the image file.', default='ps', show_default=True)
@click.option(
'--format', 'format_', type=click.Choice(['ascii', 'csv', 'fits', 'hdf5']),
help='Format of the output file.', default='fits', show_default=True)
@click.option(
'--ext', type=int, help='FITS extension number.', default=1, show_default=True)
@click.option(
'--harm', type=int, help='Number of harmonics.', default=1, show_default=True)
@click.option(
'--over', type=int, help='Oversample factor instead of steps.')
@click.option(
'--delta', type=float, help='Frequency steps on the spectrum (Hz).')
@click.option(
'--fmax', type=float, help='Maximum frequency on the spectrum (Hz).')
@click.option(
'--fmin', type=float, help='Minimum frequency on the spectrum (Hz).')
@click.option('--output', 'output_', type=click.Path(), help='Name of the output file.')
@click.option(
'--input', 'input_', type=click.Path(exists=True), help='Name of the input file.')
@shell(prompt=click.style('(z2n) >>> ', fg='blue', bold=True), intro=__z2n__)
def z2n(input_, output_, format_, fmin, fmax, delta, over,
harm, ext, image, title_, xlabel_, ylabel_, docs_):
"""
This program allows the user to calculate periodograms, given a time series,
using the Z2n statistics a la Buccheri et al. 1983.
The standard Z2n statistics calculates the phase of each arrival time and
the corresponding sinusoidal functions for each time. Be advised that this
is very computationally expensive if the number of frequency bins is high.
"""
mutex = threading.Lock()
mutex.acquire()
with shelve.open(f'{pathlib.Path.home()}/.z2n') as database:
if docs_:
click.launch(__docs__)
click.echo(f"To read the documentation go to {__docs__}")
exit()
if input_:
data.harmonics = harm
data.input = input_
default = "z2n_" + pathlib.Path(data.input).stem
if output_:
data.output = output_
else:
data.output = default
data.format = format_
if not file.load_file(data, ext):
click.secho('Event file loaded.', fg='green')
data.set_exposure()
data.set_sampling()
data.set_nyquist()
data.get_time()
data.get_exposure()
data.get_sampling()
data.get_nyquist()
if not fmin:
data.fmin = data.nyquist
else:
data.fmin = fmin
if not fmax:
data.set_fmax()
else:
data.fmax = fmax
if not delta and not over:
if click.confirm(
"Use oversampling factor", True, prompt_suffix='? '):
data.set_oversample()
data.delta = 1 / (data.oversample * data.exposure)
else:
data.set_delta()
else:
if delta:
data.delta = delta
if over:
data.oversample = over
data.delta = 1 / (data.oversample * data.exposure)
data.get_fmin()
data.get_fmax()
data.get_delta()
data.get_harmonics()
block = (data.fmax - data.fmin) / np.array(data.delta)
nbytes = np.array(data.delta).dtype.itemsize * block
click.secho(
f"Computation memory {nbytes* 10e-6:.5f} MB", fg='yellow')
if nbytes < psutil.virtual_memory()[1]:
data.bins = np.arange(data.fmin, data.fmax, data.delta)
data.get_bins()
data.time = np.array(data.time)
data.bins = np.array(data.bins)
data.z2n = np.zeros(data.bins.size)
stats.periodogram(data)
click.secho('Periodogram calculated.', fg='green')
click.secho(
"Values based on the global maximum.", fg='yellow')
data.set_power()
data.set_frequency()
data.set_period()
data.set_pfraction()
data.get_power()
data.get_frequency()
data.get_period()
data.get_pfraction()
flag = 1
while flag:
if pathlib.Path(f"{data.output}.{data.format}").is_file():
click.secho("File already exists.", fg='red')
data.output = click.prompt(
"Name of the file", default, type=click.Path())
else:
flag = 0
if data.format == 'ascii':
file.save_ascii(data)
elif data.format == 'csv':
file.save_csv(data)
elif data.format == 'fits':
file.save_fits(data)
elif data.format == 'hdf5':
file.save_hdf5(data)
click.secho(
f"File saved at {data.output}.{data.format}", fg='green')
flag = 1
while flag:
if pathlib.Path(f"{data.output}.{image}").is_file():
click.secho("Image already exists.", fg='red')
data.output = click.prompt(
"Name of the image", default, type=click.Path())
else:
flag = 0
mplt.plot(
data.bins, data.z2n, label='Z2n Power', linewidth=2)
mplt.title(title_)
mplt.xlabel(xlabel_)
mplt.ylabel(ylabel_)
mplt.legend(loc='best')
mplt.tight_layout()
if image == 'png':
mplt.savefig(f'{data.output}.{image}', format=image)
elif image == 'pdf':
mplt.savefig(f'{data.output}.{image}', format=image)
elif image == 'ps':
mplt.savefig(f'{data.output}.{image}', format=image)
elif image == 'eps':
mplt.savefig(f'{data.output}.{image}', format=image)
click.secho(
f"Image saved at {data.output}.{image}", fg='green')
else:
click.secho("Not enough memory available.", fg='red')
exit()
else:
try:
figure.data.input = database['input']
figure.data.fmin = database['fmin']
figure.data.fmax = database['fmax']
figure.data.delta = database['delta']
figure.data.oversample = database['oversample']
except KeyError:
pass
click.echo(__z2n__)
if figure.plot_periodogram():
figure.plot_figure()
database['input'] = figure.data.input
database['fmin'] = figure.data.fmin
database['fmax'] = figure.data.fmax
database['delta'] = figure.data.delta
database['oversample'] = figure.data.oversample
mutex.release()
@z2n.command()
def docs() -> None:
"""Open the documentation on the software."""
click.launch(__docs__)
click.echo(f"To read the documentation go to {__docs__}")
@z2n.command()
def plot() -> None:
"""Open the interactive plotting window."""
if figure.data.z2n.size == 0:
click.secho("The periodogram was not calculated yet.", fg='yellow')
else:
figure.plot_figure()
plt()
@z2n.command()
def run() -> None:
"""Calculate the Z2n Statistics."""
if figure.plot_periodogram():
figure.plot_figure()
@z2n.command()
def gauss() -> None:
"""Select the fit of a gaussian curve."""
if figure.data.z2n.size == 0:
click.secho("The periodogram was not calculated yet.", fg='yellow')
else:
figure.data.plot()
@z2n.command()
def save() -> None:
"""Save the periodogram on a file."""
if figure.data.z2n.size == 0:
click.secho("The periodogram was not calculated yet.", fg='yellow')
else:
figure.data.save_file()
@shell(prompt=click.style('(plt) >>> ', fg='magenta', bold=True), intro=__plt__)
def plt() -> None:
"""Open the interactive periodogram plotting window."""
@plt.command()
def title() -> None:
"""Change the title on the figure."""
figure.change_title()
@plt.command()
def xlabel() -> None:
"""Change the label on the x axis."""
figure.change_xlabel()
@plt.command()
def xscale() -> None:
"""Change the scale on the x axis."""
figure.change_xscale()
@plt.command()
def xlim() -> None:
"""Change the limites on the x axis."""
figure.change_xlim()
@plt.command()
def ylabel() -> None:
"""Change the label on the y axis."""
figure.change_ylabel()
@plt.command()
def yscale() -> None:
"""Change the scale on the y axis."""
figure.change_yscale()
@plt.command()
def ylim() -> None:
"""Change the limites on the y axis."""
figure.change_ylim()
@plt.command()
def save() -> None:
"""Save the image on a file."""
figure.save_image()
| 11,094 | 34.790323 | 88 | py |
z2n-periodogram | z2n-periodogram-master/z2n/file.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Generic/Built-in
import pathlib
# Other Libraries
import click
import numpy as np
from astropy.io import fits
from astropy.table import Table
def load_file(series, ext) -> int:
"""
Open file and store time series.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
flag = 0
suffix = pathlib.Path(series.input).suffix
if suffix in ("", ".txt"):
flag = load_ascii(series)
elif suffix in (".csv", ".ecsv"):
flag = load_csv(series)
elif suffix in (".hdf", ".h5", ".hdf5", ".he5"):
flag = load_hdf5(series)
else:
with fits.open(series.input) as events:
try:
events['Z2N']
click.secho('Z2N extension already found', fg='yellow')
if click.confirm('Use the periodogram', prompt_suffix='? '):
series.bins = events['Z2N'].data['FREQUENCY']
series.bins = series.bins.astype(series.bins.dtype.name)
series.z2n = events['Z2N'].data['POWER']
series.z2n = series.z2n.astype(series.z2n.dtype.name)
hdr = events['Z2N'].header
series.exposure = float(hdr['exposure'])
series.sampling = float(hdr['sampling'])
series.nyquist = float(hdr['nyquist'])
series.harmonics = int(hdr['harmonic'])
series.fmin = float(hdr['fmin'])
series.fmax = float(hdr['fmax'])
series.delta = float(hdr['delta'])
series.frequency = float(hdr['peak'])
series.period = float(hdr['period'])
series.power = float(hdr['power'])
series.pulsed = float(hdr['pulsed'])
click.secho(f"{hdr['events']} events.", fg='cyan')
series.get_exposure()
series.get_sampling()
series.get_nyquist()
series.get_fmin()
series.get_fmax()
series.get_delta()
series.get_bins()
series.get_harmonics()
series.get_frequency()
series.get_period()
series.get_power()
series.get_pfraction()
series.set_gauss()
series.set_bak()
load_fits(series, ext)
flag = 1
else:
flag = load_fits(series, ext)
except KeyError:
flag = load_fits(series, ext)
return flag
def load_ascii(series) -> int:
"""
Open ascii file and store time series.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
flag = 1
table = Table.read(series.input, format='ascii')
try:
series.time = table['TIME'].data
series.time = series.time.astype(series.time.dtype.name)
flag = 0
except (KeyError, TypeError, IndexError):
click.clear()
column = 'TIME'
flag = 1
while flag:
try:
table.pprint()
click.secho(f"Column {column} not found.", fg='red')
column = click.prompt(
"Which column name", type=str, prompt_suffix='? ')
series.time = table[column].data
series.time = series.time.astype(series.time.dtype.name)
if click.confirm(f"Use column {column}", prompt_suffix='? '):
flag = 0
else:
click.clear()
except (KeyError, TypeError, IndexError):
click.clear()
return flag
def load_csv(series) -> int:
"""
Open csv file and store time series.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
flag = 1
table = Table.read(series.input, format='csv')
try:
series.time = table['TIME'].data
series.time = series.time.astype(series.time.dtype.name)
flag = 0
except (KeyError, TypeError, IndexError):
click.clear()
column = 'TIME'
flag = 1
while flag:
try:
table.pprint()
click.secho(f"Column {column} not found.", fg='red')
column = click.prompt(
"Which column name", type=str, prompt_suffix='? ')
series.time = table[column].data
series.time = series.time.astype(series.time.dtype.name)
if click.confirm(f"Use column {column}", prompt_suffix='? '):
flag = 0
else:
click.clear()
except (KeyError, TypeError, IndexError):
click.clear()
return flag
def load_fits(series, ext) -> None:
"""
Open fits file and store time series.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
flag = 0
times = 0
columns = ['TIME', 'time']
extensions = []
with fits.open(series.input) as events:
if ext:
try:
series.time = events[ext].data['TIME']
series.time = series.time.astype(series.time.dtype.name)
click.secho(
f"Column TIME in {events[ext].name}.", fg='yellow')
flag = 0
except (KeyError, TypeError):
flag = 1
click.secho(
f"Column TIME not found in {events[ext].name}.", fg='red')
except IndexError:
flag = 1
click.secho(
f"Extension number {ext} not found.", fg='red')
else:
for hdu in range(1, len(events)):
try:
if any(column in events[hdu].columns.names for column in columns):
extensions.append(hdu)
times += 1
except AttributeError:
pass
if times == 1:
click.secho(
f"Column TIME in {events[extensions[0]].name}.", fg='yellow')
series.time = events[extensions[0]].data['TIME']
series.time = series.time.astype(series.time.dtype.name)
flag = 0
elif times > 1:
click.secho("Multiple columns TIME found.", fg='yellow')
flag = 1
while flag:
table = Table(
names=('Number', 'Extension', 'Length (Rows)'),
dtype=('int64', 'str', 'int64'))
for value in extensions:
table.add_row([
value,
events[value].name,
events[value].data['TIME'].size])
table.pprint()
number = click.prompt(
"Which extension number", type=int, prompt_suffix='? ')
try:
if click.confirm(f"Use column in {events[number].name}"):
series.time = events[number].data['TIME']
series.time = series.time.astype(
series.time.dtype.name)
flag = 0
break
else:
flag = 1
click.clear()
except (KeyError, TypeError):
flag = 1
click.clear()
click.secho(
f"Column TIME not found in {events[number].name}.",
fg='red')
except IndexError:
flag = 1
click.clear()
click.secho(
f"Extension number {number} not found.", fg='red')
else:
click.clear()
column = 'TIME'
flag = 1
while flag:
hdu = 1
while hdu < len(events):
table = Table(events[hdu].data)
table.pprint()
click.secho(
f"Column {column} not found in extension.", fg='red')
click.secho(
f"Extension {events[hdu].name}.", fg='yellow')
if click.confirm("Use extension [y] or go to next [n]"):
try:
column = click.prompt(
"Which column name", type=str, prompt_suffix='? ')
series.time = events[hdu].data[column]
series.time = series.time.astype(
series.time.dtype.name)
if click.confirm(
f"Use column {column}", prompt_suffix='? '):
flag = 0
break
else:
flag = 1
click.clear()
except (KeyError, TypeError):
flag = 1
click.clear()
except IndexError:
flag = 1
click.clear()
click.secho(
f"Extension number {hdu} not found.", fg='red')
else:
flag = 1
hdu += 1
click.clear()
return flag
def load_hdf5(series) -> int:
"""
Open hdf5 file and store time series.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
flag = 1
table = Table.read(series.input, format='hdf5')
try:
series.time = table['TIME'].data
series.time = series.time.astype(series.time.dtype.name)
flag = 0
except (KeyError, TypeError, IndexError):
click.clear()
column = 'TIME'
flag = 1
while flag:
try:
table.pprint()
click.secho(f"Column {column} not found.", fg='red')
column = click.prompt(
"Which column name", type=str, prompt_suffix='? ')
series.time = table[column].data
series.time = series.time.astype(series.time.dtype.name)
if click.confirm(f"Use column {column}", prompt_suffix='? '):
flag = 0
else:
click.clear()
except (KeyError, TypeError, IndexError):
click.clear()
return flag
def save_ascii(series) -> None:
"""
Save the periodogram to ascii file.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
array = np.column_stack((series.bins, series.z2n))
table = Table(array, names=('FREQUENCY', 'POWER'))
table.write(f'{series.output}.txt', format='ascii')
def save_csv(series) -> None:
"""
Save the periodogram to csv file.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
array = np.column_stack((series.bins, series.z2n))
table = Table(array, names=('FREQUENCY', 'POWER'))
table.write(f'{series.output}.csv', format='csv')
def save_fits(series) -> None:
"""
Save the periodogram to fits file.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
suffix = pathlib.Path(series.input).suffix
if suffix in ("", ".txt", ".csv", ".ecsv", ".hdf", ".h5", ".hdf5", ".he5"):
primary_hdu = fits.PrimaryHDU()
bins = fits.Column(
name='FREQUENCY', array=series.bins, format='D', unit='Hz')
z2n = fits.Column(name='POWER', array=series.z2n, format='D')
hdr = fits.Header()
hdr['EXTNAME'] = 'Z2N'
hdr.comments['EXTNAME'] = 'Name of this extension'
hdr['HDUNAME'] = 'Z2N'
hdr.comments['HDUNAME'] = 'Name of the hdu'
hdr['events'] = f'{series.time.size}'
hdr.comments['events'] = 'Number of events'
hdr['exposure'] = f'{series.exposure}'
hdr.comments['exposure'] = 'Exposure time (Texp)'
hdr['sampling'] = f'{series.sampling}'
hdr.comments['sampling'] = 'Sampling rate (1/Texp)'
hdr['nyquist'] = f'{series.nyquist}'
hdr.comments['nyquist'] = 'Nyquist 2*(1/Texp)'
hdr['harmonic'] = f'{series.harmonics}'
hdr.comments['harmonic'] = 'Number of harmonics'
hdr['steps'] = f'{series.z2n.size}'
hdr.comments['steps'] = 'Number of steps'
hdr['fmin'] = f'{series.fmin}'
hdr.comments['fmin'] = 'Minimum frequency'
hdr['fmax'] = f'{series.fmax}'
hdr.comments['fmax'] = 'Maximum frequency'
hdr['delta'] = f'{series.delta}'
hdr.comments['delta'] = 'Frequency steps'
hdr['peak'] = f'{series.frequency}'
hdr.comments['peak'] = 'Global peak frequency'
hdr['period'] = f'{series.period}'
hdr.comments['period'] = 'Global peak period'
hdr['power'] = f'{series.power}'
hdr.comments['power'] = 'Global peak power'
hdr['pulsed'] = f'{series.pulsed}'
hdr.comments['pulsed'] = 'Global pulsed fraction'
try:
hdr['gpeak'] = f'{series.gauss.frequency}'
hdr.comments['gpeak'] = 'Gauss peak frequency'
hdr['gperiod'] = f'{series.gauss.period}'
hdr.comments['gperiod'] = 'Gauss peak period'
hdr['gpower'] = f'{series.gauss.power}'
hdr.comments['gpower'] = 'Gauss peak power'
hdr['gpulsed'] = f'{series.gauss.pulsed}'
hdr.comments['gpulsed'] = 'Gauss pulsed fraction'
except AttributeError:
pass
table_hdu = fits.BinTableHDU.from_columns([bins, z2n], header=hdr)
hdul = fits.HDUList([primary_hdu, table_hdu])
hdul.writeto(f'{series.output}.fits')
else:
with fits.open(series.input) as events:
bins = fits.Column(
name='FREQUENCY', array=series.bins, format='D', unit='Hz')
z2n = fits.Column(name='POWER', array=series.z2n, format='D')
hdr = fits.Header()
hdr['EXTNAME'] = 'Z2N'
hdr.comments['EXTNAME'] = 'Name of this extension'
hdr['HDUNAME'] = 'Z2N'
hdr.comments['HDUNAME'] = 'Name of the hdu'
hdr['events'] = f'{series.time.size}'
hdr.comments['events'] = 'Number of events'
hdr['exposure'] = f'{series.exposure}'
hdr.comments['exposure'] = 'Exposure time (Texp)'
hdr['sampling'] = f'{series.sampling}'
hdr.comments['sampling'] = 'Sampling rate (1/Texp)'
hdr['nyquist'] = f'{series.nyquist}'
hdr.comments['nyquist'] = 'Nyquist 2*(1/Texp)'
hdr['harmonic'] = f'{series.harmonics}'
hdr.comments['harmonic'] = 'Number of harmonics'
hdr['steps'] = f'{series.z2n.size}'
hdr.comments['steps'] = 'Number of steps'
hdr['fmin'] = f'{series.fmin}'
hdr.comments['fmin'] = 'Minimum frequency'
hdr['fmax'] = f'{series.fmax}'
hdr.comments['fmax'] = 'Maximum frequency'
hdr['delta'] = f'{series.delta}'
hdr.comments['delta'] = 'Frequency steps'
hdr['peak'] = f'{series.frequency}'
hdr.comments['peak'] = 'Global peak frequency'
hdr['period'] = f'{series.period}'
hdr.comments['period'] = 'Global peak period'
hdr['power'] = f'{series.power}'
hdr.comments['power'] = 'Global peak power'
hdr['pulsed'] = f'{series.pulsed}'
hdr.comments['pulsed'] = 'Global pulsed fraction'
try:
hdr['gpeak'] = f'{series.gauss.frequency}'
hdr.comments['gpeak'] = 'Gauss peak frequency'
hdr['gperiod'] = f'{series.gauss.period}'
hdr.comments['gperiod'] = 'Gauss peak period'
hdr['gpower'] = f'{series.gauss.power}'
hdr.comments['gpower'] = 'Gauss peak power'
hdr['gpulsed'] = f'{series.gauss.pulsed}'
hdr.comments['gpulsed'] = 'Gauss pulsed fraction'
except AttributeError:
pass
hdu = fits.BinTableHDU.from_columns([bins, z2n], header=hdr)
events.append(hdu)
events.writeto(f'{series.output}.fits')
def save_hdf5(series) -> None:
"""
Save the periodogram to hdf5 file.
Parameters
----------
series : Series
A time series object.
Returns
-------
None
"""
array = np.column_stack((series.bins, series.z2n))
table = Table(array, names=('FREQUENCY', 'POWER'))
table.write(f'{series.output}.hdf5', path='z2n',
format='hdf5', compression=True)
| 17,730 | 34.820202 | 86 | py |
z2n-periodogram | z2n-periodogram-master/z2n/plot.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Generic/Built-in
import psutil
import pathlib
# Other Libraries
import click
import numpy as np
import matplotlib.pyplot as plt
# Owned Libraries
from z2n import stats
from z2n.series import Series
class Plot:
"""
A class to represent the plot of a time series.
Attributes
----------
* `input : str`
> A string that represents the input file path.
* `output : str`
> A string that represents the output file name.
* `format : str`
> A string that represents the file format.
* `back : int`
> A integer for the state of the background.
* `data : Series`
> A series object that represents the data.
* `noise : Series`
> A series object that represents the background.
Methods
-------
"""
def __init__(self) -> None:
self.back = 0
self.input = ""
self.output = ""
self.format = ""
self.data = Series()
self.noise = Series()
self.figure, self.axes = ((), ())
def get_input(self) -> str:
"""Return the input image name."""
click.secho(
f"Name of the file: {self.input}", self.data.input, fg='cyan')
return self.input
def set_input(self) -> None:
"""Change the input image name."""
self.input = click.prompt("\nFilename", type=click.Path(exists=True))
def get_output(self) -> str:
"""Return the output image name."""
click.secho(f"Name of the image: {self.output}", fg='cyan')
return self.output
def set_output(self) -> None:
"""Change the output image name."""
default = "z2n_" + pathlib.Path(self.data.input).stem
flag = 1
while flag:
self.output = click.prompt(
"\nName of the image", default, type=click.Path())
if pathlib.Path(f"{self.output}.{self.format}").is_file():
click.secho("File already exists.", fg='red')
else:
flag = 0
def get_format(self) -> str:
"""Return the image format."""
click.secho(f"File format: {self.format}", fg='cyan')
return self.format
def set_format(self) -> None:
"""Change the image format."""
self.format = click.prompt(
"\nFormat", "ps", type=click.Choice(['png', 'pdf', 'ps', 'eps']))
def add_background(self) -> None:
"""Add background on the plot."""
self.back = 1
click.secho("Background file added.", fg='green')
def rm_background(self) -> None:
"""Remove background on the plot."""
self.back = 0
del self.noise
self.noise = Series()
click.secho("Background file removed.", fg='green')
def plot_figure(self) -> None:
"""Create the figure on the plotting window."""
plt.close()
plt.ion()
if not self.back:
self.figure, self.axes = plt.subplots(self.back + 1)
self.axes.plot(
self.data.bins, self.data.z2n, label='Z2n Power',
color='tab:blue', linewidth=2)
try:
self.axes.plot(
self.data.gauss.bins, self.data.gauss.z2n,
color='tab:red', label='Gaussian Fit', linewidth=1)
except AttributeError:
pass
self.axes.set_xlabel('Frequency (Hz)')
self.axes.set_ylabel('Power')
self.axes.legend(loc='best')
else:
self.figure, self.axes = plt.subplots(
self.back + 1, sharex=True, sharey=True)
self.axes[0].plot(
self.data.bins, self.data.z2n, label='Z2n Power',
color='tab:blue', linewidth=2)
try:
self.axes[0].plot(
self.data.gauss.bins, self.data.gauss.z2n,
color='tab:red', label='Gaussian Fit', linewidth=1)
except AttributeError:
pass
self.axes[1].plot(
self.noise.bins, self.noise.z2n, color='tab:cyan',
label='Background')
self.axes[0].set_xlabel('Frequency (Hz)')
self.axes[0].set_ylabel('Power')
self.axes[1].set_xlabel('Frequency (Hz)')
self.axes[1].set_ylabel('Power')
self.axes[0].legend(loc='best')
self.axes[1].legend(loc='best')
plt.tight_layout()
def plot_background(self) -> int:
"""Create subplot of the background."""
flag = 0
click.secho("The background file is needed.", fg='yellow')
if not self.noise.set_time():
self.noise.bins = np.array(self.data.bins)
self.noise.harmonics = self.data.harmonics
plt.close()
self.noise.z2n = np.zeros(self.noise.bins.size)
stats.periodogram(self.noise)
self.add_background()
self.plot_figure()
else:
flag = 1
return flag
def plot_periodogram(self) -> int:
"""Create plot of the periodogram."""
flag = 0
if not self.data.z2n.size:
click.secho("The event file is needed.", fg='yellow')
if not self.data.set_time():
if not self.data.set_bins():
plt.close()
self.data.set_periodogram()
self.data.plot()
self.plot_figure()
self.save_image()
if click.confirm("Add background file", prompt_suffix='? '):
self.plot_background()
else:
flag = 1
else:
flag = 1
else:
opt = click.prompt(
"Change file [1] or frequency range [2]", type=int)
if opt in (1, 2):
self.rm_background()
if opt == 1:
click.secho("The event file is needed.", fg='yellow')
if not self.data.set_time():
if not self.data.set_bins():
plt.close()
self.data.set_periodogram()
self.data.plot()
self.plot_figure()
self.save_image()
if click.confirm("Add background file", prompt_suffix='? '):
self.plot_background()
else:
flag = 1
else:
flag = 1
elif opt == 2:
if not click.confirm(
"Select region on the interactive plot"):
if not self.data.set_bins():
plt.close()
self.data.set_periodogram()
self.data.plot()
self.plot_figure()
self.save_image()
if click.confirm("Add background file", prompt_suffix='? '):
self.plot_background()
else:
flag = 1
else:
flag2 = 1
click.secho(
"The frequency range is needed (Hz).", fg='yellow')
self.plot_figure()
while flag2:
if click.confirm(
"Is the region selected", prompt_suffix='? '):
axis = plt.gca().get_xlim()
self.data.fmin = axis[0]
self.data.fmax = axis[1]
if click.confirm(
"Use oversampling factor",
True, prompt_suffix='? '):
self.data.set_oversample()
self.data.delta = 1 / \
(self.data.oversample * self.data.exposure)
else:
self.data.set_delta()
self.data.get_fmin()
self.data.get_fmax()
self.data.get_delta()
self.data.set_harmonics()
block = (self.data.fmax - self.data.fmin) / \
np.array(self.data.delta)
nbytes = np.array(
self.data.delta).dtype.itemsize * block
click.secho(
f"Computation memory {nbytes* 10e-6:.5f} MB",
fg='yellow')
if click.confirm(
"Run with these values",
True, prompt_suffix='? '):
if nbytes < psutil.virtual_memory()[1]:
self.data.bins = np.arange(
self.data.fmin, self.data.fmax,
self.data.delta)
self.data.get_bins()
plt.close()
self.data.set_periodogram()
self.data.plot()
self.plot_figure()
self.save_image()
if click.confirm(
"Add background file",
prompt_suffix='? '):
self.plot_background()
flag = 0
flag2 = 0
else:
flag2 = 1
flag = 1
click.secho(
"Not enough memory available.", fg='red')
else:
flag2 = 1
flag = 1
click.secho(
"The frequency range is needed (Hz).",
fg='yellow')
self.plot_figure()
else:
flag = 1
click.secho("Select '1' or '2'.", fg='red')
return flag
def save_image(self) -> None:
"""Save the image on a file."""
plt.tight_layout()
click.secho("Save the periodogram on a image.", fg='yellow')
self.set_format()
if self.format == 'png':
self.set_output()
plt.savefig(f'{self.output}.{self.format}', format=self.format)
click.secho(
f"Image saved at {self.output}.{self.format}", fg='green')
elif self.format == 'pdf':
self.set_output()
plt.savefig(f'{self.output}.{self.format}', format=self.format)
click.secho(
f"Image saved at {self.output}.{self.format}", fg='green')
elif self.format == 'ps':
self.set_output()
plt.savefig(f'{self.output}.{self.format}', format=self.format)
click.secho(
f"Image saved at {self.output}.{self.format}", fg='green')
elif self.format == 'eps':
self.set_output()
plt.savefig(f'{self.output}.{self.format}', format=self.format)
click.secho(
f"Image saved at {self.output}.{self.format}", fg='green')
else:
click.secho(f"{self.format} format not supported.", fg='red')
def change_title(self) -> None:
"""Change the title on the figure."""
if self.back:
self.figure.suptitle(click.prompt(
"Which title", "Z2n Periodogram"))
click.secho("Changed title.", fg='green')
else:
self.axes.set_title(click.prompt("Which title", "Z2n Periodogram"))
click.secho("Changed title.", fg='green')
def change_xlabel(self) -> None:
"""Change the label on the x axis."""
if self.back:
xlabel = click.prompt("Which label", "Frequency (Hz)")
self.axes[0].set_xlabel(xlabel)
self.axes[1].set_xlabel(xlabel)
click.secho("Changed X axis label.", fg='green')
else:
self.axes.set_xlabel(click.prompt("Which label", "Frequency (Hz)"))
click.secho("Changed X axis label.", fg='green')
def change_xscale(self) -> None:
"""Change the scale on the x axis."""
if self.back:
self.axes[0].set_xscale(click.prompt(
"Which scale [linear, log]", "linear"))
click.secho("Changed X axis scale.", fg='green')
else:
self.axes.set_xscale(click.prompt(
"Which scale [linear, log]", "linear"))
click.secho("Changed X axis scale.", fg='green')
def change_xlim(self) -> None:
"""Change the limites on the x axis."""
if self.back:
low = click.prompt("Which lower limit", type=float)
up = click.prompt("Which upper limit", type=float)
self.axes[0].set_xlim([low, up])
click.secho("Changed X axis limits.", fg='green')
else:
low = click.prompt("Which lower limit", type=float)
up = click.prompt("Which upper limit", type=float)
self.axes.set_xlim([low, up])
click.secho("Changed X axis limits.", fg='green')
def change_ylabel(self) -> None:
"""Change the label on the y axis."""
if self.back:
ylabel = click.prompt("Which label", "Power")
self.axes[0].set_ylabel(ylabel)
self.axes[1].set_ylabel(ylabel)
click.secho("Changed y axis label.", fg='green')
else:
self.axes.set_ylabel(click.prompt("Which label", "Power"))
click.secho("Changed y axis label.", fg='green')
def change_yscale(self) -> None:
"""Change the scale on the y axis."""
if self.back:
self.axes[0].set_yscale(click.prompt(
"Which scale [linear, log]", "linear"))
click.secho("Changed y axis scale.", fg='green')
else:
self.axes.set_yscale(click.prompt(
"Which scale [linear, log]", "linear"))
click.secho("Changed y axis scale.", fg='green')
def change_ylim(self) -> None:
"""Change the limites on the y axis."""
if self.back:
low = click.prompt("Which lower limit", type=float)
up = click.prompt("Which upper limit", type=float)
self.axes[0].set_ylim([low, up])
click.secho("Changed y axis limits.", fg='green')
else:
low = click.prompt("Which lower limit", type=float)
up = click.prompt("Which upper limit", type=float)
self.axes.set_ylim([low, up])
click.secho("Changed y axis limits.", fg='green')
| 15,571 | 39.978947 | 88 | py |
z2n-periodogram | z2n-periodogram-master/z2n/__init__.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
__version__ = '2.0.6'
__license__ = 'MIT'
__author__ = 'Yohan Alexander'
__copyright__ = 'Copyright (C) 2020, Z2n Software, by Yohan Alexander.'
__description__ = 'A package for interative periodograms analysis.'
__maintainer__ = 'Yohan Alexander'
__email__ = '[email protected]'
__status__ = 'Development'
__credits__ = 'Yohan Alexander'
__docs__ = 'https://z2n-periodogram.readthedocs.io'
__url__ = 'https://github.com/yohanalexander/z2n-periodogram'
| 501 | 32.466667 | 71 | py |
z2n-periodogram | z2n-periodogram-master/z2n/series.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
# Generic/Built-in
import sys
import copy
import psutil
import pathlib
import tempfile
# Other Libraries
import h5py
import click
import termtables
import numpy as np
import matplotlib.pyplot as plt
# Owned Libraries
from z2n import file
from z2n import stats
class Series:
"""
A class to represent a time series object.
Attributes
----------
* `bak : str`
> A string that represents the backup file path.
* `gauss : str`
> A series object that represents the gaussian fit.
* `input : str`
> A string that represents the input file path.
* `output : str`
> A string that represents the output file name.
* `format : str`
> A string that represents the file format.
* `time : np.array`
> An arrray that represents the time series.
* `bins : np.array`
> An arrray that represents the frequency bins.
* `z2n : np.array`
> An arrray that represents the periodogram.
* `harmonics : int`
> An integer that represents the number of harmonics.
* `oversample : int`
> An integer that represents the oversample factor.
* `fmin : float`
> A float that represents the minimum frequency.
* `fmax : float`
> A float that represents the maximum frequency.
* `delta : float`
> A float that represents the frequency steps.
* `nyquist : float`
> A float that represents the nyquist frequency.
* `exposure : float`
> A float that represents the exposure period.
* `sampling : float`
> A float that represents the sampling rate.
* `power : float`
> A float that represents the peak power.
* `frequency : float`
> A float that represents the peak frequency.
* `period : float`
> A float that represents the peak period.
* `errorf : float`
> A float that represents the frequency uncertainty.
* `errorp : float`
> A float that represents the period uncertainty.
* `pulsed : float`
> A float that represents the pulsed fraction.
Methods
-------
"""
def __init__(self) -> None:
self.bak = ""
self.gauss = ""
self.input = ""
self.output = ""
self.format = ""
self.time = np.array([])
self.bins = np.array([])
self.z2n = np.array([])
self.fmin = 0
self.fmax = 0
self.delta = 0
self.nyquist = 0
self.harmonics = 0
self.oversample = 0
self.exposure = 0
self.sampling = 0
self.power = 0
self.frequency = 0
self.errorf = 0
self.period = 0
self.errorp = 0
self.pulsed = 0
def get_gauss(self) -> str:
"""Return the gaussian series object."""
return self.gauss
def set_gauss(self) -> None:
"""Copy the gaussian series object."""
self.gauss = copy.deepcopy(self)
def get_bak(self) -> str:
"""Return the backup file path."""
click.secho(f"Path of the backup: {self.bak}", fg='cyan')
return self.bak
def set_bak(self) -> None:
"""Change the backup file path."""
self.bak = tempfile.NamedTemporaryFile(
suffix='.z2n', delete=False).name
self.bak = h5py.File(self.bak, 'a')
self.bak.create_dataset('TIME', data=self.time, compression='lzf')
self.bak.create_dataset('BINS', data=self.bins, compression='lzf')
self.bak.create_dataset('Z2N', data=self.z2n, compression='lzf')
del self.time
del self.bins
del self.z2n
self.time = self.bak['TIME']
self.bins = self.bak['BINS']
self.z2n = self.bak['Z2N']
def get_input(self) -> str:
"""Return the input file path."""
click.secho(f"Event file: {self.input}", fg='cyan')
return self.input
def set_input(self) -> None:
"""Change the input file path."""
self.input = click.prompt(
"\nFilename", self.input, type=click.Path(exists=True))
def get_output(self) -> str:
"""Return the output file name."""
click.secho(f"Output file: {self.output}", fg='cyan')
return self.output
def set_output(self) -> None:
"""Change the output file name."""
default = "z2n_" + pathlib.Path(self.input).stem
flag = 1
while flag:
self.output = click.prompt(
"\nName of the file", default, type=click.Path())
if pathlib.Path(f"{self.output}.{self.format}").is_file():
click.secho("File already exists.", fg='red')
else:
flag = 0
def get_format(self) -> str:
"""Return the file format."""
click.secho(f"File format: {self.format}", fg='cyan')
return self.format
def set_format(self) -> None:
"""Change the file format."""
self.format = click.prompt(
"\nFormat", "fits", type=click.Choice(['ascii', 'csv', 'fits', 'hdf5']))
def get_time(self) -> np.array:
"""Return the time series."""
click.secho(f"{self.time.size} events.", fg='cyan')
return self.time
def set_time(self) -> int:
"""Change the time series."""
flag = 0
self.set_input()
if not file.load_file(self, 0):
click.secho('Event file loaded.', fg='green')
self.set_exposure()
self.set_sampling()
self.set_nyquist()
self.get_time()
self.get_exposure()
self.get_sampling()
self.get_nyquist()
else:
flag = 1
return flag
def get_bins(self) -> np.array:
"""Return the frequency steps."""
click.secho(f"{self.bins.size} steps.", fg='cyan')
return self.bins
def set_bins(self) -> int:
"""Change the frequency steps."""
flag = 1
while flag:
click.secho("The frequency range is needed (Hz).", fg='yellow')
if click.confirm(
"\nNyquist as the minimum frequency", True, prompt_suffix='? '):
self.fmin = self.nyquist
else:
self.set_fmin()
self.set_fmax()
if click.confirm(
"\nUse oversampling factor", True, prompt_suffix='? '):
self.set_oversample()
self.delta = 1 / (self.oversample * self.exposure)
else:
self.set_delta()
self.get_fmin()
self.get_fmax()
self.get_delta()
self.set_harmonics()
block = (self.fmax - self.fmin) / np.array(self.delta)
nbytes = np.array(self.delta).dtype.itemsize * block
click.secho(
f"Computation memory {nbytes* 10e-6:.5f} MB", fg='yellow')
if click.confirm("\nRun with these values", True, prompt_suffix='? '):
if nbytes < psutil.virtual_memory()[1]:
self.bins = np.arange(self.fmin, self.fmax, self.delta)
self.get_bins()
flag = 0
else:
click.secho("Not enough memory available.", fg='red')
return flag
def get_periodogram(self) -> np.array:
"""Return the periodogram."""
click.secho(f"{self.z2n.size} steps.", fg='cyan')
return self.z2n
def set_periodogram(self) -> None:
"""Change the periodogram."""
self.bak = ""
self.time = np.array(self.time)
self.bins = np.array(self.bins)
self.z2n = np.zeros(self.bins.size)
stats.periodogram(self)
click.secho('Periodogram calculated.', fg='green')
self.set_gauss()
def get_nyquist(self) -> float:
"""Return the nyquist frequency."""
click.secho(
f"Nyquist 2*(1/Texp): {self.nyquist:.1e} Hz", fg='cyan')
return self.nyquist
def set_nyquist(self) -> None:
"""Change the nyquist frequency."""
self.nyquist = 2 * self.sampling
def get_fmin(self) -> float:
"""Return the minimum frequency."""
click.secho(f"\nMinimum frequency: {self.fmin:.1e} Hz", fg='cyan')
return self.fmin
def set_fmin(self) -> None:
"""Change the minimum frequency."""
self.fmin = click.prompt(
"\nMinimum frequency (Hz)", self.fmin, type=float)
def get_fmax(self) -> float:
"""Return the maximum frequency."""
click.secho(f"Maximum frequency: {self.fmax:.1e} Hz", fg='cyan')
return self.fmax
def set_fmax(self) -> None:
"""Change the maximum frequency."""
self.fmax = click.prompt(
"\nMaximum frequency (Hz)", self.fmax, type=float)
def get_delta(self) -> float:
"""Return the frequency steps."""
click.secho(f"Frequency steps: {self.delta:.1e} Hz\n", fg='cyan')
return self.delta
def set_delta(self) -> None:
"""Change the frequency steps."""
self.delta = click.prompt(
"\nFrequency steps (Hz)", self.delta, type=float)
def get_oversample(self) -> int:
"""Return the oversample factor."""
click.secho(f"Oversampling factor: {self.oversample}", fg='cyan')
return self.oversample
def set_oversample(self) -> None:
"""Change the oversample factor."""
self.oversample = click.prompt(
"\nOversampling factor", self.oversample, type=int)
def get_harmonics(self) -> int:
"""Return the number of harmonics."""
click.secho(f"Number of harmonics: {self.harmonics}", fg='cyan')
return self.harmonics
def set_harmonics(self) -> None:
"""Change the number of harmonics."""
self.harmonics = click.prompt("\nNumber of harmonics", 1, type=int)
def get_exposure(self) -> float:
"""Return the period of exposure."""
click.secho(f"Exposure time (Texp): {self.exposure:.1f} s", fg='cyan')
return self.exposure
def set_exposure(self) -> None:
"""Change the period of exposure."""
stats.exposure(self)
def get_sampling(self) -> float:
"""Return the sampling rate."""
click.secho(
f"Sampling rate (1/Texp): {self.sampling:.1e} Hz", fg='cyan')
return self.sampling
def set_sampling(self) -> None:
"""Change the sampling rate."""
stats.sampling(self)
def get_power(self) -> float:
"""Return the peak power."""
click.secho(f"Peak power: {self.power}", fg='cyan')
return self.power
def set_power(self) -> None:
"""Change the peak power."""
stats.power(self)
def get_frequency(self) -> float:
"""Return the peak frequency."""
click.secho(f"Peak frequency: {self.frequency} Hz", fg='cyan')
return self.frequency
def set_frequency(self) -> None:
"""Change the peak frequency."""
stats.frequency(self)
def get_period(self) -> float:
"""Return the peak period."""
click.secho(f"Peak period: {self.period} s", fg='cyan')
return self.period
def set_period(self) -> None:
"""Change the peak period."""
stats.period(self)
def get_pfraction(self) -> float:
"""Return the pulsed fraction."""
click.secho(f"Pulsed fraction: {self.pulsed * 100} %", fg='cyan')
return self.pulsed
def set_pfraction(self) -> None:
"""Change the pulsed fraction."""
stats.pfraction(self)
def get_errorf(self) -> float:
"""Return the uncertainty of the frequency."""
click.secho(f"Frequency Uncertainty: +/- {self.errorf} Hz", fg='cyan')
return self.errorf
def set_errorf(self) -> None:
"""Return the uncertainty of the frequency."""
stats.error(self)
def get_errorp(self) -> float:
"""Return the uncertainty of the period."""
click.secho(f"Period Uncertainty: +/- {self.errorp} s", fg='cyan')
return self.errorp
def set_errorp(self) -> None:
"""Return the uncertainty of the period."""
stats.error(self)
def load_file(self) -> int:
"""Load a input file."""
flag = 0
self.set_format()
if self.format == 'ascii':
self.set_input()
file.load_ascii(self)
elif self.format == 'csv':
self.set_input()
file.load_csv(self)
elif self.format == 'fits':
self.set_input()
file.load_fits(self, 0)
elif self.format == 'hdf5':
self.set_input()
file.load_hdf5(self)
else:
click.secho(f"{self.format} format not supported.", fg='red')
flag = 1
return flag
def save_file(self) -> None:
"""Save a output file."""
click.secho("Save the periodogram on a file.", fg='yellow')
self.set_format()
if self.format == 'ascii':
self.set_output()
file.save_ascii(self)
click.secho(f"File saved at {self.output}.txt", fg='green')
elif self.format == 'csv':
self.set_output()
file.save_csv(self)
click.secho(
f"File saved at {self.output}.{self.format}", fg='green')
elif self.format == 'fits':
self.set_output()
file.save_fits(self)
click.secho(
f"File saved at {self.output}.{self.format}", fg='green')
elif self.format == 'hdf5':
self.set_output()
file.save_hdf5(self)
click.secho(
f"File saved at {self.output}.{self.format}", fg='green')
else:
click.secho(f"{self.format} format not supported.", fg='red')
def plot(self) -> None:
"""Plot the series and the parameters."""
flag = 1
while flag:
plt.close()
plt.ion()
plt.plot(self.bins, self.z2n, label='Z2n Power', linewidth=2)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power')
plt.legend(loc='best')
plt.tight_layout()
try:
stats.error(self)
header = ["", "Z2N POWER", "GAUSSIAN FIT"]
data = [
["Power", f"{self.power}",
f"{self.gauss.power}"],
["Frequency", f"{self.frequency} Hz",
f"{self.gauss.frequency} Hz"],
["Frequency error", "_",
f"+/- {self.gauss.errorf} Hz"],
["Period", f"{self.period} s",
f"{self.gauss.period} s"],
["Period error", "_", f"+/- {self.gauss.errorp} s"],
["Pulsed Fraction", f"{self.pulsed* 100} %",
f"{self.gauss.pulsed* 100} %"],
]
termtables.print(data, header)
plt.close()
plt.ion()
plt.plot(self.bins, self.z2n, label='Z2n Power', linewidth=2)
plt.plot(
self.gauss.bins, self.gauss.z2n,
color='tab:red', label='Gaussian Fit', linewidth=1)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Power')
plt.legend(loc='best')
plt.tight_layout()
except IndexError:
click.secho("Error on the selection.", fg='red')
else:
if not click.confirm("Select another region for the fit"):
self.save_file()
flag = 0
click.secho("Save the results on a log file.", fg='yellow')
default = "z2n_" + pathlib.Path(self.input).stem
flag2 = 1
while flag2:
log = click.prompt(
"\nName of the file", default, type=click.Path())
if pathlib.Path(f"{log}.log").is_file():
click.secho("File already exists.", fg='red')
else:
flag2 = 0
with open(f"{log}.log", "w+") as logfile:
sys.stdout = logfile
self.get_input()
self.get_output()
self.get_format()
self.get_time()
self.get_exposure()
self.get_sampling()
self.get_nyquist()
self.get_fmin()
self.get_fmax()
self.get_delta()
self.get_bins()
self.get_harmonics()
click.secho("Periodogram values.", fg='yellow')
self.get_power()
self.get_frequency()
self.get_period()
self.get_pfraction()
click.secho("Gaussian values.", fg='yellow')
self.gauss.get_power()
self.gauss.get_frequency()
self.gauss.get_errorf()
self.gauss.get_period()
self.gauss.get_errorp()
self.gauss.get_pfraction()
sys.stdout = sys.__stdout__
click.secho(
f"Saved the results at {log}.log", fg='green')
| 17,816 | 34.281188 | 84 | py |
bmm | bmm-master/setup.py |
import setuptools
NAME = 'bmm'
DESCRIPTION = 'Bayesian Map-matching'
with open('README.md') as f:
long_description = f.read()
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
METADATA = dict(
name="bmm",
version='1.3',
url='http://github.com/SamDuffield/bmm',
author='Sam Duffield',
install_requires=install_requires,
author_email='[email protected]',
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_namespace_packages(),
include_package_data=True,
platforms='any',
classifiers=[
"Programming Language :: Python :: 3.8",
]
)
setuptools.setup(**METADATA)
| 742 | 22.21875 | 50 | py |
bmm | bmm-master/bmm/__init__.py |
"""bmm: Bayesian Map-matching"""
from bmm.src.inference.smc import initiate_particles
from bmm.src.inference.smc import update_particles
from bmm.src.inference.smc import offline_map_match
from bmm.src.inference.smc import _offline_map_match_fl
from bmm.src.inference.smc import updates
from bmm.src.inference.sample import sample_route
from bmm.src.inference.sample import random_positions
from bmm.src.inference.model import MapMatchingModel
from bmm.src.inference.model import ExponentialMapMatchingModel
from bmm.src.inference.proposal import get_possible_routes
from bmm.src.tools.plot import plot
from bmm.src.tools.edges import cartesianise_path
from bmm.src.tools.edges import get_geometry
from bmm.src.tools.edges import discretise_edge
from bmm.src.tools.edges import observation_time_indices
from bmm.src.tools.edges import observation_time_rows
from bmm.src.tools.edges import long_lat_to_utm
from bmm.src.inference.particles import MMParticles
from bmm.src.inference.parameters import offline_em
try:
del src
except NameError:
pass
| 1,060 | 28.472222 | 63 | py |
bmm | bmm-master/bmm/src/tools/edges.py | ########################################################################################################################
# Module: edges.py
# Description: Some tools including interpolation along a proportion of a given edge, selecting edges within a distance
# of a point and discretisation of an edge for sampling.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from functools import lru_cache
from typing import Union, Tuple
import numpy as np
import osmnx as ox
from shapely.geometry import Point
from shapely.geometry import LineString
from networkx.classes import MultiDiGraph
from geopandas import GeoDataFrame
def edge_interpolate(geometry: LineString,
alpha: float) -> np.ndarray:
"""
Given edge and proportion travelled, return (x,y) coordinate.
:param geometry: edge geometry
:param alpha: in (0,1] proportion of edge travelled
:return: cartesian coordinate
"""
return np.array(geometry.interpolate(alpha, normalized=True).coords)[0]
def get_geometry(graph: MultiDiGraph,
edge: np.ndarray) -> LineString:
"""
Extract geometry of an edge from global graph object. If geometry doesn't exist set to straight line.
:param graph: encodes road network, simplified and projected to UTM
:param edge: length = 3 with elements u, v, k
* u: int, edge start node
* v: int, edge end node
* k: int, edge key
:return: edge geometry
"""
edge_tuple = tuple(int(e) for e in edge)
return get_geometry_cached(graph, edge_tuple)
@lru_cache(maxsize=2 ** 8)
def get_geometry_cached(graph: MultiDiGraph,
edge_tuple: tuple) -> LineString:
"""
Cacheable
Extract geometry of an edge from global graph object. If geometry doesn't exist set to straight line.
:param graph: encodes road network, simplified and projected to UTM
:param edge_tuple: (hashable for lru_cache), length = 3
elements u, v, k
u: int, edge start node
v: int, edge end node
k: int, edge key
:return: edge geometry
"""
# Extract edge data, in particular the geometry
edge_data = graph.get_edge_data(edge_tuple[0], edge_tuple[1], edge_tuple[2])
# If no geometry attribute, manually add straight line
if 'geometry' in edge_data:
edge_geom = edge_data['geometry']
else:
point_u = Point((graph.nodes[edge_tuple[0]]['x'], graph.nodes[edge_tuple[0]]['y']))
point_v = Point((graph.nodes[edge_tuple[1]]['x'], graph.nodes[edge_tuple[1]]['y']))
edge_geom = LineString([point_u, point_v])
return edge_geom
def discretise_geometry(geom: LineString,
d_refine: float,
return_dists: bool = False) -> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
"""
Given edge, return series of [edge, alpha] points at determined discretisation increments along edge.
alpha is proportion of edge traversed.
:param geom: edge geometry
:param d_refine: metres, resolution of distance discretisation
:param return_dists: if true return distance along edge as well as alpha (proportion)
:return: list of alphas at each discretisation point
"""
ds = np.arange(geom.length, d_refine / 10, -d_refine)
alphas = ds / geom.length
return (alphas, ds) if return_dists else alphas
def discretise_edge(graph: MultiDiGraph,
edge: np.ndarray,
d_refine: float) -> np.ndarray:
"""
Discretises edge to given edge refinement parameter.
Returns array of proportions along edge, xy cartesian coordinates and distances along edge
:param graph: encodes road network, simplified and projected to UTM
:param edge: list-like, length = 3 with elements u, v, k
* u: int, edge start node
* v: int, edge end node
* k: int, edge key
:param d_refine: metres, resolution of distance discretisation
:return: shape = (_, 4) with columns
* alpha: float in (0,1], position along edge
* x: float, metres, cartesian x coordinate
* y: float, metres, cartesian y coordinate
* distance: float, distance from start of edge
"""
edge_tuple = tuple(int(e) for e in edge)
return discretise_edge_cached(graph, edge_tuple, d_refine).copy()
@lru_cache(maxsize=2 ** 8)
def discretise_edge_cached(graph: MultiDiGraph,
edge_tuple: tuple,
d_refine: float) -> np.ndarray:
"""
Cacheable
Discretises edge to given edge refinement parameter.
Returns array of proportions along edge, xy cartesian coordinates and distances along edge
:param graph: encodes road network, simplified and projected to UTM
:param edge_tuple: tuple (hashable for lru_cache), length = 3
elements u, v, k
u: int, edge start node
v: int, edge end node
k: int, edge key
:param d_refine: metres, resolution of distance discretisation
:return: shape = (_, 4)
columns
alpha: float in (0,1], position along edge
x: float, metres, cartesian x coordinate
y: float, metres, cartesian y coordinate
distance: float, distance from start of edge
"""
edge_geom = get_geometry_cached(graph, edge_tuple)
alphas, distances = discretise_geometry(edge_geom, d_refine, True)
n_distances = len(distances)
out_mat = np.zeros((n_distances, 4))
out_mat[:, 0] = alphas
out_mat[:, 3] = distances
for i in range(n_distances):
out_mat[i, 1:3] = edge_geom.interpolate(distances[i]).coords[0]
return out_mat
def graph_edges_gdf(graph: MultiDiGraph) -> GeoDataFrame:
"""
Converts networkx graph to geopandas data frame. (Fast!)
:param graph: encodes road network, simplified and projected to UTM
:return: gdf of edges with columns [u, v, k, geometry]
"""
gdf = ox.graph_to_gdfs(graph, nodes=False, fill_edge_geometry=True)
gdf_index_gdf = gdf.index.to_frame()
for col in ["u", "v", "key"]:
if col in gdf_index_gdf.columns:
gdf[col] = gdf_index_gdf[col]
edge_gdf = gdf[["u", "v", "key", "geometry"]]
return edge_gdf
def get_edges_within_dist(graph_edges: GeoDataFrame,
coord: np.ndarray,
dist_retain: float) -> GeoDataFrame:
"""
Given a point returns all edges that fall within a radius of dist.
:param graph_edges: gdf of edges with columns [u, v, k, geometry]
:param coord: central point
:param dist_retain: metres, retain radius
:return: gdf of edges with columns [u, v, k, geometry, distance_to_obs]
all with distance_to_obs < dist_retain
"""
graph_edges_dist = graph_edges.copy()
graph_edges_dist['distance_to_obs'] = graph_edges['geometry'].apply(
lambda geom: Point(tuple(coord)).distance(geom))
edges_within_dist = graph_edges_dist[graph_edges_dist['distance_to_obs'] < dist_retain]
return edges_within_dist
def get_truncated_discrete_edges(graph: MultiDiGraph,
coord: np.ndarray,
d_refine: float,
d_truncate: float,
return_dists_to_coord: bool = False) \
-> Union[Tuple[np.ndarray, np.ndarray], np.ndarray]:
"""
Discretises edges within dist_retain of coord
:param graph: encodes road network, simplified and projected to UTM
:param coord: conformal with graph (i.e. UTM)
:param d_refine: metres, resolution of distance discretisation
:param d_truncate: metres, distance within which of coord to retain points
:param return_dists_to_coord: if true additionally return array of distances to coord
:return: numpy.ndarray, shape = (number of points within truncation, 6)
columns: u, v, k, alpha, distance_to_coord
u: int, edge start node
v: int, edge end node
k: int, edge key
alpha: in [0,1], position along edge
x: float, metres, cartesian x coordinate
y: float, metres, cartesian y coordinate
if return_dists_to_coord also return np.ndarray, shape = (number of points within truncation,)
with distance of each point to coord
"""
# Extract geodataframe
graph_edges = graph_edges_gdf(graph)
# Remove edges with closest point outside truncation
close_edges = get_edges_within_dist(graph_edges, coord, d_truncate)
# Discretise edges
close_edges['alpha'] = close_edges['geometry'].apply(discretise_geometry, d_refine=d_refine)
# Remove distance from closest point on edge column
# (as this refers to closest point of edge and now we want specific point on edge)
close_edges = close_edges.drop(columns='distance_to_obs')
# Elongate, remove points outside truncation and store in list of lists
points = []
dists = []
for _, row in close_edges.iterrows():
for a in row['alpha']:
xy = edge_interpolate(row['geometry'], a)
dist = np.sqrt(np.square(coord - xy).sum())
if dist < d_truncate:
add_row = row.copy()
add_row['alpha'] = a
add_row['distance_to_obs'] = dist
points += [[row['u'], row['v'], row['key'], a, *xy]]
dists += [dist]
# Convert to numpy.ndarray
points_arr = np.array(points)
dists_arr = np.array(dists)
return (points_arr, dists_arr) if return_dists_to_coord else points_arr
def observation_time_indices(times: np.ndarray) -> np.ndarray:
"""
Remove zeros (other than the initial zero) from a series
:param times: series of timestamps
:return: bool array of timestamps that are either non-zero or the first timestamp
"""
return np.logical_or(times != 0, np.arange(len(times)) == 0.)
def observation_time_rows(path: np.ndarray) -> np.ndarray:
"""
Returns rows of path only at observation times (not intersections)
:param path: numpy.ndarray, shape=(_, 5+)
columns - t, u, v, k, alpha, ...
:return: trimmed path
numpy.ndarray, shape like path
"""
return path[observation_time_indices(path[:, 0])]
def long_lat_to_utm(points: Union[list, np.ndarray], graph=None) -> np.ndarray:
"""
Converts a collection of long-lat points to UTM
:param points: points to be projected, shape = (N, 2)
:param graph: optional graph containing desired crs in graph.graph['crs']
:return: array of projected points
"""
points = np.atleast_2d(points)
points_gdf = GeoDataFrame({'index': np.arange(len(points)),
'x': points[:, 0],
'y': points[:, 1]})
points_gdf['geometry'] = points_gdf.apply(lambda row: Point(row['x'], row['y']), axis=1)
points_gdf.crs = '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs' # long lat crs
points_gdf_utm = ox.projection.project_gdf(points_gdf, to_crs=str(graph.graph['crs']) if graph is not None else None)
points_gdf_utm['x'] = points_gdf_utm['geometry'].map(lambda point: point.x)
points_gdf_utm['y'] = points_gdf_utm['geometry'].map(lambda point: point.y)
return np.squeeze(np.array(points_gdf_utm[['x', 'y']]))
def interpolate_path(graph: MultiDiGraph,
path: np.ndarray,
d_refine: float = 1,
t_column: bool = False) -> np.ndarray:
"""
Turns path into a discrete collection of positions to be plotted
:param graph: simplified graph
:param path: numpy.ndarray, shape = (_, 4)
:param d_refine: float
metres
resolution of distance discretisation
:param t_column: boolean
boolean describing if input has a first column for the time variable
:return: numpy.ndarray, shape = (_, 6)
elongated array for plotting path
"""
start_col = 1 * t_column
out_arr = path[:1].copy()
prev_point = out_arr[0]
for point in path[1:]:
edge_geom = get_geometry(graph, point[start_col:(start_col + 3)])
edge_length = edge_geom.length
if np.array_equal(point[start_col:(start_col + 3)], prev_point[start_col:(start_col + 3)]):
edge_metres = np.arange(prev_point[start_col + 3] * edge_length
+ d_refine, point[start_col + 3] * edge_length, d_refine)
else:
edge_metres = np.arange(0, point[start_col + 3] * edge_length, d_refine)
edge_alphas = edge_metres / edge_length
append_arr = np.zeros((len(edge_alphas), out_arr.shape[1]))
append_arr[:, start_col:(start_col + 3)] = point[start_col:(start_col + 3)]
append_arr[:, start_col + 3] = edge_alphas
out_arr = np.append(out_arr, append_arr, axis=0)
prev_point = point
return out_arr
def cartesianise_path(graph, path, t_column=True, observation_time_only=False):
"""
Converts particle or array of edges and alphas into cartesian (x,y) points.
:param path: numpy.ndarray, shape=(_, 5+)
columns - (t), u, v, k, alpha, ...
:param t_column: boolean
boolean describing if input has a first column for the time variable
:return: numpy.ndarray, shape = (_, 2)
cartesian points
"""
start_col = 1 * t_column
if observation_time_only:
path = observation_time_rows(path)
cart_points = np.zeros(shape=(path.shape[0], 2))
for i, point in enumerate(path):
edge_geom = get_geometry(graph, point[start_col:(3 + start_col)])
cart_points[i, :] = edge_interpolate(edge_geom, point[3 + start_col])
return np.atleast_2d(cart_points)
| 13,894 | 38.251412 | 121 | py |
bmm | bmm-master/bmm/src/tools/plot.py | ########################################################################################################################
# Module: plot.py
# Description: Plot cam_graph, inferred route and/or polyline.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
import numpy as np
import osmnx as ox
from matplotlib import pyplot as plt
from bmm.src.tools.edges import interpolate_path, cartesianise_path, observation_time_rows
def plot(graph, particles=None, polyline=None, label_start_end=True,
bgcolor='white', node_color='grey', node_size=0, edge_color='lightgrey', edge_linewidth=3,
particles_color='orange', particles_alpha=None,
polyline_color='red', polyline_s=100, polyline_linewidth=3,
**kwargs):
"""
Plots particle approximation of trajectory
:param graph: NetworkX MultiDiGraph
UTM projection
encodes road network
e.g. generated using OSMnx
:param particles: MMParticles object (from inference.particles)
particle approximation
:param polyline: list-like, each element length 2
UTM - metres
series of GPS coordinate observations
:param label_start_end: bool
whether to label the start and end points of the route
:param bgcolor: str
background colour
:param node_color: str
node (intersections) colour
:param node_size: float
size of nodes (intersections)
:param edge_color: str
colour of edges (roads)
:param edge_linewidth: float
width of edges (roads
:param particles_color: str
colour of routes
:param particles_alpha: float in [0, 1]
plotting parameter
opacity of routes
:param polyline_color: str
colour of polyline crosses
:param polyline_s: str
size of polyline crosses
:param polyline_linewidth: str
linewidth of polyline crosses
:param kwargs:
additional parameters to ox.plot_graph
:return: fig, ax
"""
fig, ax = ox.plot_graph(graph, show=False, close=False,
bgcolor=bgcolor, node_color=node_color, node_size=node_size,
edge_color=edge_color, edge_linewidth=edge_linewidth,
**kwargs)
ax.set_aspect("equal")
start_end_points = None
if particles is not None:
if isinstance(particles, np.ndarray):
particles = [particles]
start_end_points = np.zeros((2, 2))
alpha_min = 0.1
if particles_alpha is None:
particles_alpha = 1 / len(particles) * (1 - alpha_min) + alpha_min
xlim = [None, None]
ylim = [None, None]
for i, particle in enumerate(particles):
if particle is None:
continue
if len(particle) > 1:
int_path = interpolate_path(graph, particle, t_column=True)
cart_int_path = cartesianise_path(graph, int_path, t_column=True)
ax.plot(cart_int_path[:, 0], cart_int_path[:, 1], color=particles_color, linewidth=1.5,
alpha=particles_alpha)
cart_path = cartesianise_path(graph, observation_time_rows(particle), t_column=True)
else:
cart_path = cartesianise_path(graph, particle, t_column=True)
ax.scatter(cart_path[:, 0], cart_path[:, 1], color=particles_color, alpha=particles_alpha, zorder=2)
start_end_points[0] += cart_path[0] / len(particles)
start_end_points[1] += cart_path[-1] / len(particles)
xlim[0] = np.min(cart_path[:, 0]) if xlim[0] is None else min(np.min(cart_path[:, 0]), xlim[0])
xlim[1] = np.max(cart_path[:, 0]) if xlim[1] is None else max(np.max(cart_path[:, 0]), xlim[1])
ylim[0] = np.min(cart_path[:, 1]) if ylim[0] is None else min(np.min(cart_path[:, 1]), ylim[0])
ylim[1] = np.max(cart_path[:, 1]) if ylim[1] is None else max(np.max(cart_path[:, 1]), ylim[1])
xlim, ylim = expand_lims(xlim, ylim, 0.1)
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
if polyline is not None:
poly_arr = np.array(polyline)
ax.scatter(poly_arr[:, 0],
poly_arr[:, 1],
marker='x', c=polyline_color, s=polyline_s, linewidth=polyline_linewidth, zorder=10)
if particles is None:
start_end_points = poly_arr[np.array([0, -1])]
xlim = [np.min(poly_arr[:, 0]), np.max(poly_arr[:, 0])]
ylim = [np.min(poly_arr[:, 1]), np.max(poly_arr[:, 1])]
xlim, ylim = expand_lims(xlim, ylim, 0.1)
ax.set_xlim(xlim[0], xlim[1])
ax.set_ylim(ylim[0], ylim[1])
if start_end_points is not None and label_start_end:
plt.annotate('Start', start_end_points[0] + 25, zorder=12)
plt.annotate('End', start_end_points[1] + 25, zorder=12)
plt.tight_layout()
return fig, ax
def expand_lims(xlim, ylim, inflation):
x_range = max(xlim[1] - xlim[0], 200)
xlim[0] -= x_range * inflation
xlim[1] += x_range * inflation
y_range = max(ylim[1] - ylim[0], 200)
ylim[0] -= y_range * inflation
ylim[1] += y_range * inflation
return xlim, ylim
| 5,372 | 35.55102 | 120 | py |
bmm | bmm-master/bmm/src/inference/sample.py | ########################################################################################################################
# Module: inference/sample.py
# Description: Generate route and polyline from map-matching model.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
import warnings
from typing import Union, Tuple
import numpy as np
from networkx.classes import MultiDiGraph
from bmm.src.inference import proposal
from bmm.src.tools import edges
from bmm.src.inference.model import MapMatchingModel, ExponentialMapMatchingModel
from bmm.src.inference.smc import get_time_interval_array
def random_positions(graph: MultiDiGraph,
n: int = 1) -> np.ndarray:
"""
Sample random positions on a graph.
:param graph: encodes road network, simplified and projected to UTM
:param n: int number of positions to sample, default 1
:return: array of positions (u, v, key, alpha) - shape (n, 4)
"""
edges_arr = np.array(graph.edges)
n_edges = len(edges_arr)
edge_selection_indices = np.random.choice(n_edges, n)
edge_selection = edges_arr[edge_selection_indices]
random_alphas = np.random.uniform(size=(n, 1))
positions = np.concatenate((edge_selection, random_alphas), axis=1)
return positions
def sample_route(graph: MultiDiGraph,
timestamps: Union[float, np.ndarray],
num_obs: int = None,
mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
d_refine: float = 1.,
start_position: np.ndarray = None,
num_inter_cut_off: int = None) -> Tuple[np.ndarray, np.ndarray]:
"""
Runs offline map-matching. I.e. receives a full polyline and returns an equal probability collection
of trajectories.
Forward-filtering backward-simulation implementation - no fixed-lag approximation needed for offline inference.
:param graph: encodes road network, simplified and projected to UTM
:param timestamps: seconds
either float if all times between observations are the same, or a series of timestamps in seconds/UNIX timestamp
:param num_obs: int length of observed polyline to generate
:param mm_model: MapMatchingModel
:param d_refine: metres, resolution of distance discretisation
:param start_position: optional start position; array (u, v, k, alpha)
:param num_inter_cut_off: maximum number of intersections to cross in the time interval
:return: tuple with sampled route (array with same shape as a single MMParticles)
and polyline (array with shape (num_obs, 2))
"""
if isinstance(timestamps, np.ndarray):
num_obs = len(timestamps) + 1
time_interval_arr = get_time_interval_array(timestamps, num_obs)
if start_position is None:
start_position = random_positions(graph, 1)[0]
start_geom = edges.get_geometry(graph, start_position)
start_coords = edges.edge_interpolate(start_geom, start_position[-1])
full_sampled_route = np.concatenate([[0.], start_position, start_coords, [0.]])[np.newaxis]
for k in range(num_obs - 1):
time_interval = time_interval_arr[k]
d_max = mm_model.d_max(time_interval)
num_inter_cut_off_i = max(int(time_interval / 1.5), 10) if num_inter_cut_off is None else num_inter_cut_off
prev_pos = full_sampled_route[-1:].copy()
prev_pos[0, 0] = 0.
prev_pos[0, -1] = 0.
possible_routes = proposal.get_all_possible_routes_overshoot(graph, prev_pos, d_max,
num_inter_cut_off=num_inter_cut_off_i)
# Get all possible positions on each route
discretised_routes_indices_list = []
discretised_routes_list = []
for i, route in enumerate(possible_routes):
# All possible end positions of route
discretised_edge_matrix = edges.discretise_edge(graph, route[-1, 1:4], d_refine)
if route.shape[0] == 1:
discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, 0]
>= full_sampled_route[-1, 4]]
discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1, -1]
else:
discretised_edge_matrix[:, -1] += route[-2, -1]
discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, -1] < d_max + 1e-5]
# Track route index and append to list
if discretised_edge_matrix is not None and len(discretised_edge_matrix) > 0:
discretised_routes_indices_list += [np.ones(discretised_edge_matrix.shape[0], dtype=int) * i]
discretised_routes_list += [discretised_edge_matrix]
# Concatenate into numpy.ndarray
discretised_routes_indices = np.concatenate(discretised_routes_indices_list)
discretised_routes = np.concatenate(discretised_routes_list)
if len(discretised_routes) == 0 or (len(discretised_routes) == 1 and discretised_routes[0][-1] == 0):
warnings.warn('sample_route exited prematurely')
break
# Distance prior evals
distances = discretised_routes[:, -1]
distance_prior_evals = mm_model.distance_prior_evaluate(distances, time_interval)
# Deviation prior evals
deviation_prior_evals = mm_model.deviation_prior_evaluate(full_sampled_route[-1, 5:7],
discretised_routes[:, 1:3],
discretised_routes[:, -1])
# Normalise prior/transition probabilities
prior_probs = distance_prior_evals * deviation_prior_evals
prior_probs_norm_const = prior_probs.sum()
sampled_dis_route_index = np.random.choice(len(prior_probs), 1, p=prior_probs / prior_probs_norm_const)[0]
sampled_dis_route = discretised_routes[sampled_dis_route_index]
# Append sampled route to old particle
sampled_route = possible_routes[discretised_routes_indices[sampled_dis_route_index]]
full_sampled_route = proposal.process_proposal_output(full_sampled_route, sampled_route, sampled_dis_route,
time_interval, True)
obs_indices = edges.observation_time_indices(full_sampled_route[:, 0])
polyline = full_sampled_route[obs_indices, 5:7] \
+ mm_model.gps_sd * np.random.normal(size=(obs_indices.sum(), 2))
return full_sampled_route, polyline
| 6,711 | 45.937063 | 120 | py |
bmm | bmm-master/bmm/src/inference/model.py | ########################################################################################################################
# Module: inference/model.py
# Description: Objects and functions relating to the map-matching state-space model.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from typing import Union
from collections import OrderedDict
import numpy as np
from numba import njit
@njit
def _likelihood_evaluate(route_cart_coords: np.ndarray,
observation: np.ndarray,
gps_sd: float,
likelihood_d_truncate: float) -> Union[float, np.ndarray]:
"""
Evaluate probability of generating observation from cartesian coords - njitted
Vectorised to evaluate over many cart_coords for a single observation
Isotropic Gaussian with standard dev self.gps_sd
:param route_cart_coords: shape = (_, 2), cartesian coordinates - positions along road network
:param observation: shape = (2,) observed GPS cartesian coordinate
:return: shape = (_,) likelihood evaluations
"""
squared_deviations = np.sum((observation - route_cart_coords) ** 2, axis=1)
evals = np.exp(-0.5 / gps_sd ** 2 * squared_deviations)
if likelihood_d_truncate < np.inf:
evals *= squared_deviations < likelihood_d_truncate ** 2
return evals
class MapMatchingModel:
r"""
Class defining the state-space model used for map-matching.
**Transition density** (assuming constant time interval)
.. math:: p(x_t, e_t | x_{t-1}) \propto \gamma(d_t) \exp(-\beta|d^\text{gc}_t - d_t|)\mathbb{1}[d_t < d_\text{max}],
where :math:`d_t` is the distance between positions :math:`x_{t-1}` and :math:`x_{t}` along the series of edges
:math:`e_{t-1}`, restricted to the graph/road network.
:math:`d^\text{gc}_t` is the *great circle distance* between :math:`x_{t-1}` and :math:`x_{t}`,
not restricted to the graph/road network.
The :math:`\exp(-\beta|d^\text{gc}_t - d_t|)` term penalises non-direct or windy routes where :math:`\beta` is a
parameter stored in ``self.deviation_beta``, yet to be defined.
:math:`d_\text{max}` is defined by ``self.d_max`` function (metres)
and ``self.max_speed`` parameter (metres per second), defaults to 35.
The :math:`\gamma(d_t)` term penalises overly lengthy routes and is yet to be defined.
**Observation density**
.. math:: p(y_t| x_{t}) = \mathcal{N}(y_t \mid x_t, \sigma_\text{GPS}^2 \mathbb{I}_2),
where :math:`\sigma_\text{GPS}` is the standard deviation (metres) of the GPS noise stored in ``self.gps_sd``,
yet to be defined. Additional optional ``self.likelihood_d_truncate`` for truncated Gaussian noise, defaults to inf.
The parameters ``self.deviation_beta``, ``self.gps_sd`` and the distance prior parameters defined in
``self.distance_params`` and ``self.distance_params_bounds`` can be tuned using expectation-maximisation with
``bmm.offline_em``.
For more details see https://arxiv.org/abs/2012.04602.
"""
__module__ = 'bmm'
def __init__(self):
self.gps_sd = None
self.gps_sd_bounds = (0, np.inf)
self.likelihood_d_truncate = np.inf
self.deviation_beta = None
self.deviation_beta_bounds = (0, np.inf)
self.max_speed = 35
self.distance_params = OrderedDict()
self.distance_params_bounds = OrderedDict()
def distance_prior_evaluate(self,
distance: Union[float, np.ndarray],
time_interval: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Evaluate distance prior/transition density
Vectorised to handle multiple evaluations at once
:param distance: metres
array if multiple evaluations at once
:param time_interval: seconds, time between observations
:return: distance prior density evaluation(s)
"""
raise NotImplementedError
def distance_prior_gradient(self,
distance: Union[float, np.ndarray],
time_interval: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Evaluate gradient of distance prior/transition density in distance_params
Vectorised to handle multiple evaluations at once
:param distance: metres
array if multiple evaluations at once
:param time_interval: seconds, time between observations
:return: distance prior density evaluation(s)
"""
raise AttributeError("Distance prior gradient not implemented")
def pos_distance_prior_bound(self, time_interval: float) -> float:
"""
Extracts bound on the distance component of the prior/transition density given the distance is > 0
:param time_interval: seconds, time between observations
:return: bound on distance prior density
"""
raise AttributeError("Prior bound not implemented")
def distance_prior_bound(self, time_interval: float) -> float:
"""
Extracts bound on the distance component of the prior/transition density
:param time_interval: seconds, time between observations
:return: bound on distance prior density
"""
raise AttributeError("Prior bound not implemented")
def d_max(self, time_interval: float) -> float:
"""
Initiates default value of the maximum distance possibly travelled in the time interval.
Assumes a maximum possible speed.
:param time_interval: float
seconds
time between observations
:return: float
defaulted d_max
"""
return self.max_speed * time_interval
def deviation_prior_evaluate(self,
previous_cart_coord: np.ndarray,
route_cart_coords: np.ndarray,
distances: np.ndarray) -> np.ndarray:
"""
Evaluate deviation prior/transition density
Vectorised to handle multiple evaluations at once
:param previous_cart_coord: shape = (2,) or (_, 2) cartesian coordinate(s) at previous observation time
:param route_cart_coords: shape = (_, 2), cartesian coordinates - positions along road network
:param distances: shape = (_,) route distances between previous_cart_coord(s) and route_cart_coords
:return: deviation prior density evaluation(s)
"""
if self.deviation_beta == 0:
return np.ones(len(route_cart_coords))
deviations = np.sqrt(np.sum((previous_cart_coord - route_cart_coords) ** 2, axis=1))
diffs = np.abs(deviations - distances)
return np.exp(-diffs * self.deviation_beta)
def likelihood_evaluate(self,
route_cart_coords: np.ndarray,
observation: np.ndarray) -> Union[float, np.ndarray]:
"""
Evaluate probability of generating observation from cartesian coords
Vectorised to evaluate over many cart_coords for a single observation
Isotropic Gaussian with standard dev self.gps_sd
:param route_cart_coords: shape = (_, 2), cartesian coordinates - positions along road network
:param observation: shape = (2,) observed GPS cartesian coordinate
:return: shape = (_,) likelihood evaluations
"""
return _likelihood_evaluate(route_cart_coords, observation, self.gps_sd, self.likelihood_d_truncate)
class ExponentialMapMatchingModel(MapMatchingModel):
r"""
Class defining the state-space model used for map-matching with exponential prior on distance travelled.
**Transition density** (assuming constant time interval)
.. math:: p(x_t, e_t | x_{t-1}) \propto \gamma(d_t) \exp(-\beta|d^\text{gc}_t - d_t|)\mathbb{1}[d_t < d_\text{max}],
where :math:`d_t` is the distance between positions :math:`x_{t-1}` and :math:`x_{t}` along the series of edges
:math:`e_{t-1}`, restricted to the graph/road network.
:math:`d^\text{gc}_t` is the *great circle distance* between :math:`x_{t-1}` and :math:`x_{t}`,
not restricted to the graph/road network.
The :math:`\exp(-\beta|d^\text{gc}_t - d_t|)` term penalises non-direct or windy routes where :math:`\beta` is a
parameter stored in ``self.deviation_beta`` defaults to 0.052.
:math:`d_\text{max}` is defined by ``self.d_max`` function (metres) and ``self.max_speed`` parameter
(metres per second), defaults to 35.
The :math:`\gamma(d_t)` term
.. math:: \gamma(d_t) = p^0\mathbb{1}[d_t = 0] + (1-p^0) \mathbb{1}[d_t > 0] \lambda \exp(-\lambda d_t/\Delta t),
penalises overly lengthy routes, defined as an exponential distribution with
probability mass at :math:`d_t=0` to account for traffic, junctions etc.
where :math:`p^0 = \exp(-r^0 \Delta t)` with :math:`\Delta t` being the time interval between observations.
The :math:`r^0` parameter is stored in ``self.zero_dist_prob_neg_exponent`` and defaults to 0.133.
Exponential distribution parameter :math:`\lambda` is stored in ``self.lambda_speed`` and defaults to 0.068.
**Observation density**
.. math:: p(y_t| x_{t}) = \mathcal{N}(y_t \mid x_t, \sigma_\text{GPS}^2 \mathbb{I}_2),
where :math:`\sigma_\text{GPS}` is the standard deviation (metres) of the GPS noise stored in ``self.gps_sd``,
defaults to 5.23. Additional optional ``self.likelihood_d_truncate`` for truncated Gaussian noise, defaults to inf.
The parameters ``self.deviation_beta``, ``self.gps_sd`` as well as the distance prior parameters
``self.zero_dist_prob_neg_exponent`` and ``self.lambda_speed`` can be tuned using expectation-maximisation
with ``bmm.offline_em``.
For more details see https://arxiv.org/abs/2012.04602.
:param zero_dist_prob_neg_exponent: Positive parameter such that stationary probability
is :math:`p^0 = \exp(-r^0 \Delta t)`, defaults to 0.133.
:param lambda_speed: Positive parameter of exponential distribution over average speed between observations.
:param deviation_beta: Positive parameter of exponential distribution over route deviation.
:param gps_sd: Positive parameter defining standard deviation of GPS noise in metres.
"""
__module__ = 'bmm'
def __init__(self,
zero_dist_prob_neg_exponent: float = 0.133,
lambda_speed: float = 0.068,
deviation_beta: float = 0.052,
gps_sd: float = 5.23):
"""
Initiate parameters of map-matching model with exponential prior on distance travelled between observations.
:param zero_dist_prob_neg_exponent: Positive parameter such that stationary probability is
:math:`p^0 = \exp(-r^0 \Delta t)`, defaults to 0.133.
:param lambda_speed: Positive parameter of exponential distribution over average speed between observations.
:param deviation_beta: Positive parameter of exponential distribution over route deviation.
:param gps_sd: Positive parameter defining standard deviation of GPS noise in metres.
"""
super().__init__()
self.min_zero_dist_prob = 0.01
self.max_zero_dist_prob = 0.5
self.distance_params = OrderedDict({'zero_dist_prob_neg_exponent': zero_dist_prob_neg_exponent,
'lambda_speed': lambda_speed})
self.distance_params_bounds = OrderedDict(
{'zero_dist_prob_neg_exponent': (-np.log(self.max_zero_dist_prob) / 15,
-np.log(self.min_zero_dist_prob) / 15),
'lambda_speed': (1e-20, np.inf)})
self.deviation_beta = deviation_beta
self.gps_sd = gps_sd
def zero_dist_prob(self,
time_interval: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Probability of travelling a distance of exactly zero
:param time_interval: time between last observation and newly received observation
:return: probability of travelling zero metres in time_interval
"""
prob = np.exp(- self.distance_params['zero_dist_prob_neg_exponent'] * time_interval)
prob = np.where(prob < self.min_zero_dist_prob, self.min_zero_dist_prob, prob)
prob = np.where(prob > self.max_zero_dist_prob, self.max_zero_dist_prob, prob)
return prob
def distance_prior_evaluate(self,
distance: Union[float, np.ndarray],
time_interval: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Evaluate distance prior/transition density
Vectorised to handle multiple evaluations at once
:param distance: metres
array if multiple evaluations at once
:param time_interval: seconds, time between observations
:return: distance prior density evaluation(s)
"""
zero_dist_prob = self.zero_dist_prob(time_interval)
distance = np.atleast_1d(distance)
out_arr = np.ones_like(distance) * zero_dist_prob
non_zero_inds = distance > 1e-5
if np.sum(non_zero_inds) > 0:
if np.any(np.atleast_1d(distance[non_zero_inds]) < 0):
raise ValueError("Exponential pdf takes only positive values")
time_int_check = time_interval[non_zero_inds] if isinstance(time_interval, np.ndarray) else time_interval
zero_dist_prob_check = zero_dist_prob[non_zero_inds] if isinstance(time_interval, np.ndarray) \
else zero_dist_prob
speeds = distance[non_zero_inds] / time_int_check
out_arr[non_zero_inds] = self.distance_params['lambda_speed'] \
* np.exp(-self.distance_params['lambda_speed'] * speeds) \
* (1 - zero_dist_prob_check) / time_int_check
return np.squeeze(out_arr)
def distance_prior_gradient(self,
distance: Union[float, np.ndarray],
time_interval: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Evaluate gradient of distance prior/transition density in distance_params
Vectorised to handle multiple evaluations at once
:param distance: metres
array if multiple evaluations at once
:param time_interval: seconds, time between observations
:return: distance prior gradient evaluation(s)
"""
distance = np.atleast_1d(distance)
speeds = distance / time_interval
out_arr = np.zeros((2, len(distance)))
non_zero_inds = distance > 1e-5
if np.any(np.atleast_1d(distance[non_zero_inds]) < 0):
raise ValueError("Exponential pdf takes only positive values")
time_int_check = time_interval[non_zero_inds] if isinstance(time_interval, np.ndarray) else time_interval
out_arr[0] = (- time_interval * ~non_zero_inds
+ non_zero_inds
* self.distance_params['lambda_speed'] * np.exp(-self.distance_params['lambda_speed'] * speeds)) \
* self.zero_dist_prob(time_interval)
out_arr[1, non_zero_inds] = (1 - self.zero_dist_prob(time_int_check)) \
* np.exp(
-self.distance_params['lambda_speed'] * speeds[non_zero_inds]) / time_int_check \
* (1 - self.distance_params['lambda_speed'] * speeds[non_zero_inds])
return np.squeeze(out_arr)
def distance_prior_bound(self,
time_interval: float) -> float:
"""
Extracts bound on the prior/transition density
:param time_interval: seconds, time between observations
:return: bound on distance prior density
"""
zero_dist_prob = self.zero_dist_prob(time_interval)
distance_bound = max(zero_dist_prob,
(1 - zero_dist_prob) * self.distance_params['lambda_speed'] / time_interval)
return distance_bound
def pos_distance_prior_bound(self, time_interval: float) -> float:
"""
Extracts bound on the distance component of the prior/transition density given the distance is > 0
:param time_interval: seconds, time between observations
:return: bound on distance prior density
"""
zero_dist_prob = self.zero_dist_prob(time_interval)
return (1 - zero_dist_prob) * self.distance_params['lambda_speed'] / time_interval
| 16,786 | 46.420904 | 124 | py |
bmm | bmm-master/bmm/src/inference/resampling.py | ########################################################################################################################
# Module: inference/resampling.py
# Description: Resampling schemes for converting weighted particles (series of positions/edges/distances) to
# unweighted. Notably multinomial resampling and fixed-lag resampling (with stitching).
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from typing import Union, Tuple
import numpy as np
from networkx.classes import MultiDiGraph
from bmm.src.inference.particles import MMParticles
from bmm.src.inference.model import MapMatchingModel
from bmm.src.tools.edges import get_geometry
def multinomial(particles: Union[list, np.ndarray, MMParticles],
weights: np.ndarray) -> Union[list, np.ndarray, MMParticles]:
"""
Full multinomial resampling scheme. Lengths of particles and weights must conform.
:param particles: list-like or MMParticles object to be resampled
:param weights: resampling probabilities
:return: unweighted collection of objects in the same form as input particles
"""
# Number of samples
n = len(weights)
# Check weights are normalised
weights_sum = np.sum(weights)
if weights_sum != 1:
weights /= weights_sum
# Sample indices according to weights (with replacement)
sampled_indices = np.random.choice(n, n, replace=True, p=weights)
# Update and output particles
if isinstance(particles, MMParticles):
if particles.n != n:
raise ValueError("Length of MMParticles to be resampled and weights do not conform")
out_particles = particles.copy()
out_particles.particles = [out_particles.particles[i] for i in sampled_indices]
if hasattr(out_particles, 'prior_norm'):
if out_particles.prior_norm.shape[1] == n:
out_particles.prior_norm = out_particles.prior_norm[:, sampled_indices]
else:
out_particles.prior_norm = out_particles.prior_norm[sampled_indices]
elif isinstance(particles, np.ndarray):
if len(particles) != n:
raise ValueError("Length of particles (numpy.ndarray) to be resampled and weights do not conform")
out_particles = particles[sampled_indices]
else:
if len(particles) != n:
raise ValueError("Length of particles to be resampled and weights do not conform")
out_particles = [particles[i] for i in sampled_indices]
return out_particles
def full_fixed_lag_stitch(fixed_particle: np.ndarray,
last_edge_fixed: np.ndarray,
last_edge_fixed_length: float,
new_particles: MMParticles,
adjusted_weights: np.ndarray,
stitch_time_interval: float,
min_resample_time_indices: Union[list, np.ndarray],
mm_model: MapMatchingModel,
return_ess_stitch: bool = False) -> Union[np.ndarray, Tuple[np.ndarray, float]]:
"""
Evaluate full interacting weights, normalise and sample (stitch) for a single fixed particle
:param fixed_particle: trajectory prior to stitching time
:param last_edge_fixed: row of last fixed particle
:param last_edge_fixed_length: length of last fixed edge (so don't have to call get_geometry)
:param new_particles: particles proposed to stitching
:param adjusted_weights: non-interacting weights for new_particles
:param stitch_time_interval: time between stitching observations
:param min_resample_time_indices: indices for row of min_resample_time in new_particles
:param mm_model: MapMatchingModel
:param return_ess_stitch: whether to calculate and return the ESS of the full stitching weights
:return: stitched particle (and ess_stitch if return_ess_stitch)
"""
n = len(new_particles)
# Possible particles to be resampled placeholder
newer_particles_adjusted = [None] * n
# Stitching distances
new_stitching_distances = np.empty(n)
new_stitching_distances[:] = np.nan
new_cart_coords = np.empty((n, 2))
for k in range(n):
# if adjusted_weights[k] == 0:
# continue
if new_particles[k] is None:
continue
new_particle = new_particles[k].copy()
# Check both particles start from same edge
if np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
# Check that new edge overtakes fixed edge. i.e. distance isn't negative
if np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
new_particle[1, 4] < (last_edge_fixed[4] - 1e-6):
continue
new_cart_coords[k] = new_particle[min_resample_time_indices[k], 5:7]
# Calculate distance modification
first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]) * last_edge_fixed_length
first_distance_k = new_particle[1, -1]
change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)
new_particle[1:(min_resample_time_indices[k] + 1), -1] += change_dist
new_stitching_distances[k] = new_particle[min_resample_time_indices[k], -1]
# Store adjusted particle
newer_particles_adjusted[k] = new_particle[1:]
# Calculate adjusted weight
res_weights = np.zeros(n)
possible_inds = ~np.isnan(new_stitching_distances)
new_stitching_distances_trimmed = new_stitching_distances[possible_inds]
new_cart_coords_trimmed = new_cart_coords[possible_inds]
adjusted_weights_trimmed = adjusted_weights[possible_inds]
if adjusted_weights_trimmed.sum() == 0:
adjusted_weights_trimmed[:] = 1
stitched_distance_prior_evals_trimmed = mm_model.distance_prior_evaluate(new_stitching_distances_trimmed,
stitch_time_interval)
stitched_deviation_prior_trimmed = mm_model.deviation_prior_evaluate(fixed_particle[-1, 5:7],
new_cart_coords_trimmed,
new_stitching_distances_trimmed)
res_weights[possible_inds] = adjusted_weights_trimmed \
* stitched_distance_prior_evals_trimmed \
* stitched_deviation_prior_trimmed
# Normalise adjusted resample weights
with np.errstate(invalid='ignore'):
res_weights /= res_weights.sum()
# If only particle on fixed edge resample full trajectory
if max(res_weights) == 0 or np.all(np.isnan(res_weights)):
out_particle = None
ess_stitch = 1 / np.sum(adjusted_weights ** 2)
# Otherwise fixed-lag resample and stitch
else:
# Resample index
res_index = np.random.choice(n, 1, p=res_weights)[0]
# Update output
out_particle = np.append(fixed_particle, newer_particles_adjusted[res_index], axis=0)
# Track ESS
ess_stitch = 1 / np.sum(res_weights ** 2)
if return_ess_stitch:
return out_particle, ess_stitch
else:
return out_particle
def rejection_fixed_lag_stitch(fixed_particle: np.ndarray,
last_edge_fixed: np.ndarray,
last_edge_fixed_length: float,
new_particles: MMParticles,
adjusted_weights: np.ndarray,
stitch_time_interval: float,
min_resample_time_indices: Union[list, np.ndarray],
dist_prior_bound: float,
mm_model: MapMatchingModel,
max_rejections: int,
break_on_zero: bool = False) -> Union[np.ndarray, None, int]:
"""
Attempt up to max_rejections of rejection sampling to stitch a single fixed particle
:param fixed_particle: trajectory prior to stitching time
:param last_edge_fixed: row of last fixed particle
:param last_edge_fixed_length: length of last fixed edge (so don't have to call get_geometry)
:param new_particles: particles proposed to stitching
:param adjusted_weights: non-interacting stitching weights
:param stitch_time_interval: time between stitching observations
:param min_resample_time_indices: indices for row of min_resample_time in new_particles
:param dist_prior_bound: bound on distance transition density (given positive if break_on_zero)
:param mm_model: MapMatchingModel
:param max_rejections: number of rejections to attempt, if none succeed return None
:param break_on_zero: whether to return 0 if new_stitching_distance=0
:return: stitched particle
"""
n = len(new_particles)
for reject_ind in range(max_rejections):
new_index = np.random.choice(n, 1, p=adjusted_weights)[0]
new_particle = new_particles[new_index].copy()
# Reject if new_particle starts from different edge
if not np.array_equal(last_edge_fixed[1:4], new_particle[0, 1:4]):
continue
# Reject if new_particle doesn't overtake fixed_particles
elif np.array_equal(last_edge_fixed[1:4], new_particle[1, 1:4]) and \
new_particle[1, 4] < last_edge_fixed[4]:
continue
# Calculate stitching distance
first_distance_j_to_k = (new_particle[1, 4] - last_edge_fixed[4]) * last_edge_fixed_length
first_distance_k = new_particle[1, -1]
change_dist = np.round(first_distance_j_to_k - first_distance_k, 5)
new_particle[1:(min_resample_time_indices[new_index] + 1), -1] += change_dist
new_stitching_distance = new_particle[min_resample_time_indices[new_index], -1]
if break_on_zero and new_stitching_distance < 1e-5:
return 0
# Evaluate distance prior
new_stitching_distance_prior = mm_model.distance_prior_evaluate(new_stitching_distance, stitch_time_interval)
new_stitching_deviation_prior = mm_model.deviation_prior_evaluate(fixed_particle[-1, 5:7],
new_particle[None,
min_resample_time_indices[new_index], 5:7],
new_stitching_distance)
accept_prob = new_stitching_distance_prior * new_stitching_deviation_prior / dist_prior_bound
if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
out_particle = np.append(fixed_particle, new_particle[1:], axis=0)
return out_particle
return None
def fixed_lag_stitch_post_split(graph: MultiDiGraph,
fixed_particles: MMParticles,
new_particles: MMParticles,
new_weights: np.ndarray,
mm_model: MapMatchingModel,
max_rejections: int) -> MMParticles:
"""
Stitch together fixed_particles with samples from new_particles according to joint fixed-lag posterior
:param graph: encodes road network, simplified and projected to UTM
:param fixed_particles: trajectories before stitching time (won't be changed)
:param new_particles: trajectories after stitching time (to be resampled)
one observation time overlap with fixed_particles
:param new_weights: weights applied to new_particles
:param mm_model: MapMatchingModel
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full fixed-lag stitching and track ess_stitch
:return: MMParticles object
"""
n = len(fixed_particles)
full_fixed_lag_resample = max_rejections == 0
min_resample_time = new_particles.observation_times[1]
min_resample_time_indices = [np.where(particle[:, 0] == min_resample_time)[0][0] if particle is not None else 0
for particle in new_particles]
originial_stitching_distances = np.array([new_particles[j][min_resample_time_indices[j], -1]
if new_particles[j] is not None else 0 for j in range(n)])
max_fixed_time = fixed_particles._first_non_none_particle[-1, 0]
stitch_time_interval = min_resample_time - max_fixed_time
distance_prior_evals = mm_model.distance_prior_evaluate(originial_stitching_distances, stitch_time_interval)
fixed_last_coords = np.array([part[0, 5:7] if part is not None else [0, 0] for part in new_particles])
new_coords = np.array([new_particles[j][min_resample_time_indices[j], 5:7]
if new_particles[j] is not None else [0, 0] for j in range(n)])
deviation_prior_evals = mm_model.deviation_prior_evaluate(fixed_last_coords,
new_coords,
originial_stitching_distances)
original_prior_evals = np.zeros(n)
pos_inds = new_particles.prior_norm > 1e-5
original_prior_evals[pos_inds] = distance_prior_evals[pos_inds] \
* deviation_prior_evals[pos_inds] \
* new_particles.prior_norm[pos_inds]
out_particles = fixed_particles
# Initiate some required quantities depending on whether to do rejection sampling or not
if full_fixed_lag_resample:
ess_stitch_track = np.zeros(n)
# distance_prior_bound = None
# adjusted_weights = None
else:
ess_stitch_track = None
pos_prior_bound = mm_model.pos_distance_prior_bound(stitch_time_interval)
prior_bound = mm_model.distance_prior_bound(stitch_time_interval)
store_out_parts = fixed_particles.copy()
adjusted_weights = new_weights.copy()
adjusted_weights[original_prior_evals > 1e-5] /= original_prior_evals[original_prior_evals > 1e-5]
adjusted_weights[original_prior_evals < 1e-5] = 0
adjusted_weights /= np.sum(adjusted_weights)
resort_to_full = False
# Iterate through particles
for j in range(n):
fixed_particle = fixed_particles[j]
# Check if particle is None
# i.e. fixed lag approx has failed
if fixed_particle is None:
out_particles[j] = None
if full_fixed_lag_resample:
ess_stitch_track[j] = 0
continue
last_edge_fixed = fixed_particle[-1]
last_edge_fixed_geom = get_geometry(graph, last_edge_fixed[1:4])
last_edge_fixed_length = last_edge_fixed_geom.length
if full_fixed_lag_resample:
# Full resampling
out_particles[j], ess_stitch_track[j] = full_fixed_lag_stitch(fixed_particle,
last_edge_fixed, last_edge_fixed_length,
new_particles,
adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
mm_model,
True)
else:
# Rejection sampling
out_particles[j] = rejection_fixed_lag_stitch(fixed_particle, last_edge_fixed, last_edge_fixed_length,
new_particles, adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
pos_prior_bound,
mm_model,
max_rejections,
break_on_zero=True)
if out_particles[j] is None:
# Rejection sampling reached max_rejections -> try full resampling
out_particles[j] = full_fixed_lag_stitch(fixed_particle, last_edge_fixed, last_edge_fixed_length,
new_particles,
adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
mm_model,
False)
if isinstance(out_particles[j], int) and out_particles[j] == 0:
resort_to_full = True
break
if resort_to_full:
for j in range(n):
fixed_particle = store_out_parts[j]
# Check if particle is None
# i.e. fixed lag approx has failed
if fixed_particle is None:
out_particles[j] = None
if full_fixed_lag_resample:
ess_stitch_track[j] = 0
continue
last_edge_fixed = fixed_particle[-1]
last_edge_fixed_geom = get_geometry(graph, last_edge_fixed[1:4])
last_edge_fixed_length = last_edge_fixed_geom.length
# Rejection sampling with full bound
out_particles[j] = rejection_fixed_lag_stitch(fixed_particle, last_edge_fixed, last_edge_fixed_length,
new_particles, adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
prior_bound,
mm_model,
max_rejections)
if out_particles[j] is None:
# Rejection sampling reached max_rejections -> try full resampling
out_particles[j] = full_fixed_lag_stitch(fixed_particle, last_edge_fixed, last_edge_fixed_length,
new_particles,
adjusted_weights,
stitch_time_interval,
min_resample_time_indices,
mm_model,
False)
if full_fixed_lag_resample:
out_particles.ess_stitch = np.append(out_particles.ess_stitch, np.atleast_2d(ess_stitch_track), axis=0)
# Do full resampling where fixed lag approx broke
none_inds = np.array([p is None for p in out_particles])
good_inds = ~none_inds
n_good = good_inds.sum()
if n_good == 0:
raise ValueError("Map-matching failed: all stitching probabilities zero,"
"try increasing the lag or number of particles")
if n_good < n:
none_inds_res_indices = np.random.choice(n, n - n_good, p=good_inds / n_good)
for i, j in enumerate(np.where(none_inds)[0]):
out_particles[j] = out_particles[none_inds_res_indices[i]]
if full_fixed_lag_resample:
out_particles.ess_stitch[-1, none_inds] = 1 / (new_weights ** 2).sum()
return out_particles
def fixed_lag_stitching(graph: MultiDiGraph,
mm_model: MapMatchingModel,
particles: MMParticles,
weights: np.ndarray,
lag: int,
max_rejections: int) -> MMParticles:
"""
Split particles and resample (with stitching) coordinates after a certain time - defined by the lag parameter.
:param graph: encodes road network, simplified and projected to UTM
:param mm_model: MapMatchingModel
:param particles: MMParticles object
:param weights: shape = (n,) weights at latest observation time
:param lag: fixed lag for resampling/stitching
None indicates full multinomial resampling
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full fixed-lag stitching and track ess_stitch
:return: MMParticles object
"""
# Bool whether to store ESS stitch quantities
full_fixed_lag_resample = max_rejections == 0
# Check weights are normalised
weights_sum = np.sum(weights)
if weights_sum != 1:
weights /= weights_sum
# Extract basic quantities
observation_times = particles.observation_times
m = len(observation_times) - 1
n = particles.n
ess_pf = 1 / np.sum(weights ** 2)
# Initiate output
out_particles = particles.copy()
# If not reached lag yet do standard resampling
if lag is None or m <= lag:
if full_fixed_lag_resample:
out_particles.ess_stitch = np.append(particles.ess_stitch, np.ones((1, n)) * ess_pf,
axis=0)
return multinomial(out_particles, weights)
# Largest time not to be resampled
max_fixed_time = observation_times[m - lag - 1]
# Pre-process a bit
fixed_particles = out_particles.copy()
new_particles = out_particles.copy()
max_fixed_time_indices = [0] * n
for j in range(n):
if out_particles[j] is None:
continue
max_fixed_time_indices[j] = np.where(out_particles[j][:, 0] == max_fixed_time)[0][0]
fixed_particles[j] = out_particles[j][:(max_fixed_time_indices[j] + 1)]
new_particles[j] = out_particles[j][max_fixed_time_indices[j]:]
new_particles.prior_norm = out_particles.prior_norm[m - lag - 1]
# Stitch
out_particles = fixed_lag_stitch_post_split(graph,
fixed_particles,
new_particles,
weights,
mm_model,
max_rejections)
return out_particles
| 22,840 | 46.192149 | 120 | py |
bmm | bmm-master/bmm/src/inference/backward.py | ########################################################################################################################
# Module: inference/backward.py
# Description: Implementation of backward simulation for particle smoothing.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from typing import Union, Optional
import numpy as np
from networkx.classes import MultiDiGraph
from bmm.src.inference.resampling import multinomial
from bmm.src.tools.edges import get_geometry
from bmm.src.inference.particles import MMParticles
from bmm.src.inference.model import MapMatchingModel
def full_backward_sample(fixed_particle: np.ndarray,
first_edge_fixed: np.ndarray,
first_edge_fixed_length: float,
filter_particles: MMParticles,
adjusted_weights: Union[list, np.ndarray],
time_interval: float,
next_time_index: int,
mm_model: MapMatchingModel,
return_ess_back: bool = False,
return_sampled_index: bool = False) \
-> Union[Optional[np.ndarray], tuple]:
"""
Evaluate full interacting weights, normalise and backwards sample a past coordinate
for a single fixed particle of future coordinates
:param fixed_particle: trajectory post backwards sampling time
:param first_edge_fixed: first row of fixed particle
:param first_edge_fixed_length: metres
:param filter_particles: proposal particles to be sampled
:param adjusted_weights: non-interacting weights for filter_particles
:param time_interval: time between observations at backwards sampling time
:param next_time_index: index of second observation time in fixed_particle
:param mm_model: MapMatchingModel
:param return_ess_back: whether to calculate and return the ESS of the full interacting weights
:param return_sampled_index: whether to return index of selected back sample
:return: appended particle (and ess_back if return_ess_back)
"""
n = filter_particles.n
smoothing_distances = np.empty(n)
smoothing_distances[:] = np.nan
distances_j_to_k = np.empty(n)
new_prev_cart_coords = np.empty((n, 2))
for k in range(n):
if adjusted_weights[k] == 0:
continue
filter_particle = filter_particles[k]
# Check first fixed edge and last filter edge coincide
if np.array_equal(first_edge_fixed[1:4], filter_particle[-1, 1:4]):
# Check that fixed edge overtakes filter edge. i.e. distance isn't negative
if np.array_equal(filter_particle[-1, 1:4], fixed_particle[next_time_index, 1:4]) and \
filter_particle[-1, 4] > fixed_particle[next_time_index, 4]:
continue
distances_j_to_k[k] = np.round((first_edge_fixed[4] - filter_particle[-1, 4]) * first_edge_fixed_length, 5)
smoothing_distances[k] = fixed_particle[next_time_index, -1] + distances_j_to_k[k]
if smoothing_distances[k] < 0:
raise ValueError('Negative smoothing distance')
new_prev_cart_coords[k] = filter_particle[-1, 5:7]
possible_inds = ~np.isnan(smoothing_distances)
if not np.any(possible_inds):
if return_ess_back:
if return_sampled_index:
return None, 0, 0
else:
return None, 0
else:
if return_sampled_index:
return None, 0
else:
return None
smoothing_weights = adjusted_weights[possible_inds] \
* mm_model.distance_prior_evaluate(smoothing_distances[possible_inds],
time_interval) \
* mm_model.deviation_prior_evaluate(new_prev_cart_coords[possible_inds],
fixed_particle[None, next_time_index, 5:7],
smoothing_distances[possible_inds])
smoothing_weights /= smoothing_weights.sum()
sampled_index = np.where(possible_inds)[0][np.random.choice(len(smoothing_weights), 1, p=smoothing_weights)[0]]
fixed_particle[1:(next_time_index + 1), -1] += distances_j_to_k[sampled_index]
out_particle = np.append(filter_particles[sampled_index], fixed_particle[1:], axis=0)
ess_back = 1 / (smoothing_weights ** 2).sum()
if return_ess_back:
if return_sampled_index:
return out_particle, ess_back, sampled_index
else:
return out_particle, ess_back
else:
if return_sampled_index:
return out_particle, sampled_index
else:
return out_particle
def rejection_backward_sample(fixed_particle: np.ndarray,
first_edge_fixed: np.ndarray,
first_edge_fixed_length: float,
filter_particles: MMParticles,
filter_weights: np.ndarray,
time_interval: float,
next_time_index: int,
prior_bound: float,
mm_model: MapMatchingModel,
max_rejections: int,
return_sampled_index: bool = False,
break_on_zero: bool = False) -> Union[Optional[np.ndarray], tuple, int]:
"""
Attempt up to max_rejections of rejection sampling to backwards sample a single particle
:param fixed_particle: trajectory prior to stitching time
:param first_edge_fixed: first row of fixed particle
:param first_edge_fixed_length: metres
:param filter_particles: proposal particles to be sampled
:param filter_weights: weights for filter_particles
:param time_interval: time between observations at backwards sampling time
:param next_time_index: index of second observation time in fixed_particle
:param prior_bound: bound on distance transition density (given positive if break_on_zero)
:param mm_model: MapMatchingModel
:param max_rejections: number of rejections to attempt, if none succeed return None
:param return_sampled_index: whether to return index of selected back sample
:param break_on_zero: whether to return 0 if smoothing_distance=0
:return: appended particle
"""
n = filter_particles.n
for k in range(max_rejections):
filter_index = np.random.choice(n, 1, p=filter_weights)[0]
filter_particle = filter_particles[filter_index]
if not np.array_equal(first_edge_fixed[1:4], filter_particle[-1, 1:4]):
continue
elif np.array_equal(fixed_particle[next_time_index, 1:4], filter_particle[-1, 1:4]) and \
filter_particle[-1, 4] > fixed_particle[next_time_index, 4]:
continue
distance_j_to_k = np.round((first_edge_fixed[4] - filter_particle[-1, 4]) * first_edge_fixed_length, 5)
smoothing_distance = fixed_particle[next_time_index, -1] + distance_j_to_k
if break_on_zero and smoothing_distance < 1e-5:
return (0, filter_index) if return_sampled_index else 0
smoothing_distance_prior = mm_model.distance_prior_evaluate(smoothing_distance, time_interval)
smoothing_deviation_prior = mm_model.deviation_prior_evaluate(filter_particle[-1, 5:7],
fixed_particle[None, next_time_index, 5:7],
smoothing_distance)
accept_prob = smoothing_distance_prior * smoothing_deviation_prior / prior_bound
if accept_prob > (1 - 1e-5) or np.random.uniform() < accept_prob:
fixed_particle[1:(next_time_index + 1), -1] += distance_j_to_k
out_part = np.append(filter_particle, fixed_particle[1:], axis=0)
if return_sampled_index:
return out_part, filter_index
else:
return out_part
return (None, 0) if return_sampled_index else None
def backward_simulate(graph: MultiDiGraph,
filter_particles: MMParticles,
filter_weights: np.ndarray,
time_interval_arr: np.ndarray,
mm_model: MapMatchingModel,
max_rejections: int,
verbose: bool = False,
store_ess_back: bool = None,
store_norm_quants: bool = False) -> MMParticles:
"""
Given particle filter output, run backwards simulation to output smoothed trajectories
:param graph: encodes road network, simplified and projected to UTM
:param filter_particles: marginal outputs from particle filter
:param filter_weights: weights
:param time_interval_arr: times between observations, must be length one less than filter_particles
:param mm_model: MapMatchingModel
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full backward simulation and track ess_back
:param verbose: print ess_pf or ess_back
:param store_ess_back: whether to store ess_back (if possible) in MMParticles object
:param store_norm_quants: if True normalisation quantities returned in out_particles
:return: MMParticles object
"""
n_samps = filter_particles[-1].n
num_obs = len(filter_particles)
if len(time_interval_arr) + 1 != num_obs:
raise ValueError("time_interval_arr must be length one less than that of filter_particles")
full_sampling = max_rejections == 0
if store_ess_back is None:
store_ess_back = full_sampling
# Multinomial resample end particles if weighted
if np.all(filter_weights[-1] == filter_weights[-1][0]):
out_particles = filter_particles[-1].copy()
else:
out_particles = multinomial(filter_particles[-1], filter_weights[-1])
if full_sampling:
ess_back = np.zeros((num_obs, n_samps))
ess_back[0] = 1 / (filter_weights[-1] ** 2).sum()
else:
ess_back = None
if num_obs < 2:
return out_particles
if store_norm_quants:
norm_quants = np.zeros((num_obs - 1, *filter_particles[0].prior_norm.shape))
for i in range(num_obs - 2, -1, -1):
next_time = filter_particles[i + 1].latest_observation_time
if not full_sampling:
pos_prior_bound = mm_model.pos_distance_prior_bound(time_interval_arr[i])
prior_bound = mm_model.distance_prior_bound(time_interval_arr[i])
store_out_parts = out_particles.copy()
if filter_particles[i].prior_norm.ndim == 2:
prior_norm = filter_particles[i].prior_norm[:, 0]
else:
prior_norm = filter_particles[i].prior_norm
adjusted_weights = filter_weights[i].copy()
good_inds = np.logical_and(adjusted_weights != 0, prior_norm != 0)
adjusted_weights[good_inds] /= prior_norm[good_inds]
adjusted_weights[~good_inds] = 0
adjusted_weights /= adjusted_weights.sum()
if store_norm_quants:
sampled_inds = np.zeros(n_samps, dtype=int)
resort_to_full = False
for j in range(n_samps):
fixed_particle = out_particles[j].copy()
first_edge_fixed = fixed_particle[0]
first_edge_fixed_geom = get_geometry(graph, first_edge_fixed[1:4])
first_edge_fixed_length = first_edge_fixed_geom.length
fixed_next_time_index = np.where(fixed_particle[:, 0] == next_time)[0][0]
if full_sampling:
back_output = full_backward_sample(fixed_particle,
first_edge_fixed,
first_edge_fixed_length,
filter_particles[i],
adjusted_weights,
time_interval_arr[i],
fixed_next_time_index,
mm_model,
return_ess_back=True,
return_sampled_index=store_norm_quants)
if store_norm_quants:
out_particles[j], ess_back[i, j], sampled_inds[j] = back_output
else:
out_particles[j], ess_back[i, j] = back_output
else:
back_output = rejection_backward_sample(fixed_particle,
first_edge_fixed,
first_edge_fixed_length,
filter_particles[i],
adjusted_weights,
time_interval_arr[i],
fixed_next_time_index,
pos_prior_bound,
mm_model,
max_rejections,
return_sampled_index=store_norm_quants,
break_on_zero=True)
first_back_output = back_output[0] if store_norm_quants else back_output
if first_back_output is None:
back_output = full_backward_sample(fixed_particle,
first_edge_fixed,
first_edge_fixed_length,
filter_particles[i],
adjusted_weights,
time_interval_arr[i],
fixed_next_time_index,
mm_model,
return_ess_back=False,
return_sampled_index=store_norm_quants)
if isinstance(first_back_output, int) and first_back_output == 0:
resort_to_full = True
break
if store_norm_quants:
out_particles[j], sampled_inds[j] = back_output
else:
out_particles[j] = back_output
if resort_to_full:
if store_norm_quants:
sampled_inds = np.zeros(n_samps, dtype=int)
for j in range(n_samps):
fixed_particle = store_out_parts[j]
first_edge_fixed = fixed_particle[0]
first_edge_fixed_geom = get_geometry(graph, first_edge_fixed[1:4])
first_edge_fixed_length = first_edge_fixed_geom.length
fixed_next_time_index = np.where(fixed_particle[:, 0] == next_time)[0][0]
back_output = rejection_backward_sample(fixed_particle,
first_edge_fixed,
first_edge_fixed_length,
filter_particles[i],
adjusted_weights,
time_interval_arr[i],
fixed_next_time_index,
prior_bound,
mm_model,
max_rejections,
return_sampled_index=store_norm_quants,
break_on_zero=False)
first_back_output = back_output[0] if store_norm_quants else back_output
if first_back_output is None:
back_output = full_backward_sample(fixed_particle,
first_edge_fixed,
first_edge_fixed_length,
filter_particles[i],
adjusted_weights,
time_interval_arr[i],
fixed_next_time_index,
mm_model,
return_ess_back=False,
return_sampled_index=store_norm_quants)
if store_norm_quants:
out_particles[j], sampled_inds[j] = back_output
else:
out_particles[j] = back_output
if store_norm_quants:
norm_quants[i] = filter_particles[i].prior_norm[sampled_inds]
none_inds = np.array([p is None or None in p for p in out_particles])
good_inds = ~none_inds
n_good = good_inds.sum()
if n_good < n_samps:
none_inds_res_indices = np.random.choice(n_samps, n_samps - n_good, p=good_inds / n_good)
for i_none, j_none in enumerate(np.where(none_inds)[0]):
out_particles[j_none] = out_particles[none_inds_res_indices[i_none]].copy()
if store_norm_quants:
norm_quants[:, j_none] = norm_quants[:, none_inds_res_indices[i_none]]
if store_ess_back:
out_particles.ess_back[i, none_inds] = n_samps
if verbose:
if full_sampling:
print(str(filter_particles[i].latest_observation_time) + " Av Backward ESS: " + str(
np.mean(ess_back[i])))
else:
print(str(filter_particles[i].latest_observation_time))
if store_ess_back:
out_particles.ess_back = ess_back
if store_norm_quants:
out_particles.dev_norm_quants = norm_quants
return out_particles
| 18,714 | 47.86423 | 120 | py |
bmm | bmm-master/bmm/src/inference/parameters.py | ########################################################################################################################
# Module: inference/parameters.py
# Description: Expectation maximisation to infer maximum likelihood hyperparameters.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from typing import Union, Tuple
import pickle
import numpy as np
from networkx.classes import MultiDiGraph
from scipy.optimize import minimize
from bmm.src.inference.particles import MMParticles
from bmm.src.inference.model import MapMatchingModel
from bmm.src.inference.smc import get_time_interval_array, offline_map_match
from bmm.src.tools.edges import observation_time_rows
def offline_em(graph: MultiDiGraph,
mm_model: MapMatchingModel,
timestamps: Union[list, float],
polylines: list,
save_path: str,
n_ffbsi: int = 100,
n_iter: int = 10,
gradient_stepsize_scale: float = 1e-3,
gradient_stepsize_neg_exp: float = 0.5,
**kwargs):
"""
Run expectation maximisation to optimise parameters of bmm.MapMatchingModel object.
Updates the hyperparameters of mm_model in place.
:param graph: encodes road network, simplified and projected to UTM
:param mm_model: MapMatchingModel - of which parameters will be updated
:param timestamps: seconds, either float if all times between observations are the same, or a series of timestamps
in seconds/UNIX timestamp, if timestamps given, must be of matching dimensions to polylines
:param polylines: UTM polylines
:param save_path: path to save learned parameters
:param n_ffbsi: number of samples for FFBSi algorithm
:param n_iter: number of EM iterations
:param gradient_stepsize_scale: starting stepsize
:param gradient_stepsize_neg_exp: rate of decay of stepsize, in [0.5, 1]
:param kwargs: additional arguments for FFBSi
:return: dict of optimised parameters
"""
params_track = {'distance_params': {key: np.asarray(value) for key, value in mm_model.distance_params.items()},
'deviation_beta': np.asarray(mm_model.deviation_beta),
'gps_sd': np.asarray(mm_model.gps_sd)}
if isinstance(polylines, np.ndarray):
polylines = [polylines]
if isinstance(timestamps, (float, int)):
timestamps = [timestamps] * len(polylines)
# If no deviation prior - can optimise prior directly, otherwise can only take gradient step
no_deviation_prior = mm_model.deviation_beta_bounds[1] == 0
if no_deviation_prior:
mm_model.deviation_beta = 0
time_interval_arrs_full = [get_time_interval_array(timestamps_single, len(polyline))
for timestamps_single, polyline in zip(timestamps, polylines)]
for k in range(n_iter):
# Run FFBSi over all given polylines with latest hyperparameters
mm_ind = 0
map_matchings = []
time_interval_arrs_int = []
polylines_int = []
for time_ints_single, polyline in zip(time_interval_arrs_full, polylines):
print(f'Polyline {mm_ind}')
success = True
try:
mm = offline_map_match(graph,
polyline,
n_ffbsi,
time_ints_single,
mm_model,
store_norm_quants=not no_deviation_prior,
**kwargs)
except ValueError:
print(f'Map-matching {mm_ind} failed')
success = False
if success:
map_matchings.append(mm)
time_interval_arrs_int.append(time_ints_single)
polylines_int.append(polyline)
mm_ind += 1
if no_deviation_prior:
# Optimise hyperparameters
optimise_hyperparameters(mm_model, map_matchings, time_interval_arrs_int, polylines_int)
else:
# Take gradient step
gradient_em_step(mm_model, map_matchings, time_interval_arrs_int, polylines_int,
gradient_stepsize_scale / (k + 1) ** gradient_stepsize_neg_exp)
# Update tracking of hyperparameters
params_track = update_params_track(params_track, mm_model)
print(f'EM iter: {k}')
print(params_track)
pickle.dump(params_track, open(save_path, 'wb'))
return params_track
def update_params_track(params_track: dict,
mm_model: MapMatchingModel) -> dict:
"""
Appends latest value to tracking of hyperparameter tuning
:param params_track: dict of hyperparameters
:param mm_model: MapMatchingModel with hyperparameters updated
:return: params_track with new hyperparameters updated
"""
params_track['distance_params'] = {key: np.append(params_track['distance_params'][key], value)
for key, value in mm_model.distance_params.items()}
params_track['deviation_beta'] = np.append(params_track['deviation_beta'], mm_model.deviation_beta)
params_track['gps_sd'] = np.append(params_track['gps_sd'], mm_model.gps_sd)
return params_track
def extract_mm_quantities(map_matching: MMParticles,
polyline: np.ndarray,
extract_devs: bool = True) -> tuple:
"""
Extract required statistics for parameter optimisation from map-matching results.
:param map_matching: MMParticles.particles list
:param polyline: for single route
:param extract_devs: whether to extract deviations (and gradient quantities)
:return: distances, deviations and squared observation-position distances
"""
distances = np.array([])
devs = np.array([])
sq_obs_dists = np.array([])
for particle in map_matching:
particle_obs_time_rows = observation_time_rows(particle)
distances_particle = particle_obs_time_rows[1:, -1]
distances = np.append(distances, distances_particle)
if extract_devs:
devs_particle = np.abs(distances_particle - np.sqrt(np.sum(np.square(particle_obs_time_rows[1:, 5:7]
- particle_obs_time_rows[:-1, 5:7]),
axis=1)))
devs = np.append(devs, devs_particle)
sq_obs_dists = np.append(sq_obs_dists, np.sum(np.square(particle_obs_time_rows[:, 5:7] - polyline), axis=1))
if extract_devs:
# Z, *dZ/dalpha, dZ/dbeta where alpha = distance_params and beta = deviation_beta
dev_norm_quants = np.concatenate(map_matching.dev_norm_quants)
return distances, (devs, dev_norm_quants), sq_obs_dists
else:
return distances, sq_obs_dists
def optimise_hyperparameters(mm_model: MapMatchingModel,
map_matchings: list,
time_interval_arrs: list,
polylines: list):
"""
For given map-matching results, optimise model hyperparameters.
Updates mm_model hyperparameters in place
:param mm_model: MapMatchingModel
:param map_matchings: list of MMParticles objects
:param time_interval_arrs: time interval arrays for each route
:param polylines: observations for each route
"""
# Get key quantities
distances = np.array([])
time_interval_arrs_concat = np.array([])
sq_obs_dists = np.array([])
for map_matching, time_interval_arr, polyline in zip(map_matchings, time_interval_arrs, polylines):
distances_single, sq_obs_dists_single = extract_mm_quantities(map_matching,
polyline,
extract_devs=False)
distances = np.append(distances, np.concatenate(distances_single))
time_interval_arrs_concat = np.append(time_interval_arrs_concat,
np.concatenate([time_interval_arr] * len(map_matching)))
sq_obs_dists = np.append(sq_obs_dists, sq_obs_dists_single)
# # Optimise zero dist prob
# def zero_dist_prob_root_func(neg_exp: float) -> float:
# return - np.sum(- time_interval_arrs_concat * (distances < 1e-5)
# + time_interval_arrs_concat * np.exp(-neg_exp * time_interval_arrs_concat)
# / (1 - np.exp(-neg_exp * time_interval_arrs_concat)) * (distances >= 1e-5))
#
# mm_model.zero_dist_prob_neg_exponent = root_scalar(zero_dist_prob_root_func, bracket=(1e-3, 1e20)).root
#
# pos_distances = distances[distances > 1e-5]
# pos_time_interval_arrs_concat = time_interval_arrs_concat[distances > 1e-5]
pos_distances = distances
pos_time_interval_arrs_concat = time_interval_arrs_concat
bounds = list(mm_model.distance_params_bounds.values())
bounds = [(a - 1e-5, a + 1e-5) if a == b else (a, b) for a, b in bounds]
# Optimise distance params
def distance_minim_func(distance_params_vals: np.ndarray) -> float:
for i, k in enumerate(mm_model.distance_params.keys()):
mm_model.distance_params[k] = distance_params_vals[i]
return -np.sum(np.log(mm_model.distance_prior_evaluate(pos_distances, pos_time_interval_arrs_concat)))
# Optimise distance params
optim_dist_params = minimize(distance_minim_func,
np.array([a for a in mm_model.distance_params.values()]),
# method='powell',
bounds=bounds)
for i, k in enumerate(mm_model.distance_params.keys()):
mm_model.distance_params[k] = optim_dist_params.x[i]
# Optimise GPS noise
mm_model.gps_sd = min(max(np.sqrt(sq_obs_dists.mean() / 2),
mm_model.gps_sd_bounds[0]),
mm_model.gps_sd_bounds[1])
def gradient_em_step(mm_model: MapMatchingModel,
map_matchings: list,
time_interval_arrs: list,
polylines: list,
stepsize: float):
"""
For given map-matching results, take gradient step on prior hyperparameters (but fully optimise gps_sd)
Updates mm_model hyperparameters in place
:param mm_model: MapMatchingModel
:param map_matchings: list of MMParticles objects
:param time_interval_arrs: time interval arrays for each route
:param polylines: observations for each route
:param stepsize: stepsize for gradient step (applied to each coord)
"""
n_particles = map_matchings[0].n
# Get key quantities
distances = np.array([])
time_interval_arrs_concat = np.array([])
devs = np.array([])
sq_obs_dists = np.array([])
dev_norm_quants = []
for map_matching, time_interval_arr, polyline in zip(map_matchings, time_interval_arrs, polylines):
distances_single, devs_and_norms_single, sq_obs_dists_single = extract_mm_quantities(map_matching,
polyline)
distances = np.append(distances, distances_single)
time_interval_arrs_concat = np.append(time_interval_arrs_concat,
np.concatenate([time_interval_arr] * len(map_matching)))
devs_single, dev_norm_quants_single = devs_and_norms_single
devs = np.append(devs, devs_single)
dev_norm_quants.append(dev_norm_quants_single)
sq_obs_dists = np.append(sq_obs_dists, sq_obs_dists_single)
# Z, *dZ/dalpha, dZ/dbeta where alpha = distance_params and beta = deviation_beta
dev_norm_quants = np.concatenate(dev_norm_quants)
pos_distances = distances
pos_time_interval_arrs_concat = time_interval_arrs_concat
pos_dev_norm_quants = dev_norm_quants
pos_devs = devs
distance_gradient_evals = (mm_model.distance_prior_gradient(pos_distances, pos_time_interval_arrs_concat)
/ mm_model.distance_prior_evaluate(pos_distances, pos_time_interval_arrs_concat)
- pos_dev_norm_quants[:, 1:-1].T / pos_dev_norm_quants[:, 0]).sum(axis=1) \
/ n_particles
deviation_beta_gradient_evals = (-pos_devs - pos_dev_norm_quants[:, -1] /
pos_dev_norm_quants[:, 0]).sum() \
/ n_particles
# Take gradient step in distance params
for i, k in enumerate(mm_model.distance_params.keys()):
bounds = mm_model.distance_params_bounds[k]
mm_model.distance_params[k] = min(max(
mm_model.distance_params[k] + stepsize * distance_gradient_evals[i],
bounds[0]), bounds[1])
# Take gradient step in deviation beta
mm_model.deviation_beta = min(max(
mm_model.deviation_beta + stepsize * deviation_beta_gradient_evals,
mm_model.deviation_beta_bounds[0]), mm_model.deviation_beta_bounds[1])
# Optimise GPS noise
mm_model.gps_sd = min(max(np.sqrt(sq_obs_dists.mean() / 2),
mm_model.gps_sd_bounds[0]),
mm_model.gps_sd_bounds[1])
| 13,538 | 45.208191 | 120 | py |
bmm | bmm-master/bmm/src/inference/proposal.py | ########################################################################################################################
# Module: inference/proposal.py
# Description: Proposal mechanisms to extend particles (series of positions/edges/distances) and re-weight
# in light of a newly received observation.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from functools import lru_cache
from typing import Tuple, Union
import numpy as np
from numba import njit
from networkx.classes import MultiDiGraph
from bmm.src.tools.edges import get_geometry, edge_interpolate, discretise_edge
from bmm.src.inference.model import MapMatchingModel
@lru_cache(maxsize=2 ** 8)
def get_out_edges(graph: MultiDiGraph,
node: int) -> np.ndarray:
"""
Extracts out edges from a given node
:param graph: encodes road network, simplified and projected to UTM
:param node: graph index to a single node
:return: array with columns u, v, k with u = node
"""
return np.atleast_2d([[u, v, k] for u, v, k in graph.out_edges(node, keys=True)])
@lru_cache(maxsize=2 ** 7)
def get_possible_routes_all_cached(graph: MultiDiGraph,
u: int,
v: int,
k: int,
d_max: float,
num_inter_cut_off: int) -> list:
in_route = np.array([[0., u, v, k, 1., 0., 0., 0.]])
return get_possible_routes(graph, in_route, d_max, all_routes=True, num_inter_cut_off=num_inter_cut_off)
def get_all_possible_routes_overshoot(graph: MultiDiGraph,
in_edge: np.ndarray,
d_max: float,
num_inter_cut_off: int = np.inf) -> list:
in_edge_geom = get_geometry(graph, in_edge[-1, 1:4])
in_edge_length = in_edge_geom.length
extra_dist = (1 - in_edge[-1, 4]) * in_edge_length
if extra_dist > d_max:
return get_possible_routes(graph, in_edge, d_max, all_routes=True, num_inter_cut_off=num_inter_cut_off)
all_possible_routes_overshoot = get_possible_routes_all_cached(graph, *in_edge[-1, 1:4],
d_max, num_inter_cut_off)
out_routes = []
for i in range(len(all_possible_routes_overshoot)):
temp_route = all_possible_routes_overshoot[i].copy()
temp_route[:, -1] += extra_dist
out_routes.append(temp_route)
return out_routes
def get_possible_routes(graph: MultiDiGraph,
in_route: np.ndarray,
dist: float,
all_routes: bool = False,
num_inter_cut_off: int = np.inf) -> list:
"""
Given a route so far and maximum distance to travel, calculate and return all possible routes on graph.
:param graph: encodes road network, simplified and projected to UTM
:param in_route: shape = (_, 9)
columns: t, u, v, k, alpha, x, y, n_inter, d
t: float, time
u: int, edge start node
v: int, edge end node
k: int, edge key
alpha: in [0,1], position along edge
x: float, metres, cartesian x coordinate
y: float, metres, cartesian y coordinate
d: metres, distance travelled
:param dist: metres, maximum possible distance to travel
:param all_routes: if true return all routes possible <= d
otherwise return only routes of length d
:param num_inter_cut_off: maximum number of intersections to cross in the time interval
:return: list of arrays
each array with shape = (_, 9) as in_route
each array describes a possible route
"""
# Extract final position from inputted route
start_edge_and_position = in_route[-1]
# Extract edge geometry
start_edge_geom = get_geometry(graph, start_edge_and_position[1:4])
start_edge_geom_length = start_edge_geom.length
# Distance left on edge before intersection
# Use NetworkX length rather than OSM length
distance_left_on_edge = (1 - start_edge_and_position[4]) * start_edge_geom_length
if distance_left_on_edge > dist:
# Remain on edge
# Propagate and return
start_edge_and_position[4] += dist / start_edge_geom_length
start_edge_and_position[-1] += dist
return [in_route]
# Reach intersection at end of edge
# Propagate to intersection and recurse
dist -= distance_left_on_edge
start_edge_and_position[4] = 1.
start_edge_and_position[-1] += distance_left_on_edge
intersection_edges = get_out_edges(graph, start_edge_and_position[2]).copy()
if intersection_edges.shape[1] == 0 or len(in_route) >= num_inter_cut_off:
# Dead-end and one-way or exceeded max intersections
if all_routes:
return [in_route]
else:
return [None]
if len(intersection_edges) == 1 and intersection_edges[0][1] == start_edge_and_position[1] \
and intersection_edges[0][2] == start_edge_and_position[3]:
# Dead-end and two-way -> Only option is u-turn
if all_routes:
return [in_route]
else:
new_routes = []
for new_edge in intersection_edges:
# If not u-turn or loop continue route search on new edge
if (not (new_edge[1] == start_edge_and_position[1] and new_edge[2] == start_edge_and_position[3])) \
and not (new_edge == in_route[:, 1:4]).all(1).any():
add_edge = np.array([[0, *new_edge, 0, 0, 0, start_edge_and_position[-1]]])
new_route = np.append(in_route,
add_edge,
axis=0)
new_routes += get_possible_routes(graph, new_route, dist, all_routes, num_inter_cut_off)
if all_routes:
return [in_route] + new_routes
else:
return new_routes
def extend_routes(graph, routes, add_distance, all_routes=True):
"""
Extend routes to a further distance.
:param graph: encodes road network, simplified and projected to UTM
:param routes: list of arrays
columns: t, u, v, k, alpha, x, y, n_inter, d
t: float, time
u: int, edge start node
v: int, edge end node
k: int, edge key
alpha: in [0,1], position along edge
x: float, metres, cartesian x coordinate
y: float, metres, cartesian y coordinate
n_inter: int, number of options if intersection
d: metres, distance travelled
:param add_distance: float
metres
additional distance to travel
:param all_routes: bool
if true return all routes possible <= d
else return only routes of length d
:return: list of numpy.ndarrays
each numpy.ndarray with shape = (_, 7)
each array describes a possible route
"""
out_routes = []
for route in routes:
out_routes += get_possible_routes(graph, route, add_distance, all_routes=all_routes)
return out_routes
def process_proposal_output(particle: np.ndarray,
sampled_route: np.ndarray,
sampled_dis_route: np.ndarray,
time_interval: float,
full_smoothing: bool) -> np.ndarray:
"""
Append sampled route to previous particle
:param particle: route up to previous observation
:param sampled_route: route since previous observation
:param sampled_dis_route: alpha, x, y, distance
:param time_interval: time between last observation and newly received observation
:param full_smoothing: whether to append to full particle or only last row
:return: appended particle
"""
# Append sampled route to old particle
new_route_append = sampled_route
new_route_append[0, 0] = 0
new_route_append[0, 5:7] = 0
new_route_append[-1, 0] = particle[-1, 0] + time_interval
new_route_append[-1, 4:7] = sampled_dis_route[0:3]
new_route_append[-1, -1] = sampled_dis_route[-1]
if full_smoothing:
return np.append(particle, new_route_append, axis=0)
else:
return np.append(particle[-1:], new_route_append, axis=0)
def optimal_proposal(graph: MultiDiGraph,
particle: np.ndarray,
new_observation: Union[None, np.ndarray],
time_interval: float,
mm_model: MapMatchingModel,
full_smoothing: bool = True,
d_refine: float = 1.,
d_max: float = None,
d_max_fail_multiplier: float = 1.5,
d_max_threshold: tuple = (0.9, 0.1),
num_inter_cut_off: int = None,
only_norm_const: bool = False,
store_norm_quants: bool = False,
resample_fails: bool = True) -> Union[Tuple[Union[None, np.ndarray],
float,
Union[float, np.ndarray]], float]:
"""
Samples a single particle from the (distance discretised) optimal proposal.
:param graph: encodes road network, simplified and projected to UTM
:param particle: single element of MMParticles.particles
:param new_observation: cartesian coordinate in UTM
:param time_interval: time between last observation and newly received observation
:param mm_model: MapMatchingModel
:param full_smoothing: if True returns full trajectory
otherwise returns only x_t-1 to x_t
:param d_refine: metres, resolution of distance discretisation
:param d_max: optional override of d_max = mm_model.d_max(time_interval)
:param d_max_fail_multiplier: extension of d_max in case all probs are 0
:param d_max_threshold: tuple defining when to extend d_max
extend if total sample prob of distances > d_max * d_max_threshold[0] larger than d_max_threshold[1]
:param num_inter_cut_off: maximum number of intersections to cross in the time interval
:param only_norm_const: if true only return prior normalising constant (don't sample)
:param store_norm_quants: whether to additionally return quantities needed for gradient EM step
assuming deviation prior is used
:param resample_fails: whether to return None (and induce later resampling of whole trajectory)
if proposal fails to find route with positive probability
if False assume distance=0
:return: (particle, unnormalised weight, prior_norm) or (particle, unnormalised weight, dev_norm_quants)
"""
if particle is None:
return 0. if only_norm_const else (None, 0., 0.)
if isinstance(new_observation, list):
new_observation = np.array(new_observation)
if num_inter_cut_off is None:
num_inter_cut_off = max(int(time_interval / 1.5), 10)
if d_max is None:
d_max = mm_model.d_max(time_interval)
# Extract all possible routes from previous position
start_position = particle[-1:].copy()
start_position[0, -1] = 0
possible_routes = get_all_possible_routes_overshoot(graph, start_position, d_max,
num_inter_cut_off=num_inter_cut_off)
# Get all possible positions on each route
discretised_routes_indices_list = []
discretised_routes_list = []
for i, route in enumerate(possible_routes):
# All possible end positions of route
discretised_edge_matrix = discretise_edge(graph, route[-1, 1:4], d_refine)
if route.shape[0] == 1:
discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, 0] >= particle[-1, 4]]
discretised_edge_matrix[:, -1] -= discretised_edge_matrix[-1, -1]
else:
discretised_edge_matrix[:, -1] += route[-2, -1]
discretised_edge_matrix = discretised_edge_matrix[discretised_edge_matrix[:, -1] < d_max + 1e-5]
# Track route index and append to list
if discretised_edge_matrix is not None and len(discretised_edge_matrix) > 0:
discretised_routes_indices_list += [np.ones(discretised_edge_matrix.shape[0], dtype=int) * i]
discretised_routes_list += [discretised_edge_matrix]
# Concatenate into numpy.ndarray
discretised_routes_indices = np.concatenate(discretised_routes_indices_list)
discretised_routes = np.concatenate(discretised_routes_list)
if len(discretised_routes) == 0 or (len(discretised_routes) == 1 and discretised_routes[0][-1] == 0):
if only_norm_const:
return 0
if resample_fails:
return None, 0., 0.
else:
sampled_dis_route = discretised_routes[0]
# Append sampled route to old particle
sampled_route = possible_routes[0]
proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
full_smoothing)
return proposal_out, 0., 0.
# Distance prior evals
distances = discretised_routes[:, -1]
distance_prior_evals = mm_model.distance_prior_evaluate(distances, time_interval)
# Deviation prior evals
deviation_prior_evals = mm_model.deviation_prior_evaluate(particle[-1, 5:7],
discretised_routes[:, 1:3],
discretised_routes[:, -1])
# Normalise prior/transition probabilities
prior_probs = distance_prior_evals * deviation_prior_evals
prior_probs_norm_const = prior_probs.sum()
if only_norm_const:
if store_norm_quants:
deviations = np.sqrt(np.sum((particle[-1, 5:7] - discretised_routes[:, 1:3]) ** 2, axis=1))
deviations = np.abs(deviations - discretised_routes[:, -1])
# Z, dZ/d(dist_params), dZ/d(deviation_beta)
dev_norm_quants = np.array([prior_probs_norm_const,
*np.sum(mm_model.distance_prior_gradient(distances, time_interval)
.reshape(len(mm_model.distance_params), len(distances))
* deviation_prior_evals, axis=-1),
-np.sum(deviations
* distance_prior_evals
* deviation_prior_evals)
])
return dev_norm_quants
else:
return prior_probs_norm_const
prior_probs /= prior_probs_norm_const
# Likelihood evaluations
likelihood_evals = mm_model.likelihood_evaluate(discretised_routes[:, 1:3], new_observation)
# Calculate sample probabilities
sample_probs = prior_probs[likelihood_evals > 0] * likelihood_evals[likelihood_evals > 0]
# sample_probs = prior_probs * likelihood_evals
# p(y_m | x_m-1^j)
prop_weight = sample_probs.sum()
model_d_max = mm_model.d_max(time_interval)
if prop_weight < 1e-100 \
or (np.sum(sample_probs[np.where(distances[likelihood_evals > 0]
> (d_max * d_max_threshold[0]))[0]])/prop_weight > d_max_threshold[1]\
and (not d_max > model_d_max)):
if (d_max - np.max(distances)) < d_refine + 1e-5 \
and d_max_fail_multiplier > 1 and (not d_max > model_d_max):
return optimal_proposal(graph,
particle,
new_observation,
time_interval,
mm_model,
full_smoothing,
d_refine,
d_max=d_max * d_max_fail_multiplier,
num_inter_cut_off=num_inter_cut_off,
only_norm_const=only_norm_const,
store_norm_quants=store_norm_quants,
resample_fails=resample_fails)
if resample_fails:
proposal_out = None
else:
sampled_dis_route_index = np.where(discretised_routes[:, -1] == 0)[0][0]
sampled_dis_route = discretised_routes[sampled_dis_route_index]
# Append sampled route to old particle
sampled_route = possible_routes[discretised_routes_indices[sampled_dis_route_index]]
proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
full_smoothing)
prop_weight = 0.
else:
# Sample an edge and distance
sampled_dis_route_index = np.random.choice(len(sample_probs), 1, p=sample_probs / prop_weight)[0]
sampled_dis_route = discretised_routes[likelihood_evals > 0][sampled_dis_route_index]
# Append sampled route to old particle
sampled_route = possible_routes[discretised_routes_indices[likelihood_evals > 0][sampled_dis_route_index]]
proposal_out = process_proposal_output(particle, sampled_route, sampled_dis_route, time_interval,
full_smoothing)
if store_norm_quants:
deviations = np.sqrt(np.sum((particle[-1, 5:7] - discretised_routes[:, 1:3]) ** 2, axis=1))
deviations = np.abs(deviations - discretised_routes[:, -1])
# Z, dZ/d(dist_params), dZ/d(deviation_beta)
dev_norm_quants = np.array([prior_probs_norm_const,
*np.sum(mm_model.distance_prior_gradient(distances, time_interval)
.reshape(len(mm_model.distance_params), len(distances))
* deviation_prior_evals, axis=-1),
-np.sum(deviations
* distance_prior_evals
* deviation_prior_evals)
])
return proposal_out, prop_weight, dev_norm_quants
else:
return proposal_out, prop_weight, prior_probs_norm_const
| 18,595 | 44.802956 | 120 | py |
bmm | bmm-master/bmm/src/inference/particles.py | ########################################################################################################################
# Module: inference/particles.py
# Description: Class to store map-matching particles.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
import copy
from typing import List
from bmm.src.tools.edges import observation_time_indices
import numpy as np
class MMParticles:
"""
Class to store trajectories from a map-matching algorithm.
In particular, contains the ``self.particles`` object, which is a
list of n arrays each with shape = (_, 8)
where _ represents the trajectory length (number of nodes that are either intersection or observation)
and columns:
- t: seconds, observation time
- u: int, edge start node
- v: int, edge end node
- k: int, edge key
- alpha: in [0,1], position along edge
- x: float, metres, cartesian x coordinate
- y: float, metres, cartesian y coordinate
- d: float, metres, distance travelled since previous observation time
As well as some useful properties:
* ``self.n``: number of particles
* ``self.m``: number of observations
* ``self.observation_times``: array of observation times
* ``self.latest_observation_time``: time of most recently received observation
* ``self.route_nodes``: list of length n, each element contains the series of nodes traversed for that particle
Initiate MMParticles storage of trajectories with some start positions as input.
:param initial_positions: list, length = n_samps, each element is an array of length 6 with elements
* u: int, edge start node
* v: int, edge end node
* k: int, edge key
* alpha: in [0,1], position along edge
* x: float, metres, cartesian x coordinate
* y: float, metres, cartesian y coordinate
"""
__module__ = 'bmm'
def __init__(self, initial_positions: List[np.ndarray]):
"""
Initiate storage of trajectories with some start positions as input.
:param initial_positions: list, length = n_samps, each element is an array of length 6 with elements
* u: int, edge start node
* v: int, edge end node
* k: int, edge key
* alpha: in [0,1], position along edge
* x: float, metres, cartesian x coordinate
* y: float, metres, cartesian y coordinate
"""
if initial_positions is not None:
self.n = len(initial_positions)
self.particles = [np.zeros((1, 8)) for _ in range(self.n)]
for i in range(self.n):
self.particles[i][0, 1:7] = initial_positions[i]
self.ess_pf = np.zeros(1)
self.time = 0
def copy(self) -> 'MMParticles':
out_part = MMParticles(None)
out_part.n = self.n
out_part.particles = [p.copy() if p is not None else None for p in self.particles]
for key, value in self.__dict__.items():
if isinstance(value, np.ndarray):
out_part.__dict__[key] = value.copy()
# elif isinstance(value, list):
# out_part.__dict__[key] = [p.copy() if p is not None else None for p in value]
elif not isinstance(value, list):
out_part.__dict__[key] = value
return out_part
def deepcopy(self) -> 'MMParticles':
return copy.deepcopy(self)
def __len__(self) -> int:
return self.n
@property
def _first_non_none_particle(self) -> np.ndarray:
"""
Finds the first element of self.particles that is not None
:return: array for single particle
"""
try:
return self.particles[0] if self.particles[0] is not None\
else next(particle for particle in self.particles if particle is not None)
except StopIteration:
raise ValueError("All particles are none")
@property
def latest_observation_time(self) -> float:
"""
Extracts most recent observation time.
:return: time of most recent observation
"""
return self._first_non_none_particle[-1, 0]
@property
def observation_times(self) -> np.ndarray:
"""
Extracts all observation times.
:return: array, shape = (m,)
"""
all_times = self._first_non_none_particle[:, 0]
return all_times[observation_time_indices(all_times)]
@property
def m(self) -> int:
"""
Number of observations received.
:return: number of observations received
"""
return len(self.observation_times)
def __getitem__(self, item):
"""
Extract single particle
:param item: index of particle to be extracted
:return: single path array, shape = (_, 8)
where _ represents the trajectory length (number of nodes that are either intersection or observation)
"""
return self.particles[item]
def __setitem__(self, key, value):
"""
Allows editing and replacement of particles
:param key: particle(s) to replace
:param value: replacement value(s)
"""
self.particles[key] = value
def route_nodes(self):
"""
Returns n series of nodes describing the routes
:return: length n list of arrays, shape (_,)
where _ represents the trajectory length (number of nodes that are either intersection or observation)
"""
nodes = []
for p in self.particles:
edges = p[:, 1:4]
pruned_edges = np.array([e for i, e in enumerate(edges) if i == 0 or not np.array_equal(e, edges[i-1])])
nodes += [np.append(pruned_edges[:, 0], pruned_edges[-1, 1])]
return nodes
| 5,927 | 35.592593 | 120 | py |
bmm | bmm-master/bmm/src/inference/smc.py | ########################################################################################################################
# Module: inference/smc.py
# Description: Implementation of sequential Monte Carlo map-matching. Both offline and online.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
from time import time as tm
import inspect
from typing import Callable, Union, Tuple
import numpy as np
from networkx.classes import MultiDiGraph
from bmm.src.tools import edges
from bmm.src.inference.particles import MMParticles
from bmm.src.inference.proposal import optimal_proposal
from bmm.src.inference.resampling import fixed_lag_stitching, multinomial, fixed_lag_stitch_post_split
from bmm.src.inference.backward import backward_simulate
from bmm.src.inference.model import MapMatchingModel, ExponentialMapMatchingModel
updates = ('PF', 'BSi')
def get_time_interval_array(timestamps: Union[float, np.ndarray],
num_obs: int) -> np.ndarray:
"""
Preprocess timestamp in put
:param timestamps: either float if all observations equally spaced, list of timestamps (length of polyline)
or list of time intervals (length of polyline - 1)
:param num_obs: length of polyline
:return: array of time intervals (length of polyline - 1)
"""
if isinstance(timestamps, (int, float)):
return np.ones(num_obs - 1) * timestamps
elif len(timestamps) == num_obs:
return timestamps[1:] - timestamps[:-1]
elif len(timestamps) == (num_obs - 1):
return timestamps
else:
raise ValueError("timestamps input not understood")
def initiate_particles(graph: MultiDiGraph,
first_observation: np.ndarray,
n_samps: int,
mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
d_refine: float = 1,
d_truncate: float = None,
ess_all: bool = True,
filter_store: bool = True) -> MMParticles:
"""
Initiate start of a trajectory by sampling points around the first observation.
Note that coordinate system of inputs must be the same, typically a UTM projection (not longtitude-latitude!).
:param graph: encodes road network, simplified and projected to UTM
:param mm_model: MapMatchingModel
:param first_observation: cartesian coordinate in UTM
:param n_samps: number of samples to generate
:param d_refine: metres, resolution of distance discretisation
:param d_truncate: metres, distance beyond which to assume zero likelihood probability defaults
to 5 * mm_model.gps_sd
:param ess_all: if true initiate effective sample size for each particle for each observation otherwise initiate
effective sample size only for each observation
:param filter_store: whether to initiate storage of filter particles and weights
:return: MMParticles object
"""
gps_sd = mm_model.gps_sd
if d_truncate is None:
d_truncate = gps_sd * 5
start = tm()
# Discretize edges within truncation
dis_points, dists_to_first_obs = edges.get_truncated_discrete_edges(graph, first_observation,
d_refine,
d_truncate, True)
if dis_points.size == 0:
raise ValueError("No edges found near initial observation: try increasing the initial_truncation")
# Likelihood weights
weights = np.exp(-0.5 / gps_sd ** 2 * dists_to_first_obs ** 2)
weights /= np.sum(weights)
# Sample indices according to weights
sampled_indices = np.random.choice(len(weights), n_samps, replace=True, p=weights)
# Output
out_particles = MMParticles(dis_points[sampled_indices])
# Initiate ESS
if ess_all:
out_particles.ess_stitch = np.ones((1, out_particles.n)) * out_particles.n
out_particles.ess_pf = np.array([out_particles.n])
if filter_store:
out_particles.filter_particles = [out_particles.copy()]
out_particles.filter_weights = np.ones((1, n_samps)) / n_samps
end = tm()
out_particles.time += end - start
return out_particles
def update_particles_flpf(graph: MultiDiGraph,
particles: MMParticles,
new_observation: np.ndarray,
time_interval: float,
mm_model: MapMatchingModel,
proposal_func: Callable,
lag: int = 3,
max_rejections: int = 50,
**kwargs) -> MMParticles:
"""
Joint fixed-lag update in light of a newly received observation, uses particle filter trajectories for stitching
Propose + reweight then fixed-lag stitching.
:param graph: encodes road network, simplified and projected to UTM
:param particles: unweighted particle approximation up to the previous observation time
:param new_observation: cartesian coordinate in UTM
:param time_interval: time between last observation and newly received observation
:param mm_model: MapMatchingModel
:param proposal_func: function to propagate and weight single particle
:param lag: fixed lag for resampling/stitching
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full fixed-lag stitching and track ess_stitch
:param kwargs:
any additional arguments to be passed to proposal
i.e. d_refine or d_max for optimal proposal
:return: MMParticles object
"""
start = tm()
# Propose and weight for each particle
out_particles, weights, new_norm_constants = propose_particles(proposal_func,
None,
graph,
particles,
new_observation,
time_interval,
mm_model,
full_smoothing=True,
store_norm_quants=False,
**kwargs)
# Normalise weights
weights /= sum(weights)
if np.any(np.isnan(weights)):
raise ZeroDivisionError('Map-matching failed (all weights zero)')
# Store norm constants
if hasattr(out_particles, 'prior_norm'):
out_particles.prior_norm = np.vstack([out_particles.prior_norm, new_norm_constants])
else:
out_particles.prior_norm = new_norm_constants[None]
# Store ESS
out_particles.ess_pf = np.append(out_particles.ess_pf, 1 / np.sum(weights ** 2))
# Update time intervals
if not hasattr(out_particles, 'time_intervals'):
out_particles.time_intervals = []
out_particles.time_intervals = np.append(out_particles.time_intervals, time_interval)
# Resample
out_particles = fixed_lag_stitching(graph, mm_model, out_particles, weights, lag, max_rejections)
end = tm()
out_particles.time += end - start
return out_particles
def update_particles_flbs(graph: MultiDiGraph,
particles: MMParticles,
new_observation: np.ndarray,
time_interval: float,
mm_model: MapMatchingModel,
proposal_func: Callable,
lag: int = 3,
max_rejections: int = 20,
ess_threshold: float = 1.,
**kwargs) -> MMParticles:
"""
Joint fixed-lag update in light of a newly received observation, uses partial backward simulation runs for stitching
Propose + reweight then backward simulation + fixed-lag stitching.
:param graph: encodes road network, simplified and projected to UTM
:param particles: unweighted particle approximation up to the previous observation time
:param new_observation: cartesian coordinate in UTM
:param time_interval: time between last observation and newly received observation
:param mm_model: MapMatchingModel
:param proposal_func: function to propagate and weight single particle
:param lag: fixed lag for resampling/stitching
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full fixed-lag stitching and track ess_stitch
:param ess_threshold: in [0,1], particle filter resamples if ess < ess_threshold * n_samps
:param kwargs:
any additional arguments to be passed to proposal
i.e. d_refine or d_max for optimal proposal
:return: MMParticles object
"""
start = tm()
filter_particles = particles.filter_particles
# Extract basic quantities
n = particles.n
observation_times = np.append(particles.observation_times, particles.observation_times[-1] + time_interval)
m = len(observation_times) - 1
stitching_required = m > lag
# Initiate particle output
out_particles = particles.copy()
# Which particles to propose from (out_particles have been resampled, filter_particles haven't)
previous_resample = particles.ess_pf[-1] < ess_threshold * n
base_particles = out_particles if previous_resample else particles.filter_particles[-1].copy()
latest_filter_particles, weights, temp_prior_norm = propose_particles(proposal_func,
None,
graph,
base_particles,
new_observation,
time_interval,
mm_model,
full_smoothing=False,
store_norm_quants=False,
**kwargs)
filter_particles[-1].prior_norm = temp_prior_norm
# Update weights if not resampled
if not previous_resample:
weights *= particles.filter_weights[-1]
# Normalise weights
weights /= sum(weights)
# Append new filter particles and weights, discard old ones
start_point = 1 if stitching_required else 0
filter_particles = particles.filter_particles[start_point:] + [latest_filter_particles]
out_particles.filter_weights = np.append(out_particles.filter_weights[start_point:], weights[np.newaxis], axis=0)
# Store ESS
out_particles.ess_pf = np.append(out_particles.ess_pf, 1 / np.sum(weights ** 2))
# Update time intervals
if not hasattr(out_particles, 'time_intervals'):
out_particles.time_intervals = []
out_particles.time_intervals = np.append(out_particles.time_intervals, time_interval)
# Run backward simulation
backward_particles = backward_simulate(graph,
filter_particles,
out_particles.filter_weights,
out_particles.time_intervals[-lag:] if lag != 0 else [],
mm_model,
max_rejections,
store_ess_back=False,
store_norm_quants=True)
backward_particles.prior_norm = backward_particles.dev_norm_quants[0]
del backward_particles.dev_norm_quants
if stitching_required:
# Largest time not to be resampled
max_fixed_time = observation_times[m - lag - 1]
# Extract fixed particles
fixed_particles = out_particles.copy()
for j in range(n):
if out_particles[j] is None:
continue
max_fixed_time_index = np.where(out_particles[j][:, 0] == max_fixed_time)[0][0]
fixed_particles[j] = out_particles[j][:(max_fixed_time_index + 1)]
# Stitch
out_particles = fixed_lag_stitch_post_split(graph,
fixed_particles,
backward_particles,
np.ones(n) / n,
mm_model,
max_rejections)
else:
out_particles.particles = backward_particles.particles
out_particles.filter_particles = filter_particles
end = tm()
out_particles.time += end - start
return out_particles
def update_particles(graph: MultiDiGraph,
particles: MMParticles,
new_observation: np.ndarray,
time_interval: float,
mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
proposal_func: Callable = optimal_proposal,
update: str = 'BSi',
lag: int = 3,
max_rejections: int = 20,
**kwargs) -> MMParticles:
"""
Updates particle approximation in receipt of new observation
:param graph: encodes road network, simplified and projected to UTM
:param particles: unweighted particle approximation up to the previous observation time
:param new_observation: cartesian coordinate in UTM
:param time_interval: time between last observation and newly received observation
:param mm_model: MapMatchingModel
:param proposal_func: function to propagate and weight single particle
:param update:
* 'PF' for particle filter fixed-lag update
* 'BSi' for backward simulation fixed-lag update
must be consistent across updates
:param lag: fixed lag for resampling/stitching
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full fixed-lag stitching and track ess_stitch
:param kwargs: optional parameters to pass to proposal
i.e. d_max, d_refine or var
as well as ess_threshold for backward simulation update
:return: MMParticles object
"""
if update == 'PF' or lag == 0:
return update_particles_flpf(graph,
particles,
new_observation,
time_interval,
mm_model,
proposal_func,
lag,
max_rejections,
**kwargs)
elif update == 'BSi':
return update_particles_flbs(graph,
particles,
new_observation,
time_interval,
mm_model,
proposal_func,
lag,
max_rejections,
**kwargs)
else:
raise ValueError("update " + update + " not recognised, see bmm.updates for valid options")
def _offline_map_match_fl(graph: MultiDiGraph,
polyline: np.ndarray,
n_samps: int,
timestamps: Union[float, np.ndarray],
mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
proposal_func: Callable = optimal_proposal,
update: str = 'BSi',
lag: int = 3,
d_refine: int = 1,
initial_d_truncate: float = None,
max_rejections: int = 20,
verbose: bool = True,
**kwargs) -> MMParticles:
"""
Runs offline map-matching but uses online fixed-lag techniques.
Only recommended for simulation purposes.
:param graph: encodes road network, simplified and projected to UTM
:param polyline: series of cartesian coordinates in UTM
:param n_samps: int number of particles
:param timestamps: seconds, either float if all times between observations are the same, or a series of timestamps
in seconds/UNIX timestamp
:param mm_model: MapMatchingModel
:param proposal_func: function to propagate and weight single particle defaults to optimal (discretised) proposal
:param update:
* 'PF' for particle filter fixed-lag update
* 'BSi' for backward simulation fixed-lag update
must be consistent across updates
:param lag: fixed lag for resampling/stitching
:param d_refine: metres, resolution of distance discretisation
:param initial_d_truncate: distance beyond which to assume zero likelihood probability at time zero,
defaults to 5 * mm_model.gps_sd
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching,
0 will do full fixed-lag stitching and track ess_stitch
:param verbose: bool whether to print ESS at each iterate
:param kwargs: optional parameters to pass to proposal
i.e. d_max or var as well as ess_threshold for backward simulation update
:return: MMParticles object
"""
num_obs = len(polyline)
ess_all = max_rejections == 0
# Initiate particles
particles = initiate_particles(graph, polyline[0], n_samps, mm_model=mm_model,
d_refine=d_refine, d_truncate=initial_d_truncate,
ess_all=ess_all,
filter_store=update == 'BSi')
if verbose:
print(str(particles.latest_observation_time) + " PF ESS: " + str(np.mean(particles.ess_pf[-1])))
if 'd_refine' in inspect.getfullargspec(proposal_func)[0]:
kwargs['d_refine'] = d_refine
time_interval_arr = get_time_interval_array(timestamps, num_obs)
if update == 'PF' or lag == 0:
update_func = update_particles_flpf
elif update == 'BSi':
update_func = update_particles_flbs
else:
raise ValueError('Update of ' + str(update) + ' not understood')
# Update particles
for i in range(num_obs - 1):
particles = update_func(graph, particles, polyline[1 + i], time_interval=time_interval_arr[i],
mm_model=mm_model, proposal_func=proposal_func, lag=lag, max_rejections=max_rejections,
**kwargs)
if verbose:
print(str(particles.latest_observation_time) + " PF ESS: " + str(np.mean(particles.ess_pf[-1])))
return particles
def offline_map_match(graph: MultiDiGraph,
polyline: np.ndarray,
n_samps: int,
timestamps: Union[float, np.ndarray],
mm_model: MapMatchingModel = ExponentialMapMatchingModel(),
proposal_func: Callable = optimal_proposal,
d_refine: int = 1,
initial_d_truncate: float = None,
max_rejections: int = 20,
ess_threshold: float = 1,
store_norm_quants: bool = False,
store_filter_particles: bool = False,
verbose: bool = True,
**kwargs) -> MMParticles:
"""
Runs offline map-matching, i.e. receives a full polyline and returns an equal probability collection
of trajectories.
Forward-filtering backward-simulation implementation - no fixed-lag approximation needed for offline inference.
:param graph: encodes road network, simplified and projected to UTM
:param polyline: series of cartesian cooridnates in UTM
:param n_samps: int
number of particles
:param timestamps: seconds
either float if all times between observations are the same, or a series of timestamps in seconds/UNIX timestamp
:param mm_model: MapMatchingModel
:param proposal_func: function to propagate and weight single particle
defaults to optimal (discretised) proposal
:param d_refine: metres, resolution of distance discretisation
:param initial_d_truncate: distance beyond which to assume zero likelihood probability at time zero
defaults to 5 * mm_model.gps_sd
:param max_rejections: number of rejections to attempt before doing full fixed-lag stitching
0 will do full fixed-lag stitching and track ess_stitch
:param ess_threshold: in [0,1], particle filter resamples if ess < ess_threshold * n_samps
:param store_norm_quants: if True normalisation quantities (including gradient evals) returned in out_particles
:param store_filter_particles: if True filter particles returned in out_particles
:param verbose: bool whether to print ESS at each iterate
:param kwargs: optional parameters to pass to proposal
i.e. d_max, d_refine or var
as well as ess_threshold for backward simulation update
:return: MMParticles object
"""
num_obs = len(polyline)
ess_all = max_rejections == 0
start = tm()
filter_particles = [None] * num_obs
filter_weights = np.zeros((num_obs, n_samps))
# Initiate filter_particles
filter_particles[0] = initiate_particles(graph, polyline[0], n_samps, mm_model=mm_model,
d_refine=d_refine, d_truncate=initial_d_truncate,
ess_all=ess_all)
filter_weights[0] = 1 / n_samps
live_weights = filter_weights[0].copy()
ess_pf = np.zeros(num_obs)
ess_pf[0] = n_samps
if verbose:
print("0 PF ESS: " + str(ess_pf[0]))
if 'd_refine' in inspect.getfullargspec(proposal_func)[0]:
kwargs['d_refine'] = d_refine
time_interval_arr = get_time_interval_array(timestamps, num_obs)
# Forward filtering, storing x_t-1, x_t ~ p(x_t-1:t|y_t)
for i in range(num_obs - 1):
resample = ess_pf[i] < ess_threshold * n_samps
filter_particles[i + 1], temp_weights, temp_prior_norm = propose_particles(proposal_func,
live_weights if resample else None,
graph,
filter_particles[i],
polyline[i + 1],
time_interval_arr[i],
mm_model,
full_smoothing=False,
store_norm_quants=store_norm_quants,
**kwargs)
filter_particles[i].prior_norm = temp_prior_norm
if not resample:
temp_weights *= live_weights
if temp_weights.sum() == 0.:
raise ValueError('Map-matching failed: filtering weights all zero,'
'try examining polyline bmm.plot(graph, polyline=polyline')
temp_weights /= np.sum(temp_weights)
filter_weights[i + 1] = temp_weights.copy()
live_weights = temp_weights.copy()
ess_pf[i + 1] = 1 / np.sum(temp_weights ** 2)
if verbose:
print(str(filter_particles[i + 1].latest_observation_time) + " PF ESS: " + str(ess_pf[i + 1]))
# Backward simulation
out_particles = backward_simulate(graph,
filter_particles,
filter_weights,
time_interval_arr,
mm_model,
max_rejections,
verbose=verbose,
store_norm_quants=store_norm_quants)
out_particles.ess_pf = ess_pf
if store_filter_particles:
out_particles.filter_particles = filter_particles.copy()
end = tm()
out_particles.time = end - start
return out_particles
def propose_particles(proposal_func: Callable,
resample_weights: Union[None, np.ndarray],
graph: MultiDiGraph,
particles: MMParticles,
new_observation: np.ndarray,
time_interval: float,
mm_model: MapMatchingModel,
full_smoothing: bool = True,
store_norm_quants: bool = False,
**kwargs) -> Tuple[MMParticles, np.ndarray, np.ndarray]:
"""
Samples a single particle from the (distance discretised) optimal proposal.
:param proposal_func: function to propagate and weight single particle
:param resample_weights: weights for resampling, None for no resampling
:param graph: encodes road network, simplified and projected to UTM
:param particles: all particles at last observation time
:param new_observation: cartesian coordinate in UTM
:param time_interval: time between last observation and newly received observation
:param mm_model: MapMatchingModel
:param full_smoothing: if True returns full trajectory
otherwise returns only x_t-1 to x_t
:param store_norm_quants: whether to additionally return quantities needed for gradient EM step
assuming deviation prior is used
:return: particle, unnormalised weight, prior_norm(_quants)
"""
n_samps = particles.n
out_particles = particles.copy()
if resample_weights is not None:
resample_inds = np.random.choice(n_samps, n_samps, replace=True, p=resample_weights)
not_prop_inds = np.arange(n_samps)[~np.isin(np.arange(n_samps), resample_inds)]
else:
resample_inds = np.arange(n_samps)
not_prop_inds = []
weights = np.zeros(n_samps)
prior_norms = np.zeros((n_samps, len(mm_model.distance_params) + 2)) if store_norm_quants else np.zeros(n_samps)
for j in range(n_samps):
in_particle = particles[resample_inds[j]]
in_particle = in_particle.copy() if in_particle is not None else None
out_particles[j], weights[j], prior_norms[resample_inds[j]] = proposal_func(graph,
in_particle,
new_observation,
time_interval,
mm_model,
full_smoothing=full_smoothing,
store_norm_quants=store_norm_quants,
**kwargs)
for k in not_prop_inds:
if particles[k] is not None:
prior_norms[k] = proposal_func(graph,
particles[k],
None,
time_interval,
mm_model,
full_smoothing=False,
store_norm_quants=store_norm_quants,
only_norm_const=True,
**kwargs)
else:
prior_norms[k] = np.zeros(len(mm_model.distance_params) + 2) if store_norm_quants else 0
return out_particles, weights, prior_norms
| 28,889 | 45.97561 | 120 | py |
bmm | bmm-master/tests/test_smc.py | ########################################################################################################################
# Module: tests/test_smc.py
# Description: Tests for SMC implementation.
#
# Web: https://github.com/SamDuffield/bayesian-traffic
########################################################################################################################
import unittest
import os
import json
import pandas as pd
import osmnx as ox
import numpy as np
from bmm.src.tools.edges import graph_edges_gdf
from bmm.src.inference import smc, proposal
from bmm.src.inference.model import ExponentialMapMatchingModel
def read_data(path, nrows=None):
data_reader = pd.read_csv(path, nrows=10)
data_columns = data_reader.columns
polyline_converters = {col_name: json.loads for col_name in data_columns
if 'POLYLINE' in col_name}
return pd.read_csv(path, converters=polyline_converters, nrows=nrows)
def load_test_data(test_data_path=None, nrows=None):
if test_data_path is None:
test_dir_path = os.path.dirname(os.path.realpath(__file__))
test_files = os.listdir(test_dir_path)
test_data_files = [file_name for file_name in test_files if file_name[:8] == 'testdata']
if len(test_data_files) == 0:
assert ValueError("Test data not found")
test_data_file = test_data_files[0]
test_data_path = test_dir_path + '/' + test_data_file
return read_data(test_data_path, nrows)
def load_graph(test_graph_path=None):
if test_graph_path is None:
test_dir_path = os.path.dirname(os.path.realpath(__file__))
test_files = os.listdir(test_dir_path)
test_graph_files = [file_name for file_name in test_files if file_name[:9] == 'testgraph']
if len(test_graph_files) == 0:
assert ValueError("Test graph not found")
test_graph_file = test_graph_files[0]
test_graph_path = test_dir_path + '/' + test_graph_file
return ox.load_graphml(test_graph_path)
class TestWithGraphAndData(unittest.TestCase):
def setUp(self):
self.graph = load_graph()
self.gdf = graph_edges_gdf(self.graph)
self.test_data = load_test_data(nrows=10)
class TestInitiateParticles(TestWithGraphAndData):
def test_initiate(self):
self.particles = smc.initiate_particles(self.graph, self.test_data['POLYLINE_UTM'][0][0], 10)
self.assertEqual(self.particles.n, 10)
self.assertEqual(self.particles[0].shape, (1, 8))
init_arr = np.array(self.particles.particles)[:, 0, :]
self.assertGreater(np.unique(init_arr[:, 5]).size, 3)
self.assertEqual(init_arr[:, 0].sum(), 0.)
self.assertEqual(init_arr[:, -1].sum(), 0.)
class TestProposeParticles(TestWithGraphAndData):
def test_propose(self):
self.particles = smc.initiate_particles(self.graph, self.test_data['POLYLINE_UTM'][0][0], 10)
proposed_particle, weight, prior_norm = proposal.optimal_proposal(self.graph,
self.particles[0],
self.test_data['POLYLINE_UTM'][0][1],
15,
ExponentialMapMatchingModel())
self.assertEqual(proposed_particle.shape[1], 8)
self.assertIsInstance(weight, float)
self.assertGreater(weight, 0.)
self.assertGreaterEqual(proposed_particle.shape[0], 2)
self.assertEqual(proposed_particle.shape[1], 8)
self.assertEqual(np.isnan(proposed_particle).sum(), 0)
self.assertEqual(proposed_particle[:, 0].sum(), 15.)
self.assertEqual(proposed_particle[-1, 0], 15.)
self.assertGreaterEqual(proposed_particle[-1, -1], 0.)
class TestUpdateParticlesPF(TestWithGraphAndData):
def test_update(self):
self.particles = smc.initiate_particles(self.graph, self.test_data['POLYLINE_UTM'][0][0], 10,
filter_store=False)
updated_particles = smc.update_particles_flpf(self.graph,
self.particles,
self.test_data['POLYLINE_UTM'][0][1],
15,
ExponentialMapMatchingModel(),
proposal.optimal_proposal)
self.assertEqual(updated_particles.n, 10)
self.assertEqual(len(updated_particles.particles), 10)
for proposed_particle in updated_particles:
self.assertEqual(proposed_particle.shape[1], 8)
self.assertGreaterEqual(proposed_particle.shape[0], 2)
self.assertEqual(proposed_particle.shape[1], 8)
self.assertEqual(np.isnan(proposed_particle).sum(), 0)
self.assertEqual(proposed_particle[:, 0].sum(), 15.)
self.assertEqual(proposed_particle[-1, 0], 15.)
self.assertGreaterEqual(proposed_particle[-1, -1], 0.)
self.assertGreater(np.unique([pp[-1, 5] for pp in updated_particles]).size, 3)
class TestUpdateParticlesBSi(TestWithGraphAndData):
def test_update(self):
self.particles = smc.initiate_particles(self.graph, self.test_data['POLYLINE_UTM'][0][0], 10,
filter_store=True)
updated_particles = smc.update_particles_flbs(self.graph,
self.particles,
self.test_data['POLYLINE_UTM'][0][1],
15,
ExponentialMapMatchingModel(),
proposal.optimal_proposal)
self.assertEqual(updated_particles.n, 10)
self.assertEqual(len(updated_particles.particles), 10)
for proposed_particle in updated_particles:
self.assertEqual(proposed_particle.shape[1], 8)
self.assertGreaterEqual(proposed_particle.shape[0], 2)
self.assertEqual(proposed_particle.shape[1], 8)
self.assertEqual(np.isnan(proposed_particle).sum(), 0)
self.assertEqual(proposed_particle[:, 0].sum(), 15.)
self.assertEqual(proposed_particle[-1, 0], 15.)
self.assertGreaterEqual(proposed_particle[-1, -1], 0.)
self.assertGreater(np.unique([pp[-1, 5] for pp in updated_particles]).size, 3)
if __name__ == '__main__':
unittest.main()
| 6,752 | 46.893617 | 120 | py |
bmm | bmm-master/tests/test_resampling.py | ########################################################################################################################
# Module: tests/test_resampling.py
# Description: Tests for resampling schemes.
#
# Web: https://github.com/SamDuffield/bayesian-traffic
########################################################################################################################
import unittest
import numpy as np
import numpy.testing as npt
from bmm.src.inference.particles import MMParticles
from bmm.src.inference import resampling
class TestMultinomial(unittest.TestCase):
def test_array_trivial(self):
array = np.arange(10)
weights = np.zeros(10)
weights[0] = 1
npt.assert_array_equal(resampling.multinomial(array, weights), np.zeros(10))
def test_array_repeated(self):
array = np.arange(10)
weights = np.arange(1, 11)
weights = weights / weights.sum()
repeated_resample = np.array([resampling.multinomial(array, weights) for _ in range(10000)])
empirical_weights = np.array([(repeated_resample == i).mean() for i in array])
npt.assert_array_almost_equal(weights, empirical_weights, decimal=2)
def test_list_trivial(self):
tlist = [a for a in range(10)]
weights = np.zeros(10)
weights[0] = 1
self.assertEqual(resampling.multinomial(tlist, weights), [0 for _ in range(10)])
def test_list_repeated(self):
tlist = [a for a in range(10)]
weights = np.arange(1, 11)
weights = weights / weights.sum()
repeated_resample = np.array([resampling.multinomial(tlist, weights) for _ in range(10000)])
empirical_weights = np.array([(repeated_resample == i).mean() for i in tlist])
npt.assert_array_almost_equal(weights, empirical_weights, decimal=2)
def test_mmparticles_trivial(self):
init_array = np.zeros((3, 6))
init_array += np.arange(3).reshape(3, 1)
mmp = MMParticles(init_array)
weights = np.array([0, 1, 0])
mmp_resampled = resampling.multinomial(mmp, weights)
for i in range(3):
npt.assert_array_equal(mmp_resampled[i], np.array([[0, 1, 1, 1, 1, 1, 1, 0]]))
def test_mmparticles_repeated(self):
init_array = np.zeros((10, 6))
init_array += np.arange(10).reshape(10, 1)
mmp = MMParticles(init_array)
weights = np.arange(1, 11)
weights = weights / weights.sum()
repeated_resample = [resampling.multinomial(mmp, weights) for _ in range(10000)]
repeated_resample_arr = np.array([p.particles for p in repeated_resample])[:, :, 0, 1]
empirical_weights = np.array([(repeated_resample_arr == i).mean() for i in np.arange(10)])
npt.assert_array_almost_equal(weights, empirical_weights, decimal=2)
if __name__ == '__main__':
unittest.main()
| 2,864 | 40.521739 | 120 | py |
bmm | bmm-master/tests/test_MMParticles.py | ########################################################################################################################
# Module: tests/test_MMParticles.py
# Description: Tests for MMParticles class.
#
# Web: https://github.com/SamDuffield/bayesian-traffic
########################################################################################################################
import unittest
import numpy as np
import numpy.testing as npt
from bmm.src.inference import particles
class TestMMParticles(unittest.TestCase):
def setUp(self):
self.mmp = particles.MMParticles(np.zeros((3, 6)))
class TestInit(TestMMParticles):
def test_initial_n(self):
self.assertEqual(self.mmp.n, 3)
def test_initial_latest_observation_time(self):
self.assertEqual(self.mmp.latest_observation_time, 0)
def test_initial_observation_times(self):
npt.assert_array_equal(self.mmp.observation_times, np.array([0]))
def test_initial_m(self):
self.assertEqual(self.mmp.m, 1)
def test_initial_index(self):
npt.assert_array_equal(self.mmp[0], np.zeros((1, 8)))
def test_initial_replace(self):
self.mmp[1] = np.array(np.ones((1, 8)))
npt.assert_array_equal(self.mmp[1], np.ones((1, 8)))
class TestUpdate(TestMMParticles):
def setUp(self):
super().setUp()
for i in range(self.mmp.n):
self.mmp.particles[i] = np.append(self.mmp.particles[i], [[4, 0, 0, 0, 0, 0, 0, 0]], axis=0)
def test_update_particle_shape(self):
self.assertEqual(self.mmp[0].shape, (2, 8))
def test_update_n(self):
self.assertEqual(self.mmp.n, 3)
def test_update_latest_observation_time(self):
self.assertEqual(self.mmp.latest_observation_time, 4)
def test_update_observation_times(self):
npt.assert_array_equal(self.mmp.observation_times, np.array([0, 4]))
def test_update_m(self):
self.assertEqual(self.mmp.m, 2)
def test_update_index(self):
npt.assert_array_equal(self.mmp[0], np.array([[0, 0, 0, 0, 0, 0, 0, 0],
[4, 0, 0, 0, 0, 0, 0, 0]]))
if __name__ == '__main__':
unittest.main()
| 2,205 | 30.070423 | 120 | py |
bmm | bmm-master/simulations/sanity_check.py |
import numpy as np
import pandas as pd
import osmnx as ox
import json
import bmm
# Download and project graph
graph = ox.graph_from_place('London, UK')
graph = ox.project_graph(graph)
# Generate synthetic route and polyline
generated_route, generated_polyline = bmm.sample_route(graph, timestamps=15, num_obs=20)
# Map-match
matched_particles = bmm.offline_map_match(graph, generated_polyline, n_samps=100, timestamps=15)
# Plot true route
bmm.plot(graph, generated_route, generated_polyline, particles_color='green')
# Plot map-matched particles
bmm.plot(graph, matched_particles, generated_polyline)
| 609 | 24.416667 | 96 | py |
bmm | bmm-master/simulations/porto/max_rejections_compare.py | import os
import json
import numpy as np
import osmnx as ox
import pandas as pd
import matplotlib.pyplot as plt
import bmm
from . import utils
seed = 0
np.random.seed(seed)
timestamps = 15
n_samps = np.array([50, 100, 150, 200])
lag = 3
mr_max = 20
# max_rejections = np.arange(0, mr_max + 1, step=int(mr_max/5))
max_rejections = np.array([0, 1, 2, 4, 8, 16, 32])
initial_truncation = None
num_repeats = 1
max_speed = 35
proposal_dict = {'proposal': 'optimal',
'num_inter_cut_off': 10,
'resample_fails': False,
'd_max_fail_multiplier': 2.}
setup_dict = {'seed': seed,
'n_samps': n_samps.tolist(),
'lag': lag,
'max_rejections': max_rejections.tolist(),
'initial_truncation': initial_truncation,
'num_repeats': num_repeats,
'num_inter_cut_off': proposal_dict['num_inter_cut_off'],
'max_speed': max_speed,
'resample_fails': proposal_dict['resample_fails'],
'd_max_fail_multiplier': proposal_dict['d_max_fail_multiplier']}
print(setup_dict)
porto_sim_dir = os.getcwd()
graph_path = porto_sim_dir + '/portotaxi_graph_portugal-140101.osm._simple.graphml'
graph = ox.load_graphml(graph_path)
test_route_data_path = porto_sim_dir + '/test_route.csv'
# Load long-lat polylines
polyline_ll = np.array(json.loads(pd.read_csv(test_route_data_path)['POLYLINE'][0]))
# Convert to utm
polyline = bmm.long_lat_to_utm(polyline_ll, graph)
save_dir = porto_sim_dir + '/mr_output/'
# Create save_dir if not found
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Save simulation parameters
with open(save_dir + 'setup_dict', 'w+') as f:
json.dump(setup_dict, f)
# Setup map-matching model
mm_model = bmm.ExponentialMapMatchingModel()
mm_model.max_speed = max_speed
fl_pf_routes = np.empty((num_repeats, len(n_samps), len(max_rejections)), dtype=object)
fl_bsi_routes = np.empty((num_repeats, len(n_samps), len(max_rejections)), dtype=object)
fl_pf_times = np.zeros((num_repeats, len(n_samps), len(max_rejections)))
fl_bsi_times = np.zeros((num_repeats, len(n_samps), len(max_rejections)))
n_pf_failures = 0
n_bsi_failures = 0
for i in range(num_repeats):
for j, n in enumerate(n_samps):
for k, max_reject_int in enumerate(max_rejections):
print(i, j, k)
try:
fl_pf_routes[i, j, k] = bmm._offline_map_match_fl(graph,
polyline,
n,
timestamps=timestamps,
mm_model=mm_model,
lag=lag,
update='PF',
max_rejections=max_reject_int,
initial_d_truncate=initial_truncation,
**proposal_dict)
fl_pf_times[i, j, k] = fl_pf_routes[i, j, k].time
print(f'FL PF {i} {j} {k}: {fl_pf_routes[i, j, k].time}')
except:
n_pf_failures += 1
print(f'FL PF failures: {n_pf_failures}')
utils.clear_cache()
try:
fl_bsi_routes[i, j, k] = bmm._offline_map_match_fl(graph,
polyline,
n,
timestamps=timestamps,
mm_model=mm_model,
lag=lag,
update='BSi',
max_rejections=max_reject_int,
initial_d_truncate=initial_truncation,
**proposal_dict)
fl_bsi_times[i, j, k] = fl_bsi_routes[i, j, k].time
print(f'FL BSi {i} {j} {k}: {fl_bsi_routes[i, j, k].time}')
except:
n_bsi_failures += 1
print(f'FL BSi failures: {n_bsi_failures}')
utils.clear_cache()
print(f'FL PF failures: {n_pf_failures}')
print(f'FL BSi failures: {n_bsi_failures}')
np.save(save_dir + 'fl_pf_routes', fl_pf_routes)
np.save(save_dir + 'fl_pf_times', fl_pf_times)
np.save(save_dir + 'fl_bsi_routes', fl_bsi_routes)
np.save(save_dir + 'fl_bsi_times', fl_bsi_times)
#
# fl_pf_routes = np.load(save_dir + 'fl_pf_routes.npy', allow_pickle=True)
# fl_pf_times = np.load(save_dir + 'fl_pf_times.npy')
# fl_bsi_routes = np.load(save_dir + 'fl_bsi_routes.npy', allow_pickle=True)
# fl_bsi_times = np.load(save_dir + 'fl_bsi_times.npy')
# with open(save_dir + 'setup_dict') as f:
# setup_dict = json.load(f)
n_obs = len(polyline)
fl_pf_times_per_obs = fl_pf_times / n_obs
fl_bsi_times_per_obs = fl_bsi_times / n_obs
line_styles = ['-', '--', ':', '-.']
def comp_plot(n_samps,
max_rejects,
times,
leg=False,
**kwargs):
fig, ax = plt.subplots()
fig.set_figwidth(5)
fig.set_figwidth(5)
fig.set_figheight(7)
fig.set_figheight(7)
for i, n in reversed(list(enumerate(n_samps))):
ax.plot(max_rejects, times[i], label=str(n), linestyle=line_styles[i], **kwargs)
# ax.plot(max_rejects, times[i], label=str(n))
ax.set_xlabel(r'$R$', fontsize=16)
ax.set_ylabel('Runtime per observation, s', fontsize=16)
if leg:
l = ax.legend(loc='upper right', title=r'$N$', fontsize=16)
plt.setp(l.get_title(), fontsize=16)
fig.tight_layout()
return fig, ax
pf_fig, pf_ax = comp_plot(n_samps, max_rejections, np.mean(fl_pf_times_per_obs, axis=0), color='red', leg=True)
bsi_fig, bsi_ax = comp_plot(n_samps, max_rejections, np.mean(fl_bsi_times_per_obs, axis=0), color='blue', leg=True)
pf_ax.set_ylim(bsi_ax.get_ylim())
pf_fig.savefig(save_dir + 'pf_mr_compare2', dpi=400)
bsi_fig.savefig(save_dir + 'bsi_mr_compare2', dpi=400)
# pf_ax.set_xticks(xt)
# bsi_ax.set_xticks(xt) | 6,552 | 36.878613 | 115 | py |
bmm | bmm-master/simulations/porto/bulk_map_match.py | import os
import json
import numpy as np
import osmnx as ox
import pandas as pd
import bmm
porto_sim_dir = os.getcwd()
graph_path = porto_sim_dir + '/portotaxi_graph_portugal-140101.osm._simple.graphml'
graph = ox.load_graphml(graph_path)
test_route_data_path = '' # Download from https://archive.ics.uci.edu/ml/datasets/Taxi+Service+Trajectory+-+Prediction+Challenge,+ECML+PKDD+2015
save_dir = porto_sim_dir + '/bulk_output/'
# Load long-lat polylines
polylines_ll = pd.read_csv(test_route_data_path, chunksize=5000).get_chunk()['POLYLINE'].apply(json.loads)
polylines_ll = [np.array(a) for a in polylines_ll]
num_routes = 500
min_length = 20
max_length = 60
polylines_ll = [c for c in polylines_ll if min_length <= len(c) <= max_length]
mm_routes = np.empty(num_routes, dtype=object)
failed_routes = []
i = 0
j = 0
while j < num_routes:
poly = bmm.long_lat_to_utm(polylines_ll[i], graph)
print('Route attempt:', i, len(poly))
print('Successful routes:', j)
try:
mm_route = bmm.offline_map_match(graph, poly, 100, 15.)
mm_routes[j] = mm_route
j += 1
except:
# Typically missing data in the polyline or the polyline leaves the graph
print(i, 'mm failed')
failed_routes.append(i)
i += 1
failed_routes = np.array(failed_routes)
np.save(save_dir + 'mm_routes', mm_routes)
np.save(save_dir + 'failed_routes', failed_routes)
mm_routes = np.load(save_dir + 'mm_routes.npy', allow_pickle=True)
failed_routes = np.load(save_dir + 'failed_routes.npy')
def is_multi_modal(particles):
route_nodes = particles.route_nodes()
return any([not np.array_equal(route_nodes[0], a) for a in route_nodes[1:]])
mm_routes_multi_modal = np.array([is_multi_modal(a) for a in mm_routes])
print(mm_routes_multi_modal.sum(), '/', mm_routes_multi_modal.size, ' routes multi-modal')
| 1,851 | 28.870968 | 146 | py |
bmm | bmm-master/simulations/porto/utils.py | import functools
import gc
import json
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import bmm
def read_data(path, chunksize=None):
data_reader = pd.read_csv(path, chunksize=10)
data_columns = data_reader.get_chunk().columns
polyline_converters = {col_name: json.loads for col_name in data_columns
if 'POLYLINE' in col_name}
return pd.read_csv(path, converters=polyline_converters, chunksize=chunksize)
def clear_cache():
gc.collect()
for a in gc.get_objects():
if isinstance(a, functools._lru_cache_wrapper):
a.cache_clear()
def total_variation_dists(dists_one,
dists_two,
bin_width=3):
n1 = len(dists_one)
n2 = len(dists_two)
all_dists = np.concatenate([dists_one, dists_two])
all_dists = np.unique(all_dists)
if bin_width is None:
tv = 0.
for dist in all_dists:
p_1 = np.sum(dists_one == dist) / n1
p_2 = np.sum(dists_two == dist) / n2
tv = tv + np.abs(p_1 - p_2)
else:
min_dist = np.min(dists_one)
max_dist = np.max(dists_one)
tv = 0
bin_linsp = np.arange(min_dist, max_dist, bin_width)
# Below min
tv += np.sum(dists_two < min_dist) / n2
# Above max
tv += np.sum(dists_two >= max_dist) / n2
for i in range(len(bin_linsp)):
int_min = bin_linsp[i]
int_max = int_min + bin_width
p_1 = np.sum((dists_one >= int_min) * (dists_one < int_max)) / n1
p_2 = np.sum((dists_two >= int_min) * (dists_two < int_max)) / n2
tv += np.abs(p_1 - p_2)
return tv / 2
def obs_rows_trim(particles, trail_zero_lim=3):
particles_obs_rows = []
for p in particles:
obs_rows = bmm.observation_time_rows(p)
zero_dist_bools = obs_rows[:, -1] == 0
if np.all(zero_dist_bools[-trail_zero_lim:]):
count = 3
is_zero = True
while is_zero and count < len(obs_rows):
count += 1
is_zero = zero_dist_bools[-count]
particles_obs_rows.append(obs_rows[:-(count - 1)])
else:
particles_obs_rows.append(obs_rows)
particles_obs_rows.append(bmm.observation_time_rows(p))
return particles_obs_rows
def interval_tv_dists(particles_one,
particles_two,
interval=60,
speeds=False,
bins=None,
trim_zeros=3):
observation_times = particles_one.observation_times
obs_int = observation_times[1]
if interval % obs_int != 0:
raise ValueError('interval must be a multiple of inter-observation times')
obs_per_int = int(interval / obs_int)
num_ints = int(observation_times[-1] / interval)
tv_each_time = np.zeros(num_ints)
particles_one_obs_rows = obs_rows_trim(particles_one, trim_zeros)
particles_two_obs_rows = obs_rows_trim(particles_two, trim_zeros)
for i in range(1, num_ints + 1):
start_time = observation_times[(i - 1) * obs_per_int]
end_time = observation_times[i * obs_per_int]
p1_dists = -np.ones(particles_one.n) * 2
for j in range(particles_one.n):
obs_rows = particles_one_obs_rows[j]
if end_time in obs_rows[:, 0]:
p1_dists[j] = np.sum(
obs_rows[np.logical_and(obs_rows[:, 0] >= start_time, obs_rows[:, 0] <= end_time), -1])
p2_dists = -np.ones(particles_two.n) * 3
for k in range(particles_two.n):
obs_rows = particles_two_obs_rows[k]
if end_time in obs_rows:
p2_dists[k] = np.sum(
obs_rows[np.logical_and(obs_rows[:, 0] >= start_time, obs_rows[:, 0] <= end_time), -1])
if speeds:
p1_dists /= interval
p2_dists /= interval
tv_each_time[i - 1] = total_variation_dists(p1_dists, p2_dists, bins)
return tv_each_time
def plot_metric_over_time(setup_dict, fl_pf_metric, fl_bsi_metric, save_dir=None,
t_linspace=None, x_lab='t', x_ticks=None):
lags = setup_dict['lags']
m = fl_pf_metric.shape[-1]
if t_linspace is None:
t_linspace = np.arange(m)
lines = [None] * (len(lags) + 1)
fig_pf, axes_pf = plt.subplots(len(setup_dict['fl_n_samps']), sharex='all', sharey='all', figsize=(8, 6))
fig_bs, axes_bs = plt.subplots(len(setup_dict['fl_n_samps']), sharex='all', sharey='all', figsize=(8, 6))
for j, n in enumerate(setup_dict['fl_n_samps']):
for k, lag in enumerate(lags):
axes_pf[j].plot(t_linspace, fl_pf_metric[j, k], label=f'Lag: {lag}')
lines[k], = axes_bs[j].plot(t_linspace, fl_bsi_metric[j, k], label=f'Lag: {lag}')
axes_pf[j].set_ylabel(f'N={n}', fontsize=18)
axes_bs[j].set_ylabel(f'N={n}', fontsize=18)
# axes_pf[j].set_ylim(0, 0.7)
# axes_bs[j].set_ylim(0, 0.7)
# axes[j, 0].set_yticks([0, 0.5, 1])
# axes[j, 1].set_yticks([0, 0.5, 1])
axes_pf[-1].set_xlabel(x_lab, fontsize=16)
axes_bs[-1].set_xlabel(x_lab, fontsize=16)
if x_ticks is not None:
axes_pf[-1].set_xticks(x_ticks)
axes_bs[-1].set_xticks(x_ticks)
plt.legend(loc='upper right', bbox_to_anchor=(0.8, 0.99))
fig_pf.set_figwidth(5)
fig_bs.set_figwidth(5)
# fig_pf.set_figheight(7)
# fig_bs.set_figheight(7)
fig_pf.set_figheight(11)
fig_bs.set_figheight(11)
fig_pf.tight_layout()
fig_bs.tight_layout()
if save_dir is not None:
fig_pf.savefig(save_dir + '_pf', dpi=400)
fig_bs.savefig(save_dir + '_bs', dpi=400)
return (fig_pf, axes_pf), (fig_bs, axes_bs)
| 5,822 | 31.171271 | 109 | py |
bmm | bmm-master/simulations/porto/parameter_training.py | ########################################################################################################################
# Module: parameter_inference.py
# Description: Tune hyperparameters using some Porto taxi data.
#
# Web: https://github.com/SamDuffield/bmm
########################################################################################################################
import os
import json
import numpy as np
import osmnx as ox
import pandas as pd
import bmm
np.random.seed(0)
timestamps = 15
n_iter = 200
n_particles = 100
sim_dir = os.getcwd()
graph_path = sim_dir + '/testgraph_portotaxi_graph_portugal-140101.osm._simple.graphml'
graph = ox.load_graphml(graph_path)
train_data_path = sim_dir + '/training_data.csv'
# Load long-lat polylines
polylines_ll = [np.array(json.loads(poly)) for poly in pd.read_csv(train_data_path)['POLYLINE']]
# Convert to utm
polylines = [bmm.long_lat_to_utm(poly, graph) for poly in polylines_ll]
# Initiate model
mm_model = bmm.ExponentialMapMatchingModel()
mm_model.distance_params['zero_dist_prob_neg_exponent'] = -np.log(0.15) / timestamps
mm_model.distance_params['lambda_speed'] = 1 / 10
mm_model.deviation_beta = 0.1
mm_model.gps_sd = 7.
params_track = bmm.offline_em(graph, mm_model, timestamps, polylines,
save_path=os.getcwd() + '/tuned_params.pickle',
n_iter=n_iter, max_rejections=30,
n_ffbsi=n_particles, initial_d_truncate=50,
gradient_stepsize_scale=1e-5, gradient_stepsize_neg_exp=0.5,
num_inter_cut_off=10, ess_threshold=1.)
| 1,652 | 33.4375 | 120 | py |
bmm | bmm-master/simulations/porto/total_variation_compare.py | import os
import json
import numpy as np
import osmnx as ox
import pandas as pd
import bmm
from . import utils
seed = 0
np.random.seed(seed)
timestamps = 15
ffbsi_n_samps = int(1e3)
fl_n_samps = np.array([50, 100, 150, 200])
lags = np.array([0, 3, 10])
max_rejections = 30
initial_truncation = None
num_repeats = 20
max_speed = 35
proposal_dict = {'proposal': 'optimal',
'num_inter_cut_off': 10,
'resample_fails': False,
'd_max_fail_multiplier': 2.}
setup_dict = {'seed': seed,
'ffbsi_n_samps': ffbsi_n_samps,
'fl_n_samps': fl_n_samps.tolist(),
'lags': lags.tolist(),
'max_rejections': max_rejections,
'initial_truncation': initial_truncation,
'num_repeats': num_repeats,
'num_inter_cut_off': proposal_dict['num_inter_cut_off'],
'max_speed': max_speed,
'resample_fails': proposal_dict['resample_fails'],
'd_max_fail_multiplier': proposal_dict['d_max_fail_multiplier']}
print(setup_dict)
porto_sim_dir = os.getcwd()
graph_path = porto_sim_dir + '/portotaxi_graph_portugal-140101.osm._simple.graphml'
graph = ox.load_graphml(graph_path)
test_route_data_path = porto_sim_dir + '/test_route.csv'
# Load long-lat polylines
polyline_ll = np.array(json.loads(pd.read_csv(test_route_data_path)['POLYLINE'][0]))
# Convert to utm
polyline = bmm.long_lat_to_utm(polyline_ll, graph)
save_dir = porto_sim_dir + '/tv_output/'
# Create save_dir if not found
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# Save simulation parameters
with open(save_dir + 'setup_dict', 'w+') as f:
json.dump(setup_dict, f)
# Setup map-matching model
mm_model = bmm.ExponentialMapMatchingModel()
mm_model.max_speed = max_speed
# Run FFBSi
ffbsi_route = bmm.offline_map_match(graph,
polyline,
ffbsi_n_samps,
timestamps=timestamps,
mm_model=mm_model,
max_rejections=max_rejections,
initial_d_truncate=initial_truncation,
**proposal_dict)
utils.clear_cache()
fl_pf_routes = np.empty((num_repeats, len(fl_n_samps), len(lags)), dtype=object)
fl_bsi_routes = np.empty((num_repeats, len(fl_n_samps), len(lags)), dtype=object)
n_pf_failures = 0
n_bsi_failures = 0
for i in range(num_repeats):
for j, n in enumerate(fl_n_samps):
for k, lag in enumerate(lags):
print(i, j, k)
# try:
fl_pf_routes[i, j, k] = bmm._offline_map_match_fl(graph,
polyline,
n,
timestamps=timestamps,
mm_model=mm_model,
lag=lag,
update='PF',
max_rejections=max_rejections,
initial_d_truncate=initial_truncation,
**proposal_dict)
print(f'FL PF {i} {j} {k}: {fl_pf_routes[i, j, k].time}')
# except:
# n_pf_failures += 1
print(f'FL PF failures: {n_pf_failures}')
utils.clear_cache()
if lag == 0 and fl_pf_routes[i, j, k] is not None:
fl_bsi_routes[i, j, k] = fl_pf_routes[i, j, k].copy()
print(f'FL BSi {i} {j} {k}:', fl_bsi_routes[i, j, k].time)
else:
# try:
fl_bsi_routes[i, j, k] = bmm._offline_map_match_fl(graph,
polyline,
n,
timestamps=timestamps,
mm_model=mm_model,
lag=lag,
update='BSi',
max_rejections=max_rejections,
initial_d_truncate=initial_truncation,
**proposal_dict)
print(f'FL BSi {i} {j} {k}:', fl_bsi_routes[i, j, k].time)
# except:
# n_bsi_failures += 1
print(f'FL BSi failures: {n_bsi_failures}')
utils.clear_cache()
print(f'FL PF failures: {n_pf_failures}')
print(f'FL BSi failures: {n_bsi_failures}')
np.save(save_dir + 'fl_pf', fl_pf_routes)
np.save(save_dir + 'fl_bsi', fl_bsi_routes)
ffbsi_route_arr = np.empty(1, dtype=object)
ffbsi_route_arr[0] = ffbsi_route
np.save(save_dir + 'ffbsi', ffbsi_route_arr)
#
# fl_pf_routes = np.load(save_dir + 'fl_pf.npy', allow_pickle=True)
# fl_bsi_routes = np.load(save_dir + 'fl_bsi.npy', allow_pickle=True)
# ffbsi_route = np.load(save_dir + 'ffbsi.npy', allow_pickle=True)[0]
# with open(save_dir + 'setup_dict') as f:
# setup_dict = json.load(f)
observation_times = ffbsi_route.observation_times
speeds = False
bins = 5
interval = 60
num_ints = int(observation_times[-1] / interval)
fl_pf_dist_tvs = np.empty(
(setup_dict['num_repeats'], len(setup_dict['fl_n_samps']), len(setup_dict['lags']), num_ints))
fl_bsi_dist_tvs = np.empty_like(fl_pf_dist_tvs)
# Calculate TV distance distances from FFBSi for each observations time
for i in range(setup_dict['num_repeats']):
for j, n in enumerate(setup_dict['fl_n_samps']):
for k, lag in enumerate(setup_dict['lags']):
print(i, j, k)
if fl_pf_routes[i, j, k] is not None:
fl_pf_dist_tvs[i, j, k] = utils.interval_tv_dists(ffbsi_route,
fl_pf_routes[i, j, k],
interval=interval,
speeds=speeds,
bins=bins)
else:
fl_pf_dist_tvs[i, j, k] = 1.
if fl_bsi_routes[i, j, k] is not None:
fl_bsi_dist_tvs[i, j, k] = utils.interval_tv_dists(ffbsi_route,
fl_bsi_routes[i, j, k],
interval=interval,
speeds=speeds,
bins=bins)
else:
fl_bsi_dist_tvs[i, j, k] = 1.
np.save(save_dir + f'fl_pf_tv_dist_speeds{speeds}_bins{bins}_interval{interval}', fl_pf_dist_tvs)
np.save(save_dir + f'fl_bsi_tv_dist_speeds{speeds}_bins{bins}_interval{interval}', fl_bsi_dist_tvs)
# fl_pf_dist_tvs = np.load(save_dir + f'fl_pf_tv_dist_speeds{speeds}_bins{bins}_interval{interval}.npy')
# fl_bsi_dist_tvs = np.load(save_dir + f'fl_bsi_tv_dist_speeds{speeds}_bins{bins}_interval{interval}.npy')
utils.plot_metric_over_time(setup_dict,
np.mean(fl_pf_dist_tvs, axis=0),
np.mean(fl_bsi_dist_tvs, axis=0),
save_dir=save_dir + f'each_tv_compare_dist_speeds{speeds}_bins{bins}_interval{interval}',
t_linspace=np.arange(1, num_ints + 1),
x_lab='Minute',
x_ticks=np.arange(num_ints + 1, step=int(num_ints/8)))
| 8,082 | 41.319372 | 117 | py |
bmm | bmm-master/simulations/cambridge/utils.py | import functools
import gc
import numpy as np
import osmnx as ox
from networkx import write_gpickle, read_gpickle
import bmm
def download_cambridge_graph(save_path):
cambridge_ll_bbox = [52.245, 52.150, 0.220, 0.025]
raw_graph = ox.graph_from_bbox(*cambridge_ll_bbox,
truncate_by_edge=True,
simplify=False,
network_type='drive')
projected_graph = ox.project_graph(raw_graph)
simplified_graph = ox.simplify_graph(projected_graph)
write_gpickle(simplified_graph, save_path)
# Load cam_graph of Cambridge
def load_graph(path):
graph = read_gpickle(path)
return graph
# Clear lru_cache
def clear_cache():
gc.collect()
wrappers = [
a for a in gc.get_objects()
if isinstance(a, functools._lru_cache_wrapper)]
for wrapper in wrappers:
wrapper.cache_clear()
| 933 | 21.238095 | 57 | py |
bmm | bmm-master/simulations/cambridge/simulated_parameter_training.py | import numpy as np
import os
from .utils import sample_route, download_cambridge_graph, load_graph
import bmm
np.random.seed(0)
# Load cam_graph
graph_path = os.getcwd() + '/cambridge_projected_simple.graphml'
if not os.path.exists(graph_path):
download_cambridge_graph(graph_path)
# Load networkx cam_graph
cam_graph = load_graph(graph_path)
timestamps = 15
gen_model = bmm.ExponentialMapMatchingModel()
gen_model.distance_params['zero_dist_prob_neg_exponent'] = -np.log(0.10) / timestamps
gen_model.distance_params['lambda_speed'] = 0.05
gen_model.deviation_beta = 0.05
gen_model.gps_sd = 3.0
gen_model.max_speed = 50
num_inter_cut_off = None
num_pos_routes_cap = 500
# Generate simulated routes
num_routes = 20
min_route_length = 40
max_route_length = 50
sample_d_refine = 1
n_iter = 200
params_track = []
routes = [sample_route(cam_graph, gen_model, timestamps, max_route_length, d_refine=sample_d_refine,
num_inter_cut_off=num_inter_cut_off, num_pos_route_cap=num_pos_routes_cap) for _ in range(num_routes)]
true_polylines = [bmm.observation_time_rows(rou)[:, 5:7] for rou in routes]
routes_obs_rows = [bmm.observation_time_rows(rou) for rou in routes]
len_routes = [len(rou) for rou in routes]
len_obs = np.array([len(po) for po in true_polylines])
while np.any(len_obs < min_route_length):
for i in range(num_routes):
if len_obs[i] < min_route_length:
routes[i] = sample_route(cam_graph, gen_model, timestamps, max_route_length, d_refine=sample_d_refine,
num_inter_cut_off=num_inter_cut_off, num_pos_route_cap=num_pos_routes_cap)
true_polylines = [bmm.observation_time_rows(rou)[:, 5:7] for rou in routes]
routes_obs_rows = [bmm.observation_time_rows(rou) for rou in routes]
len_routes = [len(rou) for rou in routes]
len_obs = np.array([len(po) for po in true_polylines])
print(np.sum(len_obs < min_route_length))
observations = [po + gen_model.gps_sd * np.random.normal(size=po.shape) for po in true_polylines]
###
distances = np.concatenate([a[1:, -1] for a in routes_obs_rows])
print(np.mean(distances < 1e-5))
# print(-np.log(np.mean(distances < 1e-5)) / 15)
print(np.sum(distances < 1e-5))
# Run EM
tune_model = bmm.ExponentialMapMatchingModel()
tune_model.distance_params['zero_dist_prob_neg_exponent'] = -np.log(0.15) / timestamps
tune_model.distance_params['lambda_speed'] = 1/10
tune_model.deviation_beta = 0.1
tune_model.gps_sd = 7.
tune_model.max_speed = 50
params_track_single = bmm.offline_em(cam_graph, tune_model, timestamps, observations,
save_path=os.getcwd() + '/tuned_sim_params.pickle',
n_iter=n_iter,
max_rejections=30, d_max_fail_multiplier=1.5,
initial_d_truncate=50, num_inter_cut_off=num_inter_cut_off,
ess_threshold=1.,
gradient_stepsize_scale=1e-5, gradient_stepsize_neg_exp=0.5)
| 3,048 | 36.641975 | 114 | py |
bmm | bmm-master/simulations/cambridge/single_route_ffbsi.py | import json
import numpy as np
import matplotlib.pyplot as plt
import os
from utils import download_cambridge_graph, load_graph
import bmm
# Setup
seed = 0
np.random.seed(seed)
# Model parameters
time_interval = 100
route_length = 4
gps_sd = 10
num_inter_cut_off = 10
# Inference parameters
n_samps = 1000
max_rejections = 30
proposal_dict = {'proposal': 'optimal'}
save_dir = os.getcwd() + '/single_ffbsi/'
# Initiate map-matching probabilistic model
mm_model = bmm.ExponentialMapMatchingModel()
mm_model.gps_sd = gps_sd
if not os.path.exists(save_dir):
os.makedirs(save_dir)
setup_dict = {'seed': seed,
'time_interval': time_interval,
'max_route_length': route_length,
'zero_dist_prob_neg_exponent': mm_model.distance_params['zero_dist_prob_neg_exponent'],
'lambda_speed': mm_model.distance_params['lambda_speed'],
'deviation_beta': mm_model.deviation_beta,
'gps_sd': mm_model.gps_sd,
'num_inter_cut_off': num_inter_cut_off,
'n_samps': n_samps,
'max_rejections': max_rejections}
with open(save_dir + 'setup_dict', 'w+') as f:
json.dump(setup_dict, f)
# Load cam_graph
graph_path = os.getcwd() + '/cambridge_projected_simple.graphml'
if not os.path.exists(graph_path):
download_cambridge_graph(graph_path)
cam_graph = load_graph(graph_path)
observations_ll = [[0.12188, 52.198387],
[0.125389, 52.197771],
[0.128354, 52.199379],
[0.130296, 52.201701],
[0.127742, 52.20407],
[0.126433, 52.205753],
[0.127836, 52.207831],
[0.126082, 52.212281]]
observations = bmm.long_lat_to_utm(observations_ll, cam_graph)
fig, ax = bmm.plot(cam_graph, polyline=observations)
fig.savefig(save_dir + 'observations', dpi=400, bbox_inches='tight')
ffbsi_route = bmm.offline_map_match(cam_graph, observations, n_samps, time_interval, mm_model,
max_rejections=max_rejections, num_inter_cut_off=num_inter_cut_off, d_max=700)
ffbsi_route_arr = np.empty(1, dtype='object')
ffbsi_route_arr[0] = ffbsi_route
np.save(save_dir + 'ffbsi_route', ffbsi_route_arr)
# ffbsi_route = np.load(save_dir + 'ffbsi_route.npy', allow_pickle=True)[0]
fig2, ax2 = bmm.plot(cam_graph, ffbsi_route, observations)
fig2.savefig(save_dir + 'ffbsi', dpi=400, bbox_inches='tight')
def dist_hist(particle_distances, viterbi_distances=None):
fig, axes = plt.subplots(len(particle_distances), sharex=True)
axes[-1].set_xlabel('Metres')
# axes[0].xlim = (0, 165)
for i, d in enumerate(particle_distances):
axes[i].hist(d, bins=40, color='purple', alpha=0.5, zorder=0, density=True)
axes[i].set_yticklabels([])
if viterbi_distances is not None:
axes[i].scatter(viterbi_distances[i], 0, s=100, zorder=1, color='blue')
axes[i].set_ylabel(f'$d_{i + 1}$')
plt.tight_layout()
return fig, axes
ffbsi_dists = np.array([bmm.observation_time_rows(p)[1:, -1] for p in ffbsi_route])
optim_route = np.load(save_dir + 'optim_route.npy', allow_pickle=True)
optim_dists = bmm.observation_time_rows(optim_route)[1:, -1]
fig_optim, ax_optim = bmm.plot(cam_graph, optim_route, observations)
fig_optim.savefig(save_dir + 'optim', dpi=400, bbox_inches='tight')
fig_hists, axes_hists = dist_hist(ffbsi_dists.T, optim_dists)
fig_hists.savefig(save_dir + 'ffbsi_hists', dpi=400)
| 3,504 | 31.155963 | 114 | py |
bmm | bmm-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'bmm'
copyright = '2021, Sam Duffield'
author = 'Sam Duffield'
# The full version, including alpha/beta/rc tags
release = '1.1'
version = '1.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'sphinx_rtd_theme']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
autodoc_typehints = "description"
html4_writer = True
| 2,323 | 34.753846 | 79 | py |
STEP | STEP-master/src/utils.py | import numpy as np
def get_neighbor_finder(data, uniform, max_node_idx=None):
max_node_idx = max(data.sources.max(), data.destinations.max()) if max_node_idx is None else max_node_idx
adj_list = [[] for _ in range(max_node_idx + 1)]
for source, destination, edge_idx, timestamp in zip(data.sources, data.destinations,
data.edge_idxs,
data.timestamps):
adj_list[source].append((destination, edge_idx, timestamp))
adj_list[destination].append((source, edge_idx, timestamp))
return NeighborFinder(adj_list, uniform=uniform)
def masked_get_neighbor_finder(data, edge_score_dict, pruning_ratio,uniform, max_node_idx=None):
max_node_idx = max(data.sources.max(), data.destinations.max()) if max_node_idx is None else max_node_idx
adj_list = [[] for _ in range(max_node_idx + 1)]
count = 0
score_list = list(edge_score_dict.values())
c = int(len(score_list) * pruning_ratio)
score_list.sort()
threshold = score_list[c]
for source, destination, edge_idx, timestamp in zip(data.sources, data.destinations,
data.edge_idxs,
data.timestamps):
temp_score = edge_score_dict[edge_idx]
if temp_score > threshold:
count += 1
adj_list[source].append((destination, edge_idx, timestamp))
adj_list[destination].append((source, edge_idx, timestamp))
print('Retain %f of edge, total %d edge' %(count/len(edge_score_dict), len(edge_score_dict)) )
return NeighborFinder(adj_list, uniform=uniform)
class NeighborFinder:
def __init__(self, adj_list, uniform=False, seed=None):
self.node_to_neighbors = []
self.node_to_edge_idxs = []
self.node_to_edge_timestamps = []
self.node_to_edge_type = []
for neighbors in adj_list:
# Neighbors is a list of tuples (neighbor, edge_idx, timestamp)
# We sort the list based on timestamp
sorted_neighhbors = sorted(neighbors, key=lambda x: x[2])
self.node_to_neighbors.append(np.array([x[0] for x in sorted_neighhbors]))
self.node_to_edge_idxs.append(np.array([x[1] for x in sorted_neighhbors]))
self.node_to_edge_timestamps.append(np.array([x[2] for x in sorted_neighhbors]))
self.uniform = uniform
if seed is not None:
self.seed = seed
self.random_state = np.random.RandomState(self.seed)
def find_before(self, src_idx_list, cut_time_list, n_neighbors, exclude_node):
"""
Extracts all the interactions happening before cut_time for user src_idx in the overall interaction graph. The returned interactions are sorted by time.
Returns 3 lists: neighbors, edge_idxs, timestamps
"""
neighbor_nodes_array = np.array([], dtype=np.int64)
node_to_edge_idxs_array = np.array([], dtype=np.int64)
node_to_edge_timestamps_array = np.array([])
edge_list = np.array([], dtype=np.int64).reshape([-1, 2])
for idx, (src_idx, cut_time) in enumerate(zip(src_idx_list, cut_time_list)):
i = np.searchsorted(self.node_to_edge_timestamps[src_idx], cut_time)
neighbor_nodes = self.node_to_neighbors[src_idx][:i]
neighbor_edge_idxs = self.node_to_edge_idxs[src_idx][:i]
neighbor_times = self.node_to_edge_timestamps[src_idx][:i]
index = np.where(~np.isin(neighbor_nodes, np.array(exclude_node)))
neighbor_nodes = neighbor_nodes[index]
neighbor_edge_idxs = neighbor_edge_idxs[index]
neighbor_times = neighbor_times[index]
n_min_neighbors = min(len(neighbor_nodes), n_neighbors)
if n_min_neighbors > 0:
if self.uniform:
sampled_idx = np.random.choice(range(len(neighbor_nodes)), n_min_neighbors, replace=False)
neighbor_nodes = neighbor_nodes[sampled_idx]
neighbor_edge_idxs = neighbor_edge_idxs[sampled_idx]
neighbor_times = neighbor_times[sampled_idx]
else:
neighbor_nodes = neighbor_nodes[-n_min_neighbors:]
neighbor_edge_idxs = neighbor_edge_idxs[-n_min_neighbors:]
neighbor_times = neighbor_times[-n_min_neighbors:]
temp_srcid = np.array([src_idx] * len(neighbor_nodes))
temp_edge_id = np.column_stack((temp_srcid, neighbor_nodes))
neighbor_nodes_array = np.concatenate((neighbor_nodes_array, neighbor_nodes))
node_to_edge_idxs_array = np.concatenate((node_to_edge_idxs_array, neighbor_edge_idxs))
node_to_edge_timestamps_array = np.concatenate((node_to_edge_timestamps_array, neighbor_times))
edge_list = np.concatenate((edge_list, temp_edge_id.astype(np.int64)))
return neighbor_nodes_array, node_to_edge_idxs_array, node_to_edge_timestamps_array, edge_list
def get_temporal_neighbor_all(self, source_node, timestamp, n_layer, n_neighbors):
#assert (len(source_node) == len(timestamp))
edge_list = np.array([], dtype=np.int64).reshape([-1, 2])
time_list = np.array([])
idx_list = np.array([], dtype=np.int64)
temp_center_node = [source_node]
temp_center_time = [timestamp]
exclude_node = []
for i in range(n_layer):
neighbor_nodes, neighbor_edge_idxs, neighbor_times, neighbor_edge_node= self.find_before(temp_center_node, temp_center_time, n_neighbors, exclude_node)
if len(neighbor_nodes) > 0 and n_neighbors>0:
idx_list = np.concatenate((idx_list, neighbor_edge_idxs))
time_list = np.concatenate((time_list, neighbor_times))
edge_list = np.concatenate((edge_list, neighbor_edge_node))
exclude_node = temp_center_node
temp_center_node = np.unique(neighbor_nodes).tolist()
temp_center_time = [timestamp] * len(temp_center_node)
else:
break
return edge_list, time_list, idx_list
class RandEdgeSampler(object):
def __init__(self, src_list, dst_list, seed=None):
self.seed = None
self.src_list = np.unique(src_list)
self.dst_list = np.unique(dst_list)
if seed is not None:
self.seed = seed
self.random_state = np.random.RandomState(self.seed)
def sample(self, size):
if self.seed is None:
src_index = np.random.randint(0, len(self.src_list), size)
dst_index = np.random.randint(0, len(self.dst_list), size)
else:
src_index = self.random_state.randint(0, len(self.src_list), size)
dst_index = self.random_state.randint(0, len(self.dst_list), size)
return self.src_list[src_index], self.dst_list[dst_index]
def reset_random_state(self):
self.random_state = np.random.RandomState(self.seed)
| 7,050 | 44.490323 | 163 | py |
STEP | STEP-master/src/train_gnn.py | import pytorch_lightning as pyl
import torch
import torch.nn.functional as F
import numpy as np
import datasets as dataset
import torch.utils.data
import sklearn
from option import args
from model.tgat import TGAT
class ModelLightning(pyl.LightningModule):
def __init__(self, config, backbone):
super().__init__()
self.config = config
self.backbone = backbone
pass
def forward(self, batch):
##ToDo
x = self.backbone(
batch['src_edge_feat'],
batch['src_edge_to_time'],
batch['src_center_node_idx'],
batch['src_neigh_edge'],
batch['src_node_features']
)
return x
def training_step(self, batch, batch_idx):
logits = self(batch)
lables = batch['labels']
loss = F.binary_cross_entropy_with_logits(
logits, lables, reduction='none')
loss = torch.mean(loss)
self.log("loss2", loss, on_step=True, prog_bar=True, logger=False)
return loss
def validation_step(self, batch, batch_idx):
org_logits = self(batch).sigmoid()
return {'org_proba': org_logits, 'label':batch['labels']}
def validation_epoch_end(self, outputs):
org_pred = torch.cat([output['org_proba'] for output in outputs])
label = torch.cat([output['label'] for output in outputs])
if torch.sum(label > 0):
org_valid_auc = sklearn.metrics.roc_auc_score(label.cpu().numpy().flatten(), org_pred.cpu().numpy().flatten())
else:
org_valid_auc = 0
self.log('org_valid_auc', org_valid_auc, sync_dist=True)
self.log('learning rate', self.optimizers(0).param_groups[0]['lr'])
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config.learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=10, gamma=0.7)
return [optimizer], [scheduler]
def backward(
self, loss, *args, **kargs):
super().backward(loss, *args, **kargs)
for p in self.parameters():
if (p.grad is not None and torch.any(torch.isnan(p.grad))) or \
torch.any(torch.isnan(p)):
raise RuntimeError('nan happend')
pass
pass
def predict_step(self, batch, batch_idx: int , dataloader_idx: int = None):
scores, _ = self(batch)
proba = torch.sigmoid(scores)
labels = batch['labels']
return proba.cpu().numpy().flatten(), labels.cpu().numpy().flatten()
if __name__=='__main__':
config = args
dataset_train = dataset.DygDataset(config, 'train')
dataset_valid = dataset.DygDataset(config, 'valid')
gpus = None if config.gpus == 0 else config.gpus
collate_fn = dataset.Collate(config)
backbone = TGAT(config)
model = ModelLightning(
config, backbone=backbone)
loader_train = torch.utils.data.DataLoader(
dataset=dataset_train,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
pin_memory=True,
#sampler=dataset.RandomDropSampler(dataset_train, 0),
collate_fn=collate_fn.dyg_collate_fn
)
loader_valid = torch.utils.data.DataLoader(
dataset=dataset_valid,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
collate_fn=collate_fn.dyg_collate_fn
)
trainer = pyl.Trainer(
logger=pyl.loggers.CSVLogger('../lightning_logs_gnn'),
gradient_clip_val=0.1,
replace_sampler_ddp=False,
max_epochs=10,
gpus=gpus
)
trainer.fit(
model, train_dataloaders=loader_train,
val_dataloaders=loader_valid
) | 3,844 | 28.128788 | 122 | py |
STEP | STEP-master/src/option.py | import argparse
parser = argparse.ArgumentParser(description='Denoise')
parser.add_argument('--dir_data', type=str, default='../dataset')
parser.add_argument('--data_set', type=str, default='wikipedia')
parser.add_argument('--output_edge_txt', type=str, default='./result/edge_pred.txt')
parser.add_argument('--mask_edge', action='store_true', default=False)
parser.add_argument('--bipartite', action='store_true')
parser.add_argument('--mode', type=str, default='gsn', choices=('origin', 'dropedge', 'gsn'))
parser.add_argument('--prior_ratio', type=float, default=0.5)
parser.add_argument('--pruning_ratio', type=float, default=0.5)
##data param
parser.add_argument('--n_neighbors', type=int, default=20, help='Maximum number of connected edge per node')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--num_data_workers', type=int, default=15)
parser.add_argument('--gpus', type=int, default=1)
parser.add_argument('--accelerator', type=str, default='dp')
##model param
parser.add_argument('--ckpt_file', type=str, default='./')
parser.add_argument('--input_dim', type=int, default=172)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--n_heads', type=int, default=2)
parser.add_argument('--drop_out', type=float, default=0.1)
parser.add_argument('--n_layer', type=int, default=2, help='Number of network layers')
parser.add_argument('--learning_rate', type=float, default=5e-4)
args = parser.parse_args()
| 1,480 | 45.28125 | 108 | py |
STEP | STEP-master/src/datasets_edge.py | import torch
import torch.utils.data
import os
import numpy as np
import random
import pandas as pd
class Data:
def __init__(self, sources, destinations, timestamps, edge_idxs, labels):
self.sources = sources
self.destinations = destinations
self.timestamps = timestamps
self.edge_idxs = edge_idxs
self.labels = labels
self.n_interactions = len(sources)
self.unique_nodes = set(sources) | set(destinations)
self.n_unique_nodes = len(self.unique_nodes)
class EdgeDataset(torch.utils.data.Dataset):
def __init__(self, config):
self.config = config
dataset_name = '{}/ml_{}'.format(self.config.dir_data, self.config.data_set)
self.full_data, self.positive_eids, self.edge_features, self.node_features = \
self.get_data(dataset_name)
self.index_start = self.positive_eids[0]
def get_data(self, dataset_name):
graph_df = pd.read_csv('{}.csv'.format(dataset_name))
edge_features = np.load('{}.npy'.format(dataset_name))
node_features = np.load('{}_node.npy'.format(dataset_name))
sources = graph_df.u.values
destinations = graph_df.i.values
edge_idxs = graph_df.idx.values
labels = graph_df.label.values
timestamps = graph_df.ts.values
random.seed(2020)
positive_eids = np.where(timestamps >= 0)[0]
full_data = Data(sources, destinations, timestamps, edge_idxs, labels)
return full_data, positive_eids, edge_features, node_features
def __getitem__(self, item):
item += self.index_start
edge_idx = self.full_data.edge_idxs[item]
edge_feature = self.edge_features[edge_idx]
edge_idx = np.array(edge_idx)
return {
'edge_feature': torch.from_numpy(edge_feature.astype(np.float32)).reshape(1,-1),
'edge_idx': torch.from_numpy(edge_idx).reshape(1)
}
def __len__(self):
return len(self.positive_eids)
class Collate:
def __init__(self, config):
self.config = config
def dyg_collate_fn(self, batch):
edge_feature = torch.cat([b['edge_feature'] for b in batch], dim=0) #n1,f
edge_idx = torch.cat([b['edge_idx'] for b in batch], dim=0) # n
return {
'edge_feature':edge_feature,
'edge_idx': edge_idx
}
class RandomDropSampler(torch.utils.data.Sampler):
r"""Samples elements sequentially, always in the same order.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, dataset, drop_rate):
self.dataset = dataset
self.drop_rate = drop_rate
self.drop_num = int(len(dataset) * drop_rate)
def __iter__(self):
arange = np.arange(len(self.dataset))
np.random.shuffle(arange)
#indices = arange[: (1 - self.drop_num)]
#return iter(np.sort(indices))
indices = arange
return iter(indices)
def __len__(self):
return len(self.dataset) - self.drop_num
| 3,073 | 26.693694 | 92 | py |
STEP | STEP-master/src/datasets.py | import torch
import torch.utils.data
import os
import numpy as np
from option import args
import random
import pandas as pd
from utils import get_neighbor_finder, masked_get_neighbor_finder
from operator import itemgetter
class Data:
def __init__(self, sources, destinations, timestamps, edge_idxs, labels):
self.sources = sources
self.destinations = destinations
self.timestamps = timestamps
self.edge_idxs = edge_idxs
self.labels = labels
self.n_interactions = len(sources)
self.unique_nodes = set(sources) | set(destinations)
self.n_unique_nodes = len(self.unique_nodes)
class DygDataset(torch.utils.data.Dataset):
def __init__(self, config, split_flag, split_list=[0.7, 0.15, 0.15]):
self.config = config
dataset_name = '{}/ml_{}'.format(self.config.dir_data, self.config.data_set)
self.full_data, self.positive_eids, self.edge_features, self.node_features = \
self.get_data(dataset_name, split_flag, split_list)
if self.config.mask_edge:
id_list = []
edge_score_list = []
with open(self.config.output_edge_txt) as f:
for idx, line in enumerate(f):
e = line.strip().split('\t')
id = int(e[0])
pred_score = float(e[1])
id_list.append(id)
edge_score_list.append(pred_score)
edge_score_dict = dict(zip(id_list,edge_score_list))
self.ngh_finder = masked_get_neighbor_finder(self.full_data, edge_score_dict, self.config.pruning_ratio,uniform=False)
else:
self.ngh_finder = get_neighbor_finder(self.full_data, uniform=False)
self.index_start = self.positive_eids[0]
def get_data(self, dataset_name, split_flag, split_list):
graph_df = pd.read_csv('{}.csv'.format(dataset_name))
edge_features = np.load('{}.npy'.format(dataset_name))
node_features = np.load('{}_node.npy'.format(dataset_name))
val_time, test_time = list(np.quantile(graph_df.ts, [split_list[0], split_list[0]+ split_list[1]]))
sources = graph_df.u.values
destinations = graph_df.i.values
edge_idxs = graph_df.idx.values
labels = graph_df.label.values
timestamps = graph_df.ts.values
random.seed(2020)
train_mask = np.where(timestamps <= val_time)[0]
test_mask = np.where(timestamps > test_time)[0]
val_mask = np.where(np.logical_and(timestamps <= test_time, timestamps > val_time))[0]
full_data = Data(sources, destinations, timestamps, edge_idxs, labels)
if split_flag == 'train':
positive_eids = train_mask
pass
elif split_flag == 'valid':
positive_eids = val_mask
pass
elif split_flag == 'test':
positive_eids = test_mask
pass
else:
raise RuntimeError(f'no recognize split: {split_flag}')
return full_data, positive_eids, edge_features, node_features
def edge_padding(self, neigh_edge, neigh_time, edge_feat, src_neigh_idx, source_node):
neigh_edge = np.concatenate((neigh_edge, np.tile(source_node.reshape(-1, 1), (1, 2))), axis=0)
neigh_time = np.concatenate((neigh_time, np.zeros([1], dtype=neigh_time.dtype)), axis=0)
edge_feat = np.concatenate((edge_feat, np.zeros([1, edge_feat.shape[1]], dtype=edge_feat.dtype)), axis=0)
src_neigh_idx = np.concatenate((src_neigh_idx, np.zeros([1], dtype=src_neigh_idx.dtype)), axis=0)
return neigh_edge, neigh_time, edge_feat, src_neigh_idx
def __getitem__(self, item):
item += self.index_start
source_node = self.full_data.sources[item]
target_node = self.full_data.destinations[item]
current_time = self.full_data.timestamps[item]
label = self.full_data.labels[item]
edge_idx = self.full_data.edge_idxs[item]
src_neigh_edge, src_neigh_time, src_neigh_idx = self.ngh_finder.get_temporal_neighbor_all(source_node,
current_time,
self.config.n_layer,
self.config.n_neighbors)
src_edge_feature = self.edge_features[src_neigh_idx].astype(np.float32)
src_edge_to_time = current_time - src_neigh_time
src_center_node_idx = np.reshape(source_node, [-1])
if src_neigh_edge.shape[0] == 0:
src_neigh_edge, src_edge_to_time, src_edge_feature, src_neigh_idx = self.edge_padding(
src_neigh_edge, src_edge_to_time, src_edge_feature, src_neigh_idx, src_center_node_idx)
label = np.reshape(label, [-1])
return {
'src_center_node_idx': src_center_node_idx,
'src_neigh_edge': torch.from_numpy(src_neigh_edge),
'src_edge_feature': torch.from_numpy(src_edge_feature),
'src_edge_to_time': torch.from_numpy(src_edge_to_time.astype(np.float32)),
'init_edge_index': torch.from_numpy(src_neigh_idx),
'label': torch.from_numpy(label)
}
def __len__(self):
return len(self.positive_eids)
class Collate:
def __init__(self, config):
self.config = config
dataset_name = '{}/ml_{}'.format(self.config.dir_data, self.config.data_set)
self.node_features = np.load('{}_node.npy'.format(dataset_name)).astype(np.float32)
def reindex_fn(self, edge_list, center_node_idx, batch_idx):
edge_list_projection = edge_list.view(-1).numpy().tolist()
edge_list_projection = [str(x) for x in edge_list_projection]
single_batch_idx = torch.unique(batch_idx).numpy().astype(np.int32).tolist()
single_batch_idx = [str(x) for x in single_batch_idx]
batch_idx_projection = batch_idx.reshape([-1, 1]).repeat((1, 2)).view(-1).numpy().astype(np.int32).tolist()
batch_idx_projection = [str(x) for x in batch_idx_projection]
center_node_idx_projection = center_node_idx.tolist()
center_node_idx_projection = [str(x) for x in center_node_idx_projection]
union_edge_list = list(map(lambda x: x[0] + '_' + x[1], zip(batch_idx_projection, edge_list_projection)))
union_center_node_list = list(
map(lambda x: x[0] + '_' + x[1], zip(single_batch_idx, center_node_idx_projection)))
org_node_id = union_edge_list + union_center_node_list
org_node_id = list(set(org_node_id))
new_node_id = torch.arange(0, len(org_node_id)).numpy()
reid_map = dict(zip(org_node_id, new_node_id))
true_org_node_id = [int(x.split('_')[1]) for x in org_node_id]
true_org_node_id = np.array(true_org_node_id)
keys = union_edge_list
new_edge_list = itemgetter(*keys)(reid_map)
new_edge_list = np.array(new_edge_list).reshape([-1, 2])
new_edge_list = torch.from_numpy(new_edge_list)
batch_node_features = self.node_features[true_org_node_id]
new_center_node_idx = np.array(itemgetter(*union_center_node_list)(reid_map))
return new_center_node_idx, new_edge_list, batch_node_features
def get_batchidx_fn(self, edge_list):
batch_size = len(edge_list)
feat_max_len = np.sum([feat.shape[0] for feat in edge_list])
mask = torch.zeros((feat_max_len))
count = 0
for i, ifeat in enumerate(edge_list):
size = ifeat.shape[0]
mask[count:count+size] = i + 1
count += size
return mask
def dyg_collate_fn(self, batch):
src_edge_feat = torch.cat([b['src_edge_feature'] for b in batch], dim=0) #n1,f
src_edge_to_time = torch.cat([b['src_edge_to_time'] for b in batch], dim=0) #n
init_edge_index = torch.cat([b['init_edge_index'] for b in batch], dim=0) # n
src_center_node_idx = np.concatenate([b['src_center_node_idx'] for b in batch], axis=0) #b
batch_idx = self.get_batchidx_fn([b['src_neigh_edge'] for b in batch])
src_neigh_edge = torch.cat([b['src_neigh_edge'] for b in batch], dim=0) #n,2
src_center_node_idx, src_neigh_edge, src_node_features = self.reindex_fn(src_neigh_edge, src_center_node_idx, batch_idx)
label = torch.cat([b['label'] for b in batch], dim=0)
return {
'src_edge_feat':src_edge_feat,
'src_edge_to_time':src_edge_to_time,
'src_center_node_idx':torch.from_numpy(src_center_node_idx),
'src_neigh_edge':src_neigh_edge,
'src_node_features': torch.from_numpy(src_node_features),
'init_edge_index': init_edge_index,
'batch_idx': batch_idx,
'labels':label
}
class RandomDropSampler(torch.utils.data.Sampler):
r"""Samples elements sequentially, always in the same order.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, dataset, drop_rate):
self.dataset = dataset
self.drop_rate = drop_rate
self.drop_num = int(len(dataset) * drop_rate)
def __iter__(self):
arange = np.arange(len(self.dataset))
np.random.shuffle(arange)
#indices = arange[: (1 - self.drop_num)]
#return iter(np.sort(indices))
indices = arange
return iter(indices)
def __len__(self):
return len(self.dataset) - self.drop_num
if __name__ == '__main__':
config = args
a = DygDataset(config, 'train')
#a = DygDatasetTest(config, 'val')
c = a[5000]
#print(c)
| 9,798 | 39.159836 | 130 | py |
STEP | STEP-master/src/build_dataset_graph.py | from option import args
import pandas as pd
import numpy as np
def preprocess(data_name):
u_list, i_list, ts_list, label_list = [], [], [], []
feat_l = []
idx_list = []
with open(data_name) as f:
s = next(f)
for idx, line in enumerate(f):
e = line.strip().split(',')
u = int(e[0])
i = int(e[1])
ts = float(e[2])
label = float(e[3]) # int(e[3])
feat = np.array([float(x) for x in e[4:]])
u_list.append(u)
i_list.append(i)
ts_list.append(ts)
label_list.append(label)
idx_list.append(idx)
feat_l.append(feat)
return pd.DataFrame({'u': u_list,
'i': i_list,
'ts': ts_list,
'label': label_list,
'idx': idx_list}), np.array(feat_l)
def reindex(df, bipartite=False):
new_df = df.copy()
if bipartite:
assert (df.u.max() - df.u.min() + 1 == len(df.u.unique()))
assert (df.i.max() - df.i.min() + 1 == len(df.i.unique()))
upper_u = df.u.max() + 1
new_i = df.i + upper_u
new_df.i = new_i
new_df.u += 1
new_df.i += 1
new_df.idx += 1
else:
new_df.u += 1
new_df.i += 1
new_df.idx += 1
return new_df
if __name__=='__main__':
dateset_dir = '{}/{}.csv'.format(args.dir_data, args.data_set)
OUT_DF = '{}/ml_{}.csv'.format(args.dir_data, args.data_set)
OUT_FEAT = '{}/ml_{}.npy'.format(args.dir_data, args.data_set)
OUT_NODE_FEAT = '{}/ml_{}_node.npy'.format(args.dir_data, args.data_set)
df, feat = preprocess(dateset_dir)
new_df = reindex(df, args.bipartite)
empty = np.zeros(feat.shape[1])[np.newaxis, :]
feat = np.vstack([empty, feat])
max_idx = max(new_df.u.max(), new_df.i.max())
rand_feat = np.zeros((max_idx + 1, 172))
new_df.to_csv(OUT_DF)
np.save(OUT_FEAT, feat)
np.save(OUT_NODE_FEAT, rand_feat)
| 1,891 | 23.894737 | 76 | py |
STEP | STEP-master/src/eval_gnn.py | import pytorch_lightning as pyl
import torch
import torch.nn.functional as F
import numpy as np
import datasets as dataset
import torch.utils.data
import sklearn
from option import args
from model.tgat import TGAT
class ModelLightning(pyl.LightningModule):
def __init__(self, config, backbone):
super().__init__()
self.config = config
self.backbone = backbone
pass
def forward(self, batch):
##ToDo
x = self.backbone(
batch['src_edge_feat'],
batch['src_edge_to_time'],
batch['src_center_node_idx'],
batch['src_neigh_edge'],
batch['src_node_features']
)
return x
def training_step(self, batch, batch_idx):
logits = self(batch)
lables = batch['labels']
loss = F.binary_cross_entropy_with_logits(
logits, lables, reduction='none')
loss = torch.mean(loss)
self.log("loss2", loss, on_step=True, prog_bar=True, logger=False)
return loss
def validation_step(self, batch, batch_idx):
org_logits = self(batch).sigmoid()
return {'org_proba': org_logits, 'label': batch['labels']}
def validation_epoch_end(self, outputs):
org_pred = torch.cat([output['org_proba'] for output in outputs])
label = torch.cat([output['label'] for output in outputs])
if torch.sum(label > 0):
org_valid_auc = sklearn.metrics.roc_auc_score(label.cpu().numpy().flatten(), org_pred.cpu().numpy().flatten())
else:
org_valid_auc = 0
self.log('org_valid_auc', org_valid_auc, sync_dist=True)
self.log('learning rate', self.optimizers(0).param_groups[0]['lr'])
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config.learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=10, gamma=0.7)
return [optimizer], [scheduler]
def backward(
self, loss, *args, **kargs):
super().backward(loss, *args, **kargs)
for p in self.parameters():
if (p.grad is not None and torch.any(torch.isnan(p.grad))) or \
torch.any(torch.isnan(p)):
raise RuntimeError('nan happend')
pass
pass
def predict_step(self, batch, batch_idx: int , dataloader_idx: int = None):
scores = self(batch).sigmoid()
labels = batch['labels']
return scores.cpu().numpy().flatten(), labels.cpu().numpy().flatten()
if __name__=='__main__':
config = args
dataset_valid = dataset.DygDataset(config, 'test')
gpus = None if config.gpus == 0 else config.gpus
collate_fn = dataset.Collate(config)
backbone = TGAT(config)
model = ModelLightning(
config, backbone=backbone)
ckpt_file = config.ckpt_file
pretrained_dict = torch.load(ckpt_file)['state_dict']
model_dict = model.state_dict()
state_dict = {k:v for k,v in pretrained_dict.items() if
k.split('.')[1] in ['embedding_module', 'time_encoder', 'node_preocess_fn',
'edge_preocess_fn', 'affinity_score']}
model_dict.update(state_dict)
model.load_state_dict(model_dict)
model.eval()
loader_valid = torch.utils.data.DataLoader(
dataset=dataset_valid,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
collate_fn=collate_fn.dyg_collate_fn
)
trainer = pyl.Trainer(
logger=pyl.loggers.CSVLogger('../lightning_logs_gnn'),
gradient_clip_val=0.1,
replace_sampler_ddp=False,
max_epochs=10,
gpus=gpus
)
with torch.no_grad():
pred = trainer.predit(model, loader_valid)
pass
prob, label = [x[0] for x in pred], [x[1] for x in pred]
prob = np.hstack(prob)
label = np.hstack(label)
org_valid_auc = sklearn.metrics.roc_auc_score(label.astype(int), prob)
print('test_acu>>>>>>>>>>>>>>>>>>', org_valid_auc) | 4,116 | 29.272059 | 122 | py |
STEP | STEP-master/src/edge_pruning.py | import pytorch_lightning as pyl
import torch
import torch.nn.functional as F
import numpy as np
import datasets_edge as dataset
import torch.utils.data
import sklearn
from option import args
from model.precom_model import Precom_Model
class ModelLightning(pyl.LightningModule):
def __init__(self, config, backbone):
super().__init__()
self.config = config
self.backbone = backbone
pass
def forward(self, batch):
##ToDo
x = self.backbone(
batch['edge_feature']
)
return x
def training_step(self, batch, batch_idx):
logits = self(batch)
lables = batch['labels']
loss = F.binary_cross_entropy_with_logits(
logits, lables, reduction='none')
loss = torch.mean(loss)
self.log("loss2", loss, on_step=True, prog_bar=True, logger=False)
return loss
def validation_step(self, batch, batch_idx):
org_logits = self(batch).sigmoid()
return {'org_proba': org_logits, 'label': batch['labels']}
def validation_epoch_end(self, outputs):
org_pred = torch.cat([output['org_proba'] for output in outputs])
label = torch.cat([output['label'] for output in outputs])
if torch.sum(label > 0):
org_valid_auc = sklearn.metrics.roc_auc_score(label.cpu().numpy().flatten(), org_pred.cpu().numpy().flatten())
else:
org_valid_auc = 0
self.log('org_valid_auc', org_valid_auc, sync_dist=True)
self.log('learning rate', self.optimizers(0).param_groups[0]['lr'])
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config.learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=10, gamma=0.7)
return [optimizer], [scheduler]
def backward(
self, loss, *args, **kargs):
super().backward(loss, *args, **kargs)
for p in self.parameters():
if (p.grad is not None and torch.any(torch.isnan(p.grad))) or \
torch.any(torch.isnan(p)):
raise RuntimeError('nan happend')
pass
pass
def predict_step(self, batch, batch_idx: int , dataloader_idx: int = None):
scores = self(batch)
proba = torch.softmax(scores, dim=1)[:, 1]
edge_index = batch['edge_idx']
return edge_index.cpu().numpy().flatten(), proba.cpu().numpy().flatten()
if __name__=='__main__':
config = args
datasets = dataset.EdgeDataset(config)
gpus = None if config.gpus == 0 else config.gpus
collate_fn = dataset.Collate(config)
backbone = Precom_Model(config.input_dim, config.hidden_dim, config.drop_out)
model = ModelLightning(
config, backbone=backbone)
ckpt_file = config.ckpt_file
pretrained_dict = torch.load(ckpt_file)['state_dict']
model_dict = model.state_dict()
state_dict = {k:v for k,v in pretrained_dict.items() if k.split('.')[1] in ['edge_precom'] }
rename_state_dict = { k.replace('.edge_precom', ''):v for k,v in state_dict.items()}
model_dict.update(rename_state_dict)
model.load_state_dict(model_dict)
dataloader = torch.utils.data.DataLoader(
dataset=datasets,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
collate_fn=collate_fn.dyg_collate_fn
)
trainer = pyl.Trainer(
accelerator=config.accelerator,
gpus=gpus
)
with torch.no_grad():
pred = trainer.predict(
model, dataloader)
pass
#edge_id = np.hstack(edge_idx)
edge_id, pred_score = [x[0] for x in pred], [x[1] for x in pred]
edge_id = np.hstack(edge_id)
pred_score = np.hstack(pred_score)
output_file = config.output_edge_txt
with open(output_file, 'w') as fout:
for i, (id, score) in enumerate(zip(edge_id, pred_score)):
fout.write(f'{id}\t')
fout.write(f'{score}\n')
pass
pass
pass | 4,096 | 29.125 | 122 | py |
STEP | STEP-master/src/train_gsn.py | import pytorch_lightning as pyl
import torch
import datasets as dataset
import torch.utils.data
from option import args
from model.tgat import TGAT
class ModelLightning(pyl.LightningModule):
def __init__(self, config, backbone):
super().__init__()
self.config = config
self.backbone = backbone
pass
def forward(self, batch):
##ToDo
x = self.backbone.forward_gsn(
batch['src_edge_feat'],
batch['src_edge_to_time'],
batch['src_center_node_idx'],
batch['src_neigh_edge'],
batch['src_node_features'],
batch['init_edge_index'],
batch['batch_idx'],
self.global_step
)
return x
def training_step(self, batch, batch_idx):
x = self(batch)
if self.global_step > 500:
lambda1 = 0.01
else:
lambda1 = 0
loss_mi = x['loss']
loss_sparse = x['loss_sparse']
loss_edge_pre = x['loss_edge_pred']
self.log('loss_mi', loss_mi, on_step=True, prog_bar=True, logger=False)
self.log('loss_sparse', loss_sparse, on_step=True, prog_bar=True, logger=False)
self.log('loss_edge_pre', loss_edge_pre, on_step=True, prog_bar=True, logger=False)
self.log('max_probs', x['max_probs'], on_step=True, prog_bar=True, logger=False)
self.log('min_probs', x['min_probs'], on_step=True, prog_bar=True, logger=False)
loss = loss_mi + 0.01 * loss_sparse + lambda1 * loss_edge_pre
return loss
def validation_step(self, batch, batch_idx):
output = self(batch)
loss_mi = output['loss']
loss_sparse = output['loss_sparse']
loss_edge_pre = output['loss_edge_pred']
return {'loss_mi': loss_mi, 'loss_sparse': loss_sparse, 'loss_edge_pre':loss_edge_pre}
def validation_epoch_end(self, outputs):
loss_mi = torch.cat([output['loss_mi'].reshape([1]) for output in outputs])
loss_sparse = torch.cat([output['loss_sparse'].reshape([1]) for output in outputs])
loss_edge_pre = torch.cat([output['loss_edge_pre'].reshape([1]) for output in outputs])
loss_mi = torch.mean(loss_mi)
loss_sparse = torch.mean(loss_sparse)
loss_edge_pre = torch.mean(loss_edge_pre)
self.log('loss_mi', loss_mi, sync_dist=True)
self.log('loss_sparse', loss_sparse, sync_dist=True)
self.log('loss_edge_pre', loss_edge_pre, sync_dist=True)
self.log('learning rate', self.optimizers(0).param_groups[0]['lr'])
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.config.learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, step_size=10, gamma=0.7)
return [optimizer], [scheduler]
def backward(
self, loss, *args, **kargs):
super().backward(loss, *args, **kargs)
for p in self.parameters():
if (p.grad is not None and torch.any(torch.isnan(p.grad))) or \
torch.any(torch.isnan(p)):
raise RuntimeError('nan happend')
pass
pass
def predict_step(self, batch, batch_idx: int, dataloader_idx: int = None):
scores, _ = self(batch)
proba = torch.sigmoid(scores)
labels = batch['labels']
return proba.cpu().numpy().flatten(), labels.cpu().numpy().flatten()
if __name__ == '__main__':
config = args
dataset_train = dataset.DygDataset(config, 'train')
dataset_valid = dataset.DygDataset(config, 'valid')
gpus = None if config.gpus == 0 else config.gpus
collate_fn = dataset.Collate(config)
backbone = TGAT(config)
model = ModelLightning(
config, backbone=backbone)
loader_train = torch.utils.data.DataLoader(
dataset=dataset_train,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
pin_memory=True,
# sampler=dataset.RandomDropSampler(dataset_train, 0),
collate_fn=collate_fn.dyg_collate_fn
)
loader_valid = torch.utils.data.DataLoader(
dataset=dataset_valid,
batch_size=config.batch_size,
shuffle=False,
num_workers=config.num_data_workers,
collate_fn=collate_fn.dyg_collate_fn
)
checkpoint_callback = pyl.callbacks.ModelCheckpoint(
monitor = None,
save_top_k = -1,
save_last=True
)
trainer = pyl.Trainer(
logger=pyl.loggers.CSVLogger('../lightning_logs_gsn'),
gradient_clip_val=0.1,
replace_sampler_ddp=False,
max_epochs=10,
gpus=gpus,
callbacks=[checkpoint_callback]
)
trainer.fit(
model, train_dataloaders=loader_train,
val_dataloaders=loader_valid
)
| 4,863 | 31.426667 | 95 | py |
STEP | STEP-master/src/modules/time_encoding.py | import torch
import numpy as np
class TimeEncode(torch.nn.Module):
# Time Encoding proposed by TGAT
def __init__(self, dimension):
super(TimeEncode, self).__init__()
self.dimension = dimension
self.w = torch.nn.Linear(1, dimension)
self.w.weight = torch.nn.Parameter((torch.from_numpy(1 / 10 ** np.linspace(0, 1.5, dimension)))
.float().reshape(dimension, -1))
self.w.bias = torch.nn.Parameter(torch.zeros(dimension).float())
def forward(self, t):
# t has shape [batch_size, seq_len]
# Add dimension at the end to apply linear layer --> [batch_size, seq_len, 1]
t = torch.log(t + 1)
t = t.unsqueeze(dim=1)
# output has shape [batch_size, seq_len, dimension]
output = torch.cos(self.w(t))
return output
| 802 | 28.740741 | 99 | py |
STEP | STEP-master/src/modules/utils.py | import numpy as np
import torch
from sklearn.metrics import roc_auc_score
import math
import time
class MergeLayer(torch.nn.Module):
def __init__(self, dim1, dim2, dim3, dim4):
super().__init__()
self.layer_norm = torch.nn.LayerNorm(dim1 + dim2)
self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
self.fc2 = torch.nn.Linear(dim3, dim4)
self.act = torch.nn.ReLU()
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x1, x2):
x = torch.cat([x1, x2], dim=1)
#x = self.layer_norm(x)
h = self.act(self.fc1(x))
return self.fc2(h) + x2
class MergeLayer_output(torch.nn.Module):
def __init__(self, dim1, dim2, dim3= 1024, dim4=1, drop_out=0.2):
super().__init__()
self.fc1 = torch.nn.Linear(dim1 + dim2, dim3)
self.fc2 = torch.nn.Linear(dim3, dim3)
self.fc3 = torch.nn.Linear(dim3, dim2)
self.fc4 = torch.nn.Linear(dim2 , dim4 )
self.act = torch.nn.ReLU()
self.dropout = torch.nn.Dropout(p=drop_out)
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x1, x2):
x = torch.cat([x1, x2], dim=1)
h = self.act(self.fc1(x))
h = self.act(self.fc2(h))
h = self.dropout(self.act(self.fc3(h)))
h = self.fc4(h)
return h
class Feat_Process_Layer(torch.nn.Module):
def __init__(self, dim1, dim2):
super().__init__()
self.fc1 = torch.nn.Linear(dim1, dim2)
self.fc2 = torch.nn.Linear(dim2, dim2)
self.act = torch.nn.ReLU()
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
def forward(self, x):
h = self.act(self.fc1(x))
return self.fc2(h)
| 1,731 | 26.935484 | 67 | py |
STEP | STEP-master/src/modules/temporal_attention.py | import torch
import torch_scatter as scatter
from torch import nn
from modules.utils import MergeLayer
class TemporalAttentionLayer2(torch.nn.Module):
"""
Temporal attention layer. Return the temporal embedding of a node given the node itself,
its neighbors and the edge timestamps.
"""
def __init__(self, n_node_features, n_neighbors_features, n_edge_features, time_dim,
output_dimension, n_head=2,
dropout=0.1):
super(TemporalAttentionLayer2, self).__init__()
self.time_dim = time_dim
self.num_heads = n_head
self.reverse_flag = True
self.selfloop_flag = True
self.query_dim = n_node_features + time_dim
self.key_dim = n_node_features + time_dim + n_edge_features
self.out_dim = output_dimension
self.d_k = self.out_dim // self.num_heads
self.scale = self.d_k ** (-0.5)
self.q_linears = torch.nn.Sequential( torch.nn.Linear(self.query_dim, self.out_dim), torch.nn.ReLU())
self.k_linears = torch.nn.Sequential(torch.nn.Linear(self.key_dim, self.out_dim), torch.nn.ReLU())
self.v_linears = torch.nn.Linear(self.key_dim, self.out_dim)
self.dropout = torch.nn.Dropout(dropout)
self.merger = MergeLayer(n_node_features, n_node_features, n_node_features, output_dimension)
def forward(self, node_feature, edge_index, edge_feature, src_time_features, edge_time, mask=None, sample_ratio=None):
'''
:param node_feature:
:param edge_index:
:param edge_feature:
:param src_time_features:
:param edge_time:
:param mask:
:return:
'''
if mask is not None and sample_ratio is None:
edge_index, edge_feature, src_time_features, edge_time = self.mask_edge(edge_index,
edge_feature,
src_time_features,
edge_time,
mask)
if self.reverse_flag:
edge_index, edge_feature, src_time_features, edge_time, sample_ratio = self.reverse_edge(edge_index,
edge_feature,
src_time_features,
edge_time, sample_ratio)
if self.selfloop_flag:
edge_index, edge_feature, src_time_features, edge_time, sample_ratio = self.add_selfloop(node_feature,
edge_index,
edge_feature,
src_time_features,
edge_time, sample_ratio)
node_i = edge_index[:, 0]
node_j = edge_index[:, 1]
node_feat_i = node_feature[node_i, :]
node_feat_j = node_feature[node_j, :]
source_node_vec = torch.cat([node_feat_i, src_time_features], dim=1)
target_node_vec = torch.cat([node_feat_j, edge_feature, edge_time], dim=1)
q_mat = torch.reshape(self.q_linears(source_node_vec), [-1, self.num_heads, self.d_k]) # [T, N , D]
k_mat = torch.reshape(self.k_linears(target_node_vec) , [-1, self.num_heads, self.d_k]) # [T, N , D]
v_mat = torch.reshape(self.v_linears(target_node_vec) , [-1, self.num_heads, self.d_k]) # [T, N , D]
res_att_sub = torch.sum(torch.multiply(q_mat, k_mat), dim=-1 )* self.scale #[T, N]
'''
Softmax based on target node's id (edge_index_i). Store attention value in self.att.
'''
if sample_ratio is not None:
res_att_sub = torch.multiply(res_att_sub, sample_ratio.reshape([-1,1]).repeat(1, self.num_heads))
scores = self.scatter_softmax(res_att_sub, node_i)
#if self.dropout is not None:
# scores = self.dropout(scores)
v = torch.multiply(torch.unsqueeze(scores, dim=2), v_mat)
v = torch.reshape(v, [-1, self.out_dim])
out_emb = scatter.scatter_add(v, node_i, dim=0)
out_emb = self.agg_out(node_feature, out_emb)
return out_emb
def scatter_softmax(self, res_att, node_i):
n_head = self.num_heads
scores = torch.zeros_like(res_att)
for i in range(n_head):
scores[:, i] = scatter.composite.scatter_softmax(res_att[:, i], node_i)
return scores
def reverse_edge(self, edge_index, edge_feature, src_time_features, edge_time, sample_ratio):
reverse_edge_index = torch.cat((edge_index[:, 1].unsqueeze(1), edge_index[:, 0].unsqueeze(1)), dim=1)
two_edge_index = torch.cat((edge_index, reverse_edge_index), dim=0)
src_time_features = src_time_features.repeat(2, 1)
edge_feature = edge_feature.repeat(2, 1)
edge_time = edge_time.repeat(2, 1)
if sample_ratio is not None:
sample_ratio = sample_ratio.repeat(2)
return two_edge_index, edge_feature, src_time_features, edge_time, sample_ratio
def add_selfloop(self, node_feature, edge_index, edge_feature, src_time_features, edge_time, sample_ratio):
time_emb_unit = src_time_features[0, :].reshape(1, -1)
node_id = torch.arange(0, node_feature.shape[0], device=edge_index.device).reshape(-1,1)
edge_index = torch.cat([edge_index, node_id.repeat(1,2)], dim=0)
edge_feature = torch.cat([edge_feature, torch.zeros([node_id.shape[0], edge_feature.shape[1]], dtype=edge_feature.dtype, device=edge_feature.device)], dim=0)
src_time_features = torch.cat([src_time_features, time_emb_unit.repeat(node_id.shape[0], 1)], dim=0)
edge_time = torch.cat([edge_time, time_emb_unit.repeat(node_id.shape[0], 1)], dim=0)
if sample_ratio is not None:
sample_ratio =torch.cat([sample_ratio, torch.ones([node_id.shape[0]], dtype=sample_ratio.dtype, device=sample_ratio.device)])
return edge_index, edge_feature, src_time_features, edge_time, sample_ratio
def mask_edge(self, edge_index, edge_feature, src_time_features, edge_time, mask):
retain_index = torch.nonzero(mask).reshape([-1])
edge_index = edge_index[retain_index]
edge_feature = edge_feature[retain_index]
src_time_features = src_time_features[retain_index]
edge_time = edge_time[retain_index]
return edge_index, edge_feature, src_time_features, edge_time
def agg_out(self, node_feat_pre, node_rep):
out_embedding = self.merger(node_rep, node_feat_pre)
return out_embedding | 6,626 | 43.47651 | 161 | py |
STEP | STEP-master/src/modules/embedding_module.py | import torch
from torch import nn
import numpy as np
import math
from modules.temporal_attention import TemporalAttentionLayer2
class EmbeddingModule(nn.Module):
def __init__(self, time_encoder, n_layers,
node_features_dims, edge_features_dims, time_features_dim, hidden_dim, dropout):
super(EmbeddingModule, self).__init__()
self.time_encoder = time_encoder
self.n_layers = n_layers
self.n_node_features = node_features_dims
self.n_edge_features = edge_features_dims
self.n_time_features = time_features_dim
self.dropout = dropout
self.embedding_dimension = hidden_dim
def compute_embedding(self, neigh_edge, edge_to_time, edge_feat, node_feat):
pass
class GraphEmbedding(EmbeddingModule):
def __init__(self, time_encoder, n_layers,
node_features_dims, edge_features_dims, time_features_dim, hidden_dim, n_heads=2, dropout=0.1):
super(GraphEmbedding, self).__init__(time_encoder, n_layers,
node_features_dims, edge_features_dims, time_features_dim,
hidden_dim, dropout)
def compute_embedding(self, neigh_edge, edge_to_time, edge_feat, node_feat, edge_mask=None, sample_ratio=None):
'''
:param neigh_edge: [E, 2]
:param edge_to_time: [E]
:param edge_feat: [E, D]
:param node_feat: [N, D]
:return:
'''
n_layers = self.n_layers
assert (n_layers >= 0)
temp_node_feat = node_feat
src_time_embeddings = self.time_encoder(torch.zeros_like(edge_to_time))
edge_time_embeddings = self.time_encoder(edge_to_time)
mask = edge_mask
for layer in range(n_layers):
temp_node_feat = self.aggregate(n_layers, temp_node_feat,
neigh_edge,
edge_feat,
src_time_embeddings,
edge_time_embeddings,
mask, sample_ratio)
out = temp_node_feat
return out
def aggregate(self, n_layers, node_features, edge_index,
edge_feature,
src_time_features, edge_time_embeddings, mask, sample_ratio):
return None
# class GraphSumEmbedding(GraphEmbedding):
# def __init__(self, time_encoder, n_layers, node_features_dims, edge_features_dims,
# time_features_dim, hidden_dim, n_heads=2, dropout=0.1):
# super(GraphSumEmbedding, self).__init__( time_encoder=time_encoder, n_layers=n_layers,
# node_features_dims=node_features_dims,
# edge_features_dims=edge_features_dims,
# time_features_dim=time_features_dim,
# hidden_dim=hidden_dim,
# n_heads=n_heads, dropout=dropout)
#
# self.linear_1 = torch.nn.ModuleList([torch.nn.Linear(hidden_dim + time_features_dim +
# edge_features_dims, hidden_dim)
# for _ in range(n_layers)])
# self.linear_2 = torch.nn.ModuleList(
# [torch.nn.Linear(hidden_dim + node_features_dims + time_features_dim,
# hidden_dim) for _ in range(n_layers)])
#
# def aggregate(self, n_layer, source_node_features, source_nodes_time_embedding,
# neighbor_embeddings,
# edge_time_embeddings, edge_features, mask):
# neighbors_features = torch.cat([neighbor_embeddings, edge_time_embeddings, edge_features],
# dim=2)
# neighbor_embeddings = self.linear_1[n_layer - 1](neighbors_features)
# neighbors_sum = torch.nn.functional.relu(torch.sum(neighbor_embeddings, dim=1))
#
# source_features = torch.cat([source_node_features,
# source_nodes_time_embedding.squeeze()], dim=1)
# source_embedding = torch.cat([neighbors_sum, source_features], dim=1)
# source_embedding = self.linear_2[n_layer - 1](source_embedding)
#
# return source_embedding
class GraphAttentionEmbedding(GraphEmbedding):
def __init__(self, time_encoder, n_layers, node_features_dims, edge_features_dims,
time_features_dim, hidden_dim, n_heads=2, dropout=0.1):
super(GraphAttentionEmbedding, self).__init__(time_encoder, n_layers,
node_features_dims, edge_features_dims,
time_features_dim,
hidden_dim,
n_heads, dropout)
self.attention_models = torch.nn.ModuleList([TemporalAttentionLayer2(
n_node_features=node_features_dims,
n_neighbors_features=node_features_dims,
n_edge_features=edge_features_dims,
time_dim=time_features_dim,
n_head=n_heads,
dropout=dropout,
output_dimension=hidden_dim)
for _ in range(n_layers)])
def aggregate(self, n_layer, node_features, edge_index,
edge_feature,
src_time_features, edge_time_embeddings, mask, sample_ratio):
attention_model = self.attention_models[n_layer - 1]
source_embedding = attention_model(node_features,
edge_index,
edge_feature,
src_time_features,
edge_time_embeddings,
mask, sample_ratio)
return source_embedding
def get_embedding_module(module_type, time_encoder, n_layers,
node_features_dims, edge_features_dims, time_features_dim,
hidden_dim, n_heads=2, dropout=0.1):
if module_type == "graph_attention":
return GraphAttentionEmbedding( time_encoder=time_encoder,
n_layers=n_layers,
node_features_dims=node_features_dims,
edge_features_dims=edge_features_dims,
time_features_dim=time_features_dim,
hidden_dim=hidden_dim,
n_heads=n_heads, dropout=dropout)
# elif module_type == "graph_sum":
# return GraphSumEmbedding(time_encoder=time_encoder,
# n_layers=n_layers,
# node_features_dims=node_features_dims,
# edge_features_dims=edge_features_dims,
# time_features_dim=time_features_dim,
# hidden_dim=hidden_dim,
# n_heads=n_heads, dropout=dropout)
else:
raise ValueError("Embedding Module {} not supported".format(module_type))
| 7,015 | 42.57764 | 113 | py |
STEP | STEP-master/src/model/tgat.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter as scatter
from modules.utils import MergeLayer_output, Feat_Process_Layer
from modules.embedding_module import get_embedding_module
from modules.time_encoding import TimeEncode
from model.gsn import Graph_sampling_network
from model.gpn import Graph_pruning_network
class TGAT(torch.nn.Module):
def __init__(self, config, embedding_module_type="graph_attention"):
super().__init__()
self.cfg = config
self.nodes_dim = self.cfg.input_dim
self.edge_dim = self.cfg.input_dim
self.dims = self.cfg.hidden_dim
self.n_heads = self.cfg.n_heads
self.dropout = self.cfg.drop_out
self.n_layers = self.cfg.n_layer
self.mode = self.cfg.mode
self.time_encoder = TimeEncode(dimension=self.dims)
self.embedding_module_type = embedding_module_type
self.embedding_module = get_embedding_module(module_type=embedding_module_type,
time_encoder=self.time_encoder,
n_layers=self.n_layers,
node_features_dims=self.dims,
edge_features_dims=self.dims,
time_features_dim=self.dims,
hidden_dim=self.dims,
n_heads=self.n_heads, dropout=self.dropout)
self.node_preocess_fn = Feat_Process_Layer(self.nodes_dim, self.dims)
self.edge_preocess_fn = Feat_Process_Layer(self.edge_dim, self.dims)
self.affinity_score = MergeLayer_output(self.dims, self.dims, drop_out=0.2)
self.predictor = nn.Sequential(nn.Linear(self.dims, self.dims)) # output layer
self.gsn = Graph_sampling_network(self.dims, self.cfg.batch_size, mask_ratio=self.cfg.prior_ratio)
self.edge_precom = Graph_pruning_network(self.edge_dim, self.dims, self.dropout)
def forward(self, src_org_edge_feat, src_edge_to_time, src_center_node_idx, src_neigh_edge, src_node_features):
# apply tgat
source_node_embedding, src_edge_feat = self.compute_temporal_embeddings(src_neigh_edge, src_edge_to_time,
src_org_edge_feat, src_node_features)
loclsrc_node_embedding = source_node_embedding[src_center_node_idx, :]
score = self.affinity_score(loclsrc_node_embedding, loclsrc_node_embedding)
return score
def forward_gsn(self, src_org_edge_feat, src_edge_to_time, src_center_node_idx, src_neigh_edge, src_node_features,
init_edge_index, batch_idx, step=0):
# apply tgat
source_node_embedding, src_edge_feat = self.compute_temporal_embeddings(src_neigh_edge, src_edge_to_time,
src_org_edge_feat, src_node_features)
loclsrc_node_embedding = source_node_embedding[src_center_node_idx,:]
source_node_embedding_clone = source_node_embedding
src_edge_feat_clone = src_edge_feat
time_encodding = self.time_encoder(src_edge_to_time)
src_edge_probs, src_edge_mask = self.gsn.forward(source_node_embedding_clone, src_neigh_edge, time_encodding,
src_edge_feat_clone, batch_idx, src_center_node_idx)
gsn_node_embedding, _ = self.compute_temporal_embeddings(src_neigh_edge, src_edge_to_time,
src_org_edge_feat, src_node_features,
None, src_edge_probs)
gsnsrc_node_embedding = gsn_node_embedding[src_center_node_idx, :]
unique_edge_label = self.Merge_same_edge(init_edge_index, src_edge_mask)
temp_edge_label = unique_edge_label.long()
edge_logit = self.edge_precom(src_org_edge_feat)
loss_edge_pred = self.edge_precom.loss(edge_logit.reshape([-1, 2]), temp_edge_label)
loss_sparse = self.gsn.sparse_loss(src_edge_probs)
loss_mi = self.ddgcl(loclsrc_node_embedding, gsnsrc_node_embedding)
max_probs = torch.max(src_edge_probs)
min_probs = torch.min(src_edge_probs)
return {'loss': loss_mi, 'loss_sparse': loss_sparse, 'loss_edge_pred':loss_edge_pred,
'edge_index': src_neigh_edge, 'edge_probs': src_edge_probs,
'max_probs':max_probs, 'min_probs':min_probs}
def compute_temporal_embeddings(self, neigh_edge, edge_to_time, edge_feat, node_feat, edge_mask=None, sample_ratio=None):
node_feat = self.node_preocess_fn(node_feat)
edge_feat = self.edge_preocess_fn(edge_feat)
node_embedding = self.embedding_module.compute_embedding(neigh_edge, edge_to_time,
edge_feat, node_feat, edge_mask, sample_ratio)
return node_embedding, edge_feat
def ddgcl(self, x1, x2):
x1 = self.predictor(x1)
l_pos = torch.sigmoid(torch.sum(x1 * x2, dim=-1)).reshape([-1, 1])
l_neg = torch.sigmoid(torch.sum(torch.einsum('nc,kc->nkc', x1, x2), dim=-1))
matrix = torch.diag_embed(torch.diag(l_neg))
l_neg = l_neg - matrix
label1 = torch.ones_like(l_pos)
label2 = torch.zeros_like(l_neg)
logits = torch.cat([l_pos, l_neg], dim=1).reshape([-1])
labels = torch.cat([label1, label2], dim=1).reshape([-1])
loss_bce = torch.nn.BCELoss()
loss = loss_bce(logits, labels)
return loss
def Merge_same_edge(self, init_edge_index, src_edge_mask):
output, _ = scatter.scatter_max(src_edge_mask, init_edge_index, dim=0)
output = output[init_edge_index]
return output | 5,947 | 48.983193 | 125 | py |
STEP | STEP-master/src/model/gsn.py | import torch
import torch.nn.functional as F
import torch_scatter as scatter
class Graph_sampling_network(torch.nn.Module):
def __init__(self, dim, batch_size, mask_ratio=0.5):
super(Graph_sampling_network, self).__init__()
self.mask_act = 'sigmoid'
self.mask_ratio = mask_ratio
self.dim = dim
self.batch_size = batch_size
self.elayers1 = torch.nn.Sequential(
torch.nn.Linear(self.dim * 4, self.dim),
torch.nn.ReLU()
)
self.elayers3 = torch.nn.Sequential(
torch.nn.Linear(2 + self.dim, 1)
#torch.nn.Linear(2, 1)
)
# torch.nn.init.xavier_normal_(self.elayers2.weight)
def concrete_sample(self, log_alpha, beta=1.0):
if self.training:
bias = 0.1
random_noise = torch.empty(log_alpha.shape, dtype=log_alpha.dtype, device=log_alpha.device).uniform_(bias, 1-bias)
gate_inputs = torch.log(random_noise) - torch.log(1-random_noise)
gate_inputs = (gate_inputs + log_alpha) / beta
gate_inputs = torch.sigmoid(gate_inputs)
else:
gate_inputs = torch.sigmoid(log_alpha)
return gate_inputs
def forward(self, node_embeddings, edge_index, time_encodding, edge_feat, batch_idx, src_center_node_idx):
node_i = edge_index[:, 0]
node_j = edge_index[:, 1]
node_feat_i = node_embeddings[node_i, :]
node_feat_j = node_embeddings[node_j, :]
center_node_feat = node_embeddings[src_center_node_idx, :]
h = torch.cat([node_feat_i, node_feat_j, edge_feat, time_encodding], dim=1)
h1 = self.elayers1(h)
redundancy_score = self.redundancy_attention(h1) #[n, 1]
relevance_score = self.relevance_attention(h1, batch_idx.long(), center_node_feat) #[n, 1]
attn_score = torch.cat([redundancy_score, relevance_score, h1], dim=-1)
log_alpha = self.elayers3(attn_score)
edge_sample_probs = self.concrete_sample(log_alpha)
edge_sample_probs = edge_sample_probs.reshape([-1])
_, rank_idx = edge_sample_probs.sort(dim=0)
cut_off_nums = round(edge_sample_probs.shape[0] * self.mask_ratio)
low_idx = rank_idx[:cut_off_nums]
high_idx = rank_idx[cut_off_nums:]
edge_mask = edge_sample_probs.clone().detach()
edge_mask[low_idx] = 0
edge_mask[high_idx] = 1
return edge_sample_probs, edge_mask.byte()
def redundancy_attention(self, x):
x = F.normalize(x, p=2, dim=1)
dots = x @ x.transpose(-1, -2) #[m, m]
attn = torch.softmax(dots, dim=-1)
out = attn - torch.diag_embed(torch.diag(attn))
out = torch.sum(out, dim=-1)
return out.reshape([-1, 1])
def relevance_attention(self, x, batch_id, center_node_feat):
all_node_feat = center_node_feat[batch_id-1, :]
dots = torch.sum(torch.multiply(x, all_node_feat), dim=-1 )
attn = scatter.composite.scatter_softmax(dots, batch_id)
return attn.reshape([-1, 1])
def drop_edge(self, edge_index, batch_idx):
edge_sample_probs = torch.rand(edge_index.shape[0])
# y = torch.unique(batch_idx)
# mask_idx = [ self.get_mask_by_batch_fn(edge_sample_probs, batch_idx, x) for x in y]
# low_idx = torch.cat([x[0] for x in mask_idx], dim=0)
# high_idx= torch.cat([x[1] for x in mask_idx], dim=0)
_, rank_idx = edge_sample_probs.sort(dim=0)
cut_off_nums = round(edge_sample_probs.shape[0] * self.mask_ratio)
low_idx = rank_idx[:cut_off_nums]
high_idx = rank_idx[cut_off_nums:]
edge_mask = edge_sample_probs.clone()
edge_mask[low_idx] = 0
edge_mask[high_idx] = 1
return edge_mask.byte()
def get_mask_by_batch_fn(self, edge_sample_probs, batch_idx, x):
index = torch.nonzero(torch.where(batch_idx == x, batch_idx.clone().detach(), torch.tensor(0.0, device=x.device))).reshape([-1])
edge_sample_probs = edge_sample_probs[index]
_, rank_idx = edge_sample_probs.sort(dim=0)
cut_off_nums = round(edge_sample_probs.shape[0] * self.mask_ratio)
low_idx = rank_idx[:cut_off_nums]
true_low_idx = index[low_idx]
high_idx = rank_idx[cut_off_nums:]
true_high_idx = index[high_idx]
return true_low_idx, true_high_idx
def sparse_loss(self, log_alpha):
var_x = torch.mean(log_alpha * log_alpha) - torch.mean(log_alpha) * torch.mean(log_alpha)
loss_1 = torch.abs(var_x - self.mask_ratio * (1 - self.mask_ratio))
loss_2 = torch.abs(torch.mean(log_alpha) - (1 - self.mask_ratio))
loss = 1 * loss_1 + 1 * loss_2
return loss | 4,736 | 37.201613 | 136 | py |
STEP | STEP-master/src/model/gpn.py | import torch
from modules.utils import MergeLayer_output, Feat_Process_Layer
class Graph_pruning_network(torch.nn.Module):
def __init__(self, input_dim, hidden_dim, drop_out):
super(Graph_pruning_network, self).__init__()
self.edge_dim = input_dim
self.dims = hidden_dim
self.dropout = drop_out
self.affinity_score = Precomput_output(self.edge_dim, self.dims, 2, drop_out=0.2)
self.loss = torch.nn.CrossEntropyLoss()
def forward(self, edge_feat):
edge_logit = self.affinity_score(edge_feat)
return edge_logit
class Precomput_output(torch.nn.Module):
def __init__(self, dim1, dim2, dim3=2, drop_out=0.2):
super().__init__()
self.fc1 = torch.nn.Linear(dim1, dim2)
self.fc2 = torch.nn.Linear(dim2, dim2)
self.fc3 = torch.nn.Linear(dim2, dim3)
self.act = torch.nn.ReLU()
self.dropout = torch.nn.Dropout(p=drop_out)
torch.nn.init.xavier_normal_(self.fc1.weight)
torch.nn.init.xavier_normal_(self.fc2.weight)
torch.nn.init.xavier_normal_(self.fc3.weight)
def forward(self, x):
h = self.act(self.fc1(x))
h = self.dropout(self.act(self.fc2(h)))
h = self.fc3(h)
h = self.concrete_sample(h)
return h
def concrete_sample(self, log_alpha, beta=1.0):
if self.training:
log_alpha = log_alpha.reshape([-1])
bias = 0.1
random_noise = torch.empty(log_alpha.shape, dtype = log_alpha.dtype, device=log_alpha.device).uniform_(bias, 1-bias)
gate_inputs = torch.log(random_noise) - torch.log(1-random_noise)
gate_inputs = (gate_inputs + log_alpha) / beta
gate_inputs = gate_inputs.reshape([-1, 2])
else:
gate_inputs = log_alpha
return gate_inputs
| 1,839 | 34.384615 | 128 | py |
SIT | SIT-master/tree_util.py | import numpy as np
import math
import matplotlib.pyplot as plt
import ipdb
import torch
def rotation_matrix(thea):
return np.array([
[np.cos(thea), -1 * np.sin(thea)],
[np.sin(thea), np.cos(thea)]
])
def generating_tree(seq, dir_list, split_interval=4, degree=3):
# seq [N n seq_len 2]
# dir_list left, right, straight
# return ----> [N n*degree seq_len+interval 2]
tree = np.zeros((seq.shape[0], seq.shape[1] * degree, seq.shape[2] + split_interval, seq.shape[-1]))
for i in range(degree):
curr_seq = seq
curr_dir = np.expand_dims(dir_list[i], 2) # N 1 1 2
for j in range(split_interval):
next_point = curr_seq[:, :, -1:] + curr_dir
curr_seq = np.concatenate((curr_seq, next_point), axis=-2)
tree[:, seq.shape[1] * i:seq.shape[1] * (i + 1)] = curr_seq
return tree
def get_dir(seq, thea=12, degree=3, dir_interval=1):
straight_dir = seq[:, :, -1] - seq[:, :, -dir_interval-1] # N n 2
straight_dir = straight_dir / dir_interval
dir_list = [straight_dir]
num_thea = int((degree - 1) / 2)
for i in range(num_thea):
th = (i + 1) * math.pi / thea
left_dir = np.matmul(np.expand_dims(rotation_matrix(th), 0), np.transpose(straight_dir, (0, 2, 1)))
right_dir = np.matmul(np.expand_dims(rotation_matrix(-th), 0), np.transpose(straight_dir, (0, 2, 1)))
left_dir = np.transpose(left_dir, (0, 2, 1))
right_dir = np.transpose(right_dir, (0, 2, 1))
dir_list.append(left_dir)
dir_list.append(right_dir)
return dir_list
def tree_v3(traj_seq, degree, split_interval, pred_len=12, thea=12):
# traj_seq [N obs_len 2]
basic_tree = traj_seq # N obs_len 2
basic_tree = np.expand_dims(basic_tree, 1) # N 1 obs_len 2
dir_list = get_dir(basic_tree, thea=thea, degree=degree) # split directions with the angle=pi/thea
tree = generating_tree(basic_tree, dir_list, split_interval, degree)
# angle= [4, 4]
for i in range(1, int(np.ceil(pred_len / split_interval))):
tree = generating_tree(tree, dir_list, split_interval, degree)
dir_list = get_dir(tree, 12 // (i + 1), degree=degree)
# dir_list = get_dir(tree, angle[i-1], degree=degree)
# dir_list = get_dir(tree, thea, degree=degree)
return tree
def tree_build(traj_batches, split_interval=4, degree=3, pred_len=12, obs_len=8, thea=6):
assert 1 <= split_interval <= pred_len
tree_batches = []
for b in traj_batches:
obs_traj = b[:, :obs_len]
tree = tree_v3(obs_traj, degree, split_interval, pred_len=pred_len, thea=thea)
tree_batches.append(tree[:, :, obs_traj.shape[1]:b.shape[1]]) # truncating if over-length
return tree_batches
def coarse_gt(full_trajs):
# full_traj N pred_len+1 2
obs_end_fut_traj = full_trajs[:, 7:]
obs_traj = full_trajs[:, :8]
selected_point = [0, 4, 8, 12]
selected_seq = obs_end_fut_traj[:, selected_point]
high_vel = selected_seq[:, 1:] - selected_seq[:, :-1]
high_vel = high_vel / 4
for i in range(12):
if i < 4:
next_point = obs_traj[:, -1:] + high_vel[:, 0:1]
obs_traj = np.concatenate((obs_traj, next_point), axis=1)
if 4 <= i < 8:
next_point = obs_traj[:, -1:] + high_vel[:, 1:2]
obs_traj = np.concatenate((obs_traj, next_point), axis=1)
if 8 <= i < 12:
next_point = obs_traj[:, -1:] + high_vel[:, 2:3]
obs_traj = np.concatenate((obs_traj, next_point), axis=1)
gt_ = obs_traj[:, 8:]
return gt_
def tree_label(tree, traj_seq):
closet_branch_index_batches = []
coarse_gt_list = []
for i in range(len(tree)):
gt = coarse_gt(traj_seq[i])
coarse_gt_list.append(gt)
gt = np.expand_dims(gt, 1) # N 1 pred_len 2
tr = tree[i]
distance_branch = np.linalg.norm(tr - gt, axis=-1) # N n T
# ade = np.mean(distance_branch, axis=-1)
fde = distance_branch[:, :, -1]
# distance = ade + fde
# distance_branch = np.max(distance_branch, axis=-1) # N n
# one-hot label
closet_branch_index = np.argmin(fde, axis=-1)
closet_branch_index_batches.append(closet_branch_index)
return closet_branch_index_batches, coarse_gt_list
def tree_build_iter(traj, split_interval=4, degree=3, pred_len=12, thea=12):
traj = traj.permute(0, 2, 1)
traj = traj[:, :, 2:]
traj = traj.numpy()
assert 1 <= split_interval <= pred_len
obs_traj = traj
tree = tree_v3(obs_traj, degree, split_interval, pred_len=pred_len, thea=thea)
tree = tree - tree[:, :, 7:8]
tree = tree[:, :, obs_traj.shape[1]:20]
return torch.from_numpy(tree).float()
def tree_label(tree, traj_seq):
# label_batches = []
closest_branch_index_batches = []
# closest_dir_index_batches = []
coarse_gt_list = []
interval = 4
for i in range(len(tree)):
# gt = traj_seq[i][:, 8:]
gt = coarse_gt(traj_seq[i])
coarse_gt_list.append(gt)
gt = np.expand_dims(gt, 1) # N 1 pred_len 2
tr = tree[i]
# dir = snowflake_[i] # N n interval 2
# distance_dir = np.linalg.norm(dir - gt[:, :, :interval], axis=-1) # N n T
# distance_dir = np.max(distance_dir, axis=-1) # N n
# one-hot label
# closet_dir_index = np.argmin(distance_dir, axis=-1) # N
# closet_dir_index_batches.append(closet_dir_index)
#
# ade = np.linalg.norm(tr - gt, axis=-1).mean(axis=-1) # N n
# distance_ = np.exp(-ade)
# dis_sum = np.sum(distance_, axis=1, keepdims=True)
# soft_label = distance_ / dis_sum
# min_fde_index = np.argmin(ade, axis=-1)
# label_batches.append(min_fde_index)
distance_branch = np.linalg.norm(tr - gt, axis=-1) # N n T
ade = np.mean(distance_branch, axis=1)
fde = distance_branch[:, :, -1]
# distance_branch = np.max(distance_branch, axis=-1) # N n
# one-hot label
closet_branch_index = np.argmin(fde, axis=-1)
# sec_fde_index = np.argsort(fde, axis=-1)[:, 1]
closest_branch_index_batches.append(closet_branch_index)
return closest_branch_index_batches, coarse_gt_list
def vis2(seq1):
for i in range(seq1.shape[0]):
plt.clf()
for j in range(seq1.shape[1]):
x1 = seq1[i, j, :, 0]
y1 = seq1[i, j, :, 1]
# x2 = seq2[i, :, 0]
# y2 = seq2[i, :, 1]
plt.plot(x1, y1, linestyle="-.", marker='.', color='red')
# plt.plot(x2, y2, linestyle="-.", marker='.', color='green')
plt.savefig('test_tree.png')
ipdb.set_trace()
| 6,747 | 33.080808 | 109 | py |
SIT | SIT-master/dataset.py | import pickle
import numpy as np
from torch.utils import data
from util import get_train_test_data, data_augmentation
from tree_util import tree_build, tree_label
class DatasetETHUCY(data.Dataset):
def __init__(self, data_path, dataset_name, batch_size, is_test, end_centered=True,
data_flip=False, data_scaling=None, obs_len=8, pred_len=12,
split_interval=4, degree=3, thea=6):
'preprocessing for eth-ucy dataset'
data_file = get_train_test_data(data_path, dataset_name, batch_size, is_test)
with open(data_file, 'rb') as f:
data = pickle.load(f)
trajs, masks = data
trajs_new = []
for traj in trajs:
t = np.array(traj)
t = t[:, :, 2:4]
t = data_augmentation(t, end_centered, data_flip, data_scaling)
trajs_new.append(t)
masks_new = []
for mask in masks:
masks_new.append(mask)
traj_new = np.array(trajs_new)
masks_new = np.array(masks_new)
self.trajectory_batches = traj_new.copy()
self.mask_batches = masks_new.copy()
traj_tree = tree_build(traj_new.copy(), split_interval=split_interval, degree=degree, pred_len=pred_len, obs_len=obs_len, thea=thea)
traj_tree = np.array(traj_tree) # N n T 2
self.traj_tree_batches = traj_tree.copy()
# coarse ground truth
if is_test is not True:
closest_branch_index_batches, coarse_gt_list = \
tree_label(traj_tree.copy(), traj_new.copy())
closest_branch_index_batches = np.array(closest_branch_index_batches)
coarse_gt_ = np.array(coarse_gt_list)
self.closest_branch_index_batches = closest_branch_index_batches.copy()
self.coarse_gt_batches = coarse_gt_.copy()
print("Initialized dataloader for ucy-eth!") | 1,893 | 34.074074 | 140 | py |
SIT | SIT-master/run.py | import argparse
from dataset import DatasetETHUCY
import util
import logging
import torch
from model.trajectory_model import TrajectoryModel
from torch.optim import Adam, lr_scheduler
import os
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def run(args: util.Args, device):
logger.info('**** data loading ******')
train_dataset = DatasetETHUCY(args.data_dir, args.dataset, args.train_batch_size, False, args.end_centered,
args.data_flip, args.data_scaling, args.obs_len, args.pred_len,
args.split_temporal_interval, args.tree_degree, args.split_thea)
test_dataset = DatasetETHUCY(args.data_dir, args.dataset, args.train_batch_size, True, args.end_centered,
False, None, args.obs_len, args.pred_len,
args.split_temporal_interval, args.tree_degree, args.split_thea)
logger.info('**** model loading ******')
model_args = util.ModelArgs # You can change the arguments of model directly in the ModelArgs class
model = TrajectoryModel(model_args).to(device)
optimizer = Adam(model.parameters(), lr=args.lr)
reg_criterion = torch.nn.SmoothL1Loss().to(device)
clf_criterion = torch.nn.CrossEntropyLoss().to(device)
if args.lr_scheduler == 0:
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_milestones, gamma=args.lr_gamma)
if args.lr_scheduler == 1:
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.lr_milestones[0])
min_ade = 99
min_fde = 99
best_epoch = 0
logger.info('**** model training ******')
for epoch in range(args.epoch):
total_loss, coarse_reg_loss, fine_reg_loss, clf_loss = train(args, model, optimizer, train_dataset,
reg_criterion, clf_criterion, device)
util.logging(
f'dataset:{args.dataset} '
f'epoch:{epoch} ',
f'total_loss:{sum(total_loss) / len(total_loss)} ',
f'coarse_reg_loss:{sum(coarse_reg_loss) / len(coarse_reg_loss)} ',
f'fine_reg_loss:{sum(fine_reg_loss) / len(fine_reg_loss)} ',
f'clf_loss:{sum(clf_loss) / len(clf_loss)} ',
verbose=True,
file_type='train_loss',
append_log=True
)
ade, fde = test(args, model, test_dataset, device)
util.logging(
f'dataset:{args.dataset} '
f'epoch:{epoch} ',
f'ade:{ade} ',
f'fde:{fde} ',
verbose=True,
file_type='ade_fde',
append_log=True
)
if args.lr_scheduler == 1 or args.lr_scheduler == 0:
scheduler.step()
if min_fde + min_ade > ade + fde:
min_fde = fde
min_ade = ade
best_epoch = epoch
torch.save(model.state_dict(), args.checkpoint + args.dataset + '/model.pth')
logger.info(f'dataset:{args.dataset}, curr_best_epoch:{best_epoch}, curr_min_ade:{min_ade},'
f' curr_min_fde:{min_fde}')
logger.info(f'dataset:{args.dataset}, best_epoch:{best_epoch}, min_ade:{min_ade}, min_fde:{min_fde}')
return
def get_train_loss(fine_trajs, gt_trajs, coarse_trajs, coarse_gt, path_score, closest_label, reg_criterion,
clf_criterion):
fine_trajs = fine_trajs.reshape(gt_trajs.shape)
coarse_trajs = coarse_trajs.reshape(coarse_gt.shape)
coarse_reg_loss = reg_criterion(coarse_trajs, coarse_gt)
fine_reg_loss = reg_criterion(fine_trajs, gt_trajs)
clf_loss = clf_criterion(path_score, closest_label)
loss = coarse_reg_loss + fine_reg_loss + clf_loss
return loss, coarse_reg_loss, fine_reg_loss, clf_loss
def train(args: util.Args, model, optimizer, dataloader, reg_criterion, clf_criterion, device):
model.train()
train_loss_list = []
coarse_reg_loss_list = []
fine_reg_loss_list = []
clf_loss_list = []
for i, (trajs, masks, trees, coarse_gt, closest_label) in enumerate(
zip(dataloader.trajectory_batches, dataloader.mask_batches, dataloader.traj_tree_batches,
dataloader.coarse_gt_batches, dataloader.closest_branch_index_batches)):
trajs = torch.FloatTensor(trajs).to(device)
masks = torch.FloatTensor(masks).to(device)
trees = torch.FloatTensor(trees).to(device)
coarse_gt = torch.FloatTensor(coarse_gt).to(device)
closest_label = torch.LongTensor(closest_label).to(device)
obs_trajs = trajs[:, :args.obs_len, :]
gt_trajs = trajs[:, args.obs_len:, :]
optimizer.zero_grad()
path_score, coarse_trajs, fine_trajs = model(obs_trajs, trees, coarse_gt, closest_label, masks, device)
loss, coarse_reg_loss, fine_reg_loss, clf_loss = \
get_train_loss(fine_trajs, gt_trajs, coarse_trajs, coarse_gt, path_score, closest_label, reg_criterion,
clf_criterion)
loss.backward()
optimizer.step()
train_loss_list.append(loss.item())
coarse_reg_loss_list.append(coarse_reg_loss.item())
fine_reg_loss_list.append(fine_reg_loss.item())
clf_loss_list.append(clf_loss.item())
return train_loss_list, coarse_reg_loss_list, fine_reg_loss_list, clf_loss_list
def test(args: util.Args, model, dataloader, device):
model.eval()
ade = 0
fde = 0
num_ped = 0
num_trajs = 0
for i, (trajs, masks, trees) in enumerate(zip(dataloader.trajectory_batches, dataloader.mask_batches,
dataloader.traj_tree_batches)):
trajs = torch.FloatTensor(trajs).to(device)
masks = torch.FloatTensor(masks).to(device)
trees = torch.FloatTensor(trees).to(device)
with torch.no_grad():
obs_trajs = trajs[:, :args.obs_len, :]
gt_trajs = trajs[:, args.obs_len:, :]
num_trajs += obs_trajs.shape[0]
pred_trajs, _ = model.predict(obs_trajs, trees, masks, args.num_k, device)
min_ade, min_fde = util.get_ade_fde(pred_trajs, gt_trajs, args.num_k)
ade += min_ade.item()
fde += min_fde.item()
num_ped += trajs.shape[0]
ade = ade / num_ped
fde = fde / num_ped
return ade, fde
def main():
logger.info('**** project args ******')
parser = argparse.ArgumentParser()
util.add_argument(parser)
args: util.Args = parser.parse_args()
util.init(args, logger)
device = torch.device('cuda:' + str(args.gpu_num) if torch.cuda.is_available() and args.cuda else 'cpu')
logger.info("device: {}".format(device))
run(args, device)
logger.info(f'Finished!')
if __name__ == '__main__':
main()
| 6,964 | 36.446237 | 115 | py |
SIT | SIT-master/util.py | from typing import Dict
import os
import subprocess
import random
import pickle
import torch
import numpy as np
import argparse
class Args:
dataset = None
epoch = None
lr = None
lr_scheduler = None
lr_milestones = None
lr_gamma = None
obs_len = None
pred_len = None
train_batch_size = None
test_batch_size = None
seed = None
gpu_num = None
checkpoint = None
data_dir = None
log_dir = None
cuda = None
end_centered = None
data_flip = None
data_scaling = None
# Arguments for the building of tree
split_thea = None
split_temporal_interval = None
tree_degree = None
num_k = None
class ModelArgs:
# Arguments for model
in_dim = 2
obs_len = 8
pred_len = 12
hidden1 = 1024
hidden2 = 256
enc_dim = 64
att_layer = 3
tf = True # teacher forcing
out_dim = 2
num_k = 20
def add_argument(parser):
assert isinstance(parser, argparse.ArgumentParser)
parser.add_argument('--dataset', type=str, default='eth', help='eth,hotel,univ,zara1,zara2,sdd')
parser.add_argument('--data_dir', type=str,
default='./dataset/')
parser.add_argument('--log_dir', type=str)
parser.add_argument('--epoch', type=int, default=350)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--lr_scheduler', type=int, default=0, help='0:MultiStepLR, 1:CosineAnnealingLR, other numbers:None')
parser.add_argument('--lr_milestones', type=int, nargs='+', default=[50, 150, 250])
parser.add_argument('--lr_gamma', type=float, default=0.5)
parser.add_argument('--obs_len', type=int, default=8)
parser.add_argument('--pred_len', type=int, default=12)
parser.add_argument('--train_batch_size', type=int, default=512,
help='256 or 512 for eth-ucy, 512 for sdd')
parser.add_argument('--test_batch_size', type=int, default=512,
help='256, 512 or 4096 for eth-ucy, 4096 for sdd')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--gpu_num', type=str, default='6')
parser.add_argument('--checkpoint', type=str, default='./checkpoints/')
parser.add_argument('--end_centered', action='store_true')
parser.add_argument('--data_flip', action='store_true')
parser.add_argument('--data_scaling', type=float, nargs='+', default=None)
parser.add_argument('--split_thea', type=int, default=4)
parser.add_argument('--split_temporal_interval', type=int, default=4)
parser.add_argument('--tree_degree', type=int, default=3)
parser.add_argument('--num_k', type=int, default=20)
def get_input_data(data_dict: Dict, key=None):
try:
return data_dict[key]
except KeyError:
print('KeyError')
args: Args = None
logger = None
def init(args_: Args, logger_):
global args, logger
args = args_
logger = logger_
# assert os.path.exists(args.checkpoint + args.dataset)
assert os.path.exists(args.data_dir + 'test')
assert os.path.exists(args.data_dir + 'train')
if args.log_dir is None:
args.log_dir = args.checkpoint + args.dataset
# os.makedirs(args.checkpoint + args.dataset, exist_ok=True)
# os.makedirs(args.log_dir, exist_ok=True)
if os.path.exists(args.checkpoint + args.dataset):
subprocess.check_output('rm -r {}'.format(args.checkpoint + args.dataset), shell=True, encoding='utf-8')
os.makedirs(args.checkpoint + args.dataset, exist_ok=False)
logger.info("*******" + ' args ' + "******")
# args_dict = vars(args)
# for key in args_dict:
# print("\033[32m" + key + "\033[0m", args_dict[key], end='\t')
# print('')
logging(vars(args_), verbose=True, sep=' ', save_as_pickle=True, file_type=args.dataset + '.args')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
def logging(*inputs, verbose=False, sep=' ', save_as_pickle=False, file_type='args', append_log=False):
'''
write something into log file
:return:
'''
if verbose:
print(*inputs, sep=sep)
if not hasattr(args, 'log_dir'):
return
file = os.path.join(args.log_dir, file_type)
if save_as_pickle:
with open(file, 'wb') as pickle_file:
pickle.dump(*inputs, pickle_file)
if append_log:
with open(file, "a", encoding='utf-8') as fout:
print(*tuple(inputs), file=fout, sep=sep)
print(file=fout)
def get_train_test_data(data_path, dataset_name, batch_size, is_test):
if is_test:
if dataset_name == 'sdd':
return data_path + '/test' + "/social_" + dataset_name + "_test" + "_" + str(
4096) + "_" + str(0) + "_" + str(100) + ".pickle"
else:
return data_path + '/test' + "/social_" + dataset_name + "_test" + "_" + str(
batch_size) + "_" + str(0) + "_" + str(50) + ".pickle"
else:
if dataset_name == 'sdd':
return data_path + '/train' + "/social_" + dataset_name + "_train" + "_" + str(
512) + "_" + str(0) + "_" + str(100) + ".pickle"
else:
return data_path + '/train' + "/social_" + dataset_name + "_train" + "_" + str(
batch_size) + "_" + str(0) + "_" + str(50) + ".pickle"
def data_augmentation(data_, end_centered, is_flip, data_scaling):
if end_centered:
data_ = data_ - data_[:, 7:8]
if is_flip:
data_ = np.flip(data_, axis=-1).copy()
if data_scaling is not None:
data_[:, :, 0] = data_[:, :, 0] * data_scaling[0]
data_[:, :, 1] = data_[:, :, 1] * data_scaling[1]
return data_
def get_ade_fde(pred_trajs, gt_trajs, num_k):
pred_trajs = pred_trajs.reshape(gt_trajs.shape[0], num_k, gt_trajs.shape[1], -1)
gt_trajs = gt_trajs.unsqueeze(1)
norm_ = torch.norm(pred_trajs - gt_trajs, p=2, dim=-1)
ade_ = torch.mean(norm_, dim=-1)
fde_ = norm_[:, :, -1]
min_ade, _ = torch.min(ade_, dim=-1)
min_fde, _ = torch.min(fde_, dim=-1)
min_ade = torch.sum(min_ade)
min_fde = torch.sum(min_fde)
return min_ade, min_fde
| 6,257 | 29.231884 | 125 | py |
SIT | SIT-master/model/component.py | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Activation_Fun(nn.Module):
def __init__(self, act_name):
super(Activation_Fun, self).__init__()
if act_name == 'relu':
self.act = nn.ReLU()
if act_name == 'prelu':
self.act = nn.PReLU()
if act_name == 'sigmoid':
self.act == nn.Sigmoid()
def forward(self, x):
return self.act(x)
class MLP(nn.Module):
def __init__(self, in_size, out_size=None, normalization=False, act_name='prelu'):
super(MLP, self).__init__()
if out_size is None:
out_size = in_size
self.linear = nn.Linear(in_size, out_size)
self.ln = LayerNorm(out_size) if normalization else nn.Sequential()
self.activation = Activation_Fun(act_name)
def forward(self, x):
x = self.linear(x)
x = self.ln(x)
x = self.activation(x)
return x
class LayerNorm(nn.Module):
r"""
Layer normalization.
"""
def __init__(self, hidden_size, eps=1e-5):
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class SelfAttention(nn.Module):
def __init__(self, in_size, hidden_size=256, out_size=64, non_linear=True):
super(SelfAttention, self).__init__()
self.query = nn.Sequential(
nn.Linear(in_size, hidden_size),
nn.PReLU(),
nn.Linear(hidden_size, out_size)
) if non_linear else nn.Linear(in_size, out_size)
self.key = nn.Sequential(
nn.Linear(in_size, hidden_size),
nn.PReLU(),
nn.Linear(hidden_size, out_size)
) if non_linear else nn.Linear(in_size, out_size)
self.softmax = nn.Softmax(dim=-1)
def forward(self, query, key, mask=None, interaction=True):
assert len(query.shape) == 3
query = self.query(query) # batch_size seq_len d_model
query = query / float(math.sqrt(query.shape[-1]))
key = self.key(key) # batch_size seq_len d_model
attention = torch.matmul(query, key.permute(0, 2, 1)) # (batch_size, seq_len, seq_len)
if mask is None and interaction is True:
return attention # for path scoring
if mask is not None and interaction is True:
attention = F.softmax(attention, dim=-1)
attention = attention * mask # setting the attention score of pedestrian who are not in the scene to zero
attention = F.normalize(attention, p=1, dim=-1) # normalizing the non-zero value
return attention | 2,946 | 31.384615 | 118 | py |
SIT | SIT-master/model/trajectory_model.py |
import torch
import torch.nn as nn
from model.component import MLP
from model.component import SelfAttention
from util import ModelArgs
class TrajectoryModel(nn.Module):
def __init__(self, args: ModelArgs):
super(TrajectoryModel, self).__init__()
in_dim = args.in_dim
obs_len = args.obs_len
pred_len = args.pred_len
hidden1 = args.hidden1
hidden2 = args.hidden2
enc_dim = args.enc_dim
att_layer = args.att_layer
out_dim = args.out_dim
self.obs_enc = nn.Sequential(
MLP(in_dim*obs_len, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, enc_dim)
)
# self attention for interaction
self.int_att = nn.ModuleList(
[SelfAttention(in_size=enc_dim, hidden_size=hidden2, out_size=enc_dim) for _ in range(att_layer)]
)
self.tree_enc = nn.Sequential(
MLP(in_dim*pred_len, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, enc_dim)
)
self.coarse_prediction = nn.Sequential(
MLP(enc_dim*2, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, out_dim*pred_len)
)
self.refining_enc = nn.Sequential(
MLP(in_dim*pred_len, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, enc_dim)
)
self.scoring_att = SelfAttention(in_size=enc_dim, hidden_size=hidden2, out_size=enc_dim)
self.refining = nn.Sequential(
MLP(enc_dim*2, hidden1),
MLP(hidden1, hidden1),
MLP(hidden1, hidden2),
nn.Linear(hidden2, out_dim*pred_len)
)
self.output = nn.Linear(out_dim*pred_len, out_dim*pred_len)
self.tf = args.tf
def forward(self, obs_trajs, tree, coarse_gt, closest_label, mask, device):
obs_trajs_ = obs_trajs.reshape(obs_trajs.shape[0], 1, -1) # N 1 16
tree = tree.reshape(tree.shape[0], tree.shape[1], -1) # N n 24
obs_enc = self.obs_enc(obs_trajs_) # N 1 enc_dim
obs_enc = obs_enc.permute(1, 0, 2) # 1 N enc_dim
for i in range(len(self.int_att)):
int_mat = self.int_att[i](obs_enc, obs_enc, mask)
obs_enc = obs_enc + torch.matmul(int_mat, obs_enc)
obs_enc = obs_enc.permute(1, 0, 2) # N 1 enc_dim
tree_enc = self.tree_enc(tree) # N n enc_dim
path_score = self.scoring_att(obs_enc, tree_enc).squeeze() # N n # cross attention for classification
ped_index = torch.arange(0, obs_trajs.shape[0]).to(device)
closet_branch_enc = tree_enc[ped_index, closest_label] # N enc_dim
con_enc = torch.cat((obs_enc.squeeze(), closet_branch_enc), dim=-1) # N enc_dim*2
coarse_pred_traj = self.coarse_prediction(con_enc) # N 24
if self.tf:
coarse_traj_ = coarse_gt.reshape(coarse_gt.shape) # Teacher forcing
else:
coarse_traj_ = coarse_pred_traj # without teacher forcing
coarse_traj_ = coarse_traj_.reshape(coarse_traj_.shape[0], -1)
coarse_enc = self.refining_enc(coarse_traj_)
con_coarse_enc = torch.cat((obs_enc.squeeze(), coarse_enc), dim=-1) # [N 128]
refining_traj = self.refining(con_coarse_enc)
predicted_traj = self.output(refining_traj)
return path_score, coarse_pred_traj, predicted_traj
def predict(self, obs_trajs, tree, mask, num_k, device):
obs_trajs_ = obs_trajs.reshape(obs_trajs.shape[0], 1, -1) # N 1 16
tree = tree.reshape(tree.shape[0], tree.shape[1], -1) # N n 24
obs_enc = self.obs_enc(obs_trajs_) # N 1 enc_dim
tree_enc = self.tree_enc(tree) # N n enc_dim
obs_enc = obs_enc.permute(1, 0, 2) # 1 N enc_dim
for i in range(len(self.int_att)):
int_mat = self.int_att[i](obs_enc, obs_enc, mask)
obs_enc = obs_enc + torch.matmul(int_mat, obs_enc)
obs_enc = obs_enc.permute(1, 0, 2) # N 1 enc_dim
path_score = self.scoring_att(obs_enc, tree_enc).squeeze() # N n # cross attention for classification
top_k_indices = torch.topk(path_score, k=num_k, dim=-1).indices # N num_k
top_k_indices = top_k_indices.flatten() # N*num_k
ped_indices = torch.arange(0, obs_trajs.shape[0]).unsqueeze(1).to(device) # N 1
ped_indices = ped_indices.repeat(1, num_k).flatten() # N*num_k
selected_paths_enc = tree_enc[ped_indices, top_k_indices] # N*num_k enc_dim
selected_paths_enc = selected_paths_enc.reshape(tree_enc.shape[0], num_k, -1)
obs_enc = obs_enc.repeat(1, selected_paths_enc.shape[1], 1) # N num_k enc_dim
con_enc = torch.cat((obs_enc, selected_paths_enc), dim=-1) # N num_k enc_dim*2
coarse_traj = self.coarse_prediction(con_enc) # N num_k 24
coarse_enc = self.refining_enc(coarse_traj)
con_coarse_enc = torch.cat((obs_enc, coarse_enc), dim=-1)
refining_traj = self.refining(con_coarse_enc) # N num_k enc_dim
predicted_traj = self.output(refining_traj) # N num_k 24
return predicted_traj, path_score
# sdd thea: 4 12 6
# 9.71 17.26
# 9.48 16.70
# 9.44 16.62
# 9.61 16.50
# 9.62 16.19
# 9.38 15.97
# 9.25 15.57
# 9.11 15.74
# 9.12 15.63
# 9.23 15.47
# 9.09 15.54
# eth
# 0.41 0.62
# 0.41 0.59 lr:0.001 thea:4 6 4
# hotel
# 0.15 0.29 lr:0.001 thea:4 6 4
# 0.17 0.29 lr:0.001 thea:12 6 4
# 0.18 0.28 lr:0.001 thea:12 12 12
# 0.15 0.26 flip
# 0.15 0.22 thea:12 6 4
# 0.15 0.25 thea: 4 6 4
# 0.14 0.25 thea: 4 6 4 [250]
# 0.14 0.22 thea: 6 6 4
# univ
# 0.65 1.18 bs:512 thea:4 6 4
# 0.27 0.47 bs:256 thea:4 6 4
# zara1
# 0.23 0.37 lr:0.003 thea:4 6 4
# 0.21 0.36 lr:0.001 thea:4 6 4
# 0.21 0.36
# 0.20 0.34 thea:12 6 4
# 0.19 0.33
# zara2
# 0.17 0.29 lr:0.003 thea:4 6 4
# 0.16 0.29 lr:0.001
# 0.16 0.30 12 6 4
| 6,157 | 33.022099 | 111 | py |
MCEq | MCEq-master/setup.py | import sys
from os.path import join, dirname, abspath
from setuptools import setup, Extension
from distutils.command import build_ext
def get_export_symbols(self, ext):
"""From https://bugs.python.org/issue35893"""
parts = ext.name.split(".")
# print('parts', parts)
if parts[-1] == "__init__":
initfunc_name = "PyInit_" + parts[-2]
else:
initfunc_name = "PyInit_" + parts[-1]
build_ext.build_ext.get_export_symbols = get_export_symbols
# Require pytest-runner only when running tests
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
setup_requires = pytest_runner
libnrlmsise00 = Extension(
'MCEq.geometry.nrlmsise00.libnrlmsise00',
sources=[
join('MCEq/geometry/nrlmsise00', sf)
for sf in ['nrlmsise-00_data.c', 'nrlmsise-00.c']
],
include_dirs=['MCEq/geometry/nrlmsise00'])
libcorsikaatm = Extension(
'MCEq.geometry.corsikaatm.libcorsikaatm',
sources=['MCEq/geometry/corsikaatm/corsikaatm.c'])
# This method is adopted from iMinuit https://github.com/scikit-hep/iminuit
# Getting the version number at this point is a bit tricky in Python:
# https://packaging.python.org/en/latest/development.html#single-sourcing-the-version-across-setup-py-and-your-project
# This is one of the recommended methods that works in Python 2 and 3:
def get_version():
version = {}
with open("MCEq/version.py") as fp:
exec (fp.read(), version)
return version['__version__']
__version__ = get_version()
this_directory = abspath(dirname(__file__))
if sys.version_info.major == 3:
with open(join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
else:
with open(join(this_directory, 'README.md')) as f:
long_description = f.read()
skip_marker = "# MCEq"
long_description = long_description[long_description.index(skip_marker) :].lstrip()
setup(
name='MCEq',
version=__version__,
description='Numerical cascade equation solver',
long_description=long_description,
long_description_content_type='text/markdown',
author='Anatoli Fedynitch',
author_email='[email protected]',
license='BSD 3-Clause License',
url='https://github.com/afedynitch/MCEq',
packages=['MCEq', 'MCEq.tests', 'MCEq.geometry',
'MCEq.geometry.nrlmsise00', 'MCEq.geometry.corsikaatm'],
setup_requires=[] + pytest_runner,
package_data={
'MCEq': ['data/README.md', "geometry/nrlmsise00/nrlmsise-00.h"],
},
install_requires=[
'six',
'h5py',
'particletools',
'crflux>1.0.4',
'scipy',
'numpy',
'tqdm',
'requests'
],
py_modules=['mceq_config'],
ext_modules=[libnrlmsise00, libcorsikaatm],
extras_require={
'MKL': ['mkl==2020.0'],
'CUDA': ['cupy-cuda112==9.2.0']
},
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Physics',
'Intended Audience :: Science/Research',
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License'
])
| 3,622 | 31.936364 | 118 | py |
MCEq | MCEq-master/mceq_config.py | from __future__ import print_function
import sys
import platform
import os.path as path
import warnings
base_path = path.dirname(path.abspath(__file__))
#: Debug flag for verbose printing, 0 silences MCEq entirely
debug_level = 1
#: Override debug prinput for functions listed here (just give the name,
#: "get_solution" for instance) Warning, this option slows down initialization
#: by a lot. Use only when needed.
override_debug_fcn = []
#: Override debug printout for debug levels < value for the functions above
override_max_level = 10
#: Print module name in debug output
print_module = False
# =================================================================
# Paths and library locations
# =================================================================
#: Directory where the data files for the calculation are stored
data_dir = path.join(base_path, 'MCEq', 'data')
#: File name of the MCEq database
mceq_db_fname = "mceq_db_lext_dpm191_v12.h5"
#: File name of the MCEq database
em_db_fname = "mceq_db_EM_Tsai-Max_Z7.31.h5"
# =================================================================
# Atmosphere and geometry settings
# =================================================================
#: The latest versions of MCEq work in kinetic energy not total energy
#: If you want the result to be compatible with the previous choose
#: 'total energy' else 'kinetic energy'
return_as = "kinetic energy"
#: Atmospheric model in the format: (model, (arguments))
density_model = ('CORSIKA', ('BK_USStd', None))
#: density_model = ('MSIS00_IC',('SouthPole','January'))
#: density_model = ('GeneralizedTarget', None)
#: Definition of prompt: default ctau < 0.123 cm (that of D0)
prompt_ctau = 0.123
#: Average mass of target (for interaction length calculations)
#: Change parameter only in combination with interaction model setting.
#: By default all particle production matrices are calculated for air targets
#: expect those for models with '_pp' suffix. These are valid for hydrogen targets.
#: <A> = 14.6568 for air as below (source https://en.wikipedia.org/wiki/Atmosphere_of_Earth)
A_target = sum([f[0]*f[1] for f in [(0.78084, 14), (0.20946, 16), (0.00934, 40)]])
#: parameters for EarthGeometry
r_E = 6391.e3 # Earth radius in m
h_obs = 0. # observation level in m
h_atm = 112.8e3 # top of the atmosphere in m
#: Default parameters for GeneralizedTarget
#: Total length of the target [m]
len_target = 1000.
#: density of default material in g/cm^3
env_density = 0.001225
env_name = "air"
#: Approximate value for the maximum density expected. Needed for the
#: resonance approximation. Default value: air at the surface
max_density = 0.001225,
#: Material for ionization and radiation (=continuous) loss terms
#: Currently available choices: 'air', 'water', 'ice'
dedx_material = 'air'
# =================================================================
# Parameters of numerical integration
# =================================================================
#: Minimal energy for grid
#: The minimal energy (technically) is 1e-2 GeV. Currently you can run into
#: stability problems with the integrator with such low thresholds. Use with
#: care and check results for oscillations and feasibility.
e_min = .1
#: The maximal energy is 1e12 GeV, but not all interaction models run at such
#: high energies. If you are interested in lower energies, reduce this value
#: for inclusive calculations to max. energy of interest + 4-5 orders of
#: magnitude. For single primaries the maximal energy is directly limited by
#: this value. Smaller grids speed up the initialization and integration.
e_max = 1e11
#: Enable electromagnetic cascade with matrices from EmCA
enable_em = False
#: Selection of integrator (euler/odepack)
integrator = "euler"
#: euler kernel implementation (numpy/MKL/CUDA).
#: With serious nVidia GPUs CUDA a few times faster than MKL
#: autodetection of fastest kernel below
kernel_config = "numpy"
#: Select CUDA device ID if you have multiple GPUs
cuda_gpu_id = 0
#: CUDA Floating point precision (default 32-bit 'float')
cuda_fp_precision = 32
#: Number of MKL threads (for sparse matrix multiplication the performance
#: advantage from using more than a few threads is limited by memory bandwidth)
#: Irrelevant for GPU integrators, but can affect initialization speed if
#: numpy is linked to MKL.
mkl_threads = 8
#: parameters for the odepack integrator. More details at
#: http://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html#scipy.integrate.ode
ode_params = {
'name': 'lsoda',
'method': 'bdf',
'nsteps': 1000,
# 'max_step': 10.0,
'rtol': 0.01
}
# =========================================================================
# Advanced settings
# =========================================================================
#: The leading process is can be either "decays" or "interactions". This depends
#: on the target density and it is usually chosen automatically. For
#: advanced applications one can force "interactions" to be the dominant
#: process. Essentially this affects how the adaptive step size is computed.
#: There is also the choice of "auto" that takes both processes into account
leading_process = "auto"
#: Stability margin for the integrator. The default 0.95 means that step
#: sizes are chosen 5% away from the stability circle. Usually no need to
#: change, except you know what it does.
stability_margin = 0.95
#: Ratio of decay_length/interaction_length where particle interactions
#: are neglected and the resonance approximation is used
#: 0.5 ~ precision loss <+3% speed gain ~ factor 10
#: If smoothness and shape accuracy for prompt flux is crucial, use smaller
#: values around 0.1 or 0.05
hybrid_crossover = 0.5
#: Maximal integration step dX in g/cm2. No limit necessary in most cases,
#: use for debugging purposes when searching for stability issues.
dXmax = 10.
#: Enable default tracking particles, such as pi_numu, pr_mu+, etc.
#: If only total fluxes are of interest, disable this feature to gain
#: performance since the eqution system becomes smaller and sparser
enable_default_tracking = True
#: Muon energy loss according to Kokoulin et al.
enable_muon_energy_loss = True
#: enable EM ionization loss
enable_em_ion = False
#: Improve (explicit solver) stability by averaging the continous loss
#: operator
average_loss_operator = True
#: Step size (dX) for averaging
loss_step_for_average = 1e-1
#: Raise exception when requesting unknown particles from get_solution
excpt_on_missing_particle = False
#: When using modified particle production matrices use
#: isospin symmetries to determine the corresponding
#: modification for neutrons and K0L/K0S
use_isospin_sym = True
#: Helicity dependent muons decays from analytical expressions
muon_helicity_dependence = True
#: Assume nucleon, pion and kaon cross sections for interactions of
#: rare or exotic particles (mostly relevant for non-compact mode)
assume_nucleon_interactions_for_exotics = True
#: This is not used in the code as before, instead the low energy
#: extension is compiled into the HDF backend files.
low_energy_extension = {
"he_le_transition": 80, # GeV
"nbins_interp": 3,
"use_unknown_cs": True,
}
#: Advanced settings (some options might be obsolete/not working)
adv_set = {
#: Disable particle production by all hadrons, except nucleons
"disable_interactions_of_unstable": False,
#: Disable particle production by charm *projectiles* (interactions)
"disable_charm_pprod": False,
#: Disable resonance/prompt contribution (this group of options
#: is either obsolete or needs maintenance.)
#: "disable_resonance_decay" : False,
#: Allow only those particles to be projectiles (incl. anti-particles)
#: Faster initialization,
#: For inclusive lepton flux computations:
#: precision loss ~ 1%, for SIBYLL2.3.X with charm 5% above 10^7 GeV
#: Might be different for yields (set_single_primary_particle)
#: For full precision or if in doubt, use []
"allowed_projectiles": [], # [2212, 2112, 211, 321, 130, 11, 22],
#: Disable particle (production)
"disabled_particles": [], #20, 19, 18, 17, 97, 98, 99, 101, 102, 103
#: Disable leptons coming from prompt hadron decays at the vertex
"disable_direct_leptons": False,
#: Difficult to explain parameter
'disable_leading_mesons': False,
#: Do not apply mixing to these particles
"exclude_from_mixing": [13],
#: Switch off decays. E.g., disable muon decay with [13,-13]
"disable_decays": [],
#: Force particles to be treated as resonance
"force_resonance": [],
#: Disable mixing between resonance approx. and full propagation
"no_mixing": False
}
#: Particles for compact mode
standard_particles = [
11, 12, 13, 14, 16, 211, 321, 2212, 2112, 3122, 411, 421, 431
]
#: Anti-particles
standard_particles += [-pid for pid in standard_particles]
#: unflavored particles
#: append 221, 223, 333, if eta, omega and phi needed directly
standard_particles += [22, 111, 130, 310] #: , 221, 223, 333]
#: This construct provides access to the attributes as in previous
#: versions, using `from mceq_config import config`. The future versions
#: will access the module attributes directly.
#: Autodetect best solver
#: determine shared library extension and MKL path
pf = platform.platform()
if 'Linux' in pf:
mkl_path = path.join(sys.prefix, 'lib', 'libmkl_rt.so')
elif 'Darwin' in pf:
mkl_path = path.join(sys.prefix, 'lib', 'libmkl_rt.dylib')
else:
# Windows case
mkl_path = path.join(sys.prefix, 'Library', 'bin', 'mkl_rt.dll')
# mkl library handler
mkl = None
# Check if MKL library found
if path.isfile(mkl_path):
has_mkl = True
else:
has_mkl = False
# Look for cupy module
try:
import cupy
has_cuda = True
except ImportError:
has_cuda = False
# CUDA is usually fastest, then MKL. Fallback to numpy.
if has_cuda:
kernel_config = 'CUDA'
elif has_mkl:
kernel_config = 'MKL'
else:
kernel_config = 'numpy'
if debug_level >= 2:
print('Auto-detected {0} solver.'.format(kernel_config))
def set_mkl_threads(nthreads):
global mkl_threads, mkl
from ctypes import cdll, c_int, byref
mkl = cdll.LoadLibrary(mkl_path)
# Set number of threads
mkl_threads = nthreads
mkl.mkl_set_num_threads(byref(c_int(nthreads)))
if debug_level >= 5:
print('MKL threads limited to {0}'.format(nthreads))
if has_mkl:
set_mkl_threads(mkl_threads)
# Compatibility layer for dictionary access to config attributes
# This is deprecated and will be removed in future
class MCEqConfigCompatibility(dict):
"""This class provides access to the attributes of the module as a
dictionary, as it was in the previous versions of MCEq
This method is deprecated and will be removed in future.
"""
def __init__(self, namespace):
self.__dict__.update(namespace)
if debug_level > 1:
warn_str = ("Config dictionary is deprecated. " +
"Use config.variable instead of config['variable']")
warnings.warn(warn_str, FutureWarning)
def __setitem__(self, key, value):
key = key.lower()
if key not in self.__dict__:
raise Exception('Unknown config key', key)
return super(MCEqConfigCompatibility, self).__setitem__(key, value)
config = MCEqConfigCompatibility(globals())
class FileIntegrityCheck:
"""
A class to check a file integrity against provided checksum
Attributes
----------
filename : str
path to the file
checksum : str
hex of sha256 checksum
Methods
-------
is_passed():
returns True if checksum and calculated checksum of the file are equal
get_file_checksum():
returns checksum of the file
"""
import hashlib
def __init__(self, filename, checksum = ''):
self.filename = filename
self.checksum = checksum
self.sha256_hash = self.hashlib.sha256()
self.hash_is_calculated = False
def _calculate_hash(self):
if not self.hash_is_calculated:
try:
with open(self.filename, "rb") as file:
for byte_block in iter(lambda: file.read(4096),b""):
self.sha256_hash.update(byte_block)
self.hash_is_calculated = True
except EnvironmentError as ex:
print("FileIntegrityCheck: {0}".format(ex))
def is_passed(self):
self._calculate_hash()
return (self.hash_is_calculated and self.sha256_hash.hexdigest() == self.checksum)
def get_file_checksum(self):
self._calculate_hash()
return self.sha256_hash.hexdigest()
def _download_file(url, outfile):
"""Downloads the MCEq database from github"""
from tqdm import tqdm
import requests
import math
# Streaming, so we can iterate over the response.
r = requests.get(url, stream=True)
# Total size in bytes.
total_size = int(r.headers.get('content-length', 0))
block_size = 1024 * 1024
wrote = 0
with open(outfile, 'wb') as f:
for data in tqdm(r.iter_content(block_size), total=math.ceil(total_size // block_size),
unit='MB', unit_scale=True):
wrote = wrote + len(data)
f.write(data)
if total_size != 0 and wrote != total_size:
raise Exception("ERROR, something went wrong")
# Download database file from github
base_url = 'https://github.com/afedynitch/MCEq/releases/download/'
release_tag = 'builds_on_azure/'
url = base_url + release_tag + mceq_db_fname
# sha256 checksum of the file
# https://github.com/afedynitch/MCEq/releases/download/builds_on_azure/mceq_db_lext_dpm191_v12.h5
file_checksum="6353f661605a0b85c3db32e8fd259f68433392b35baef05fd5f0949b46f9c484"
filepath_to_database = path.join(data_dir, mceq_db_fname)
if path.isfile(filepath_to_database):
is_file_complete = FileIntegrityCheck(filepath_to_database, file_checksum).is_passed()
else:
is_file_complete = False
if not is_file_complete:
print('Downloading for mceq database file {0}.'.format(mceq_db_fname))
if debug_level >= 2:
print(url)
_download_file(url, filepath_to_database)
old_database = 'mceq_db_lext_dpm191.h5'
filepath_to_old_database = path.join(data_dir, old_database)
if path.isfile(filepath_to_old_database):
import os
print('Removing previous database {0}.'.format(old_database))
os.unlink(filepath_to_old_database)
| 14,601 | 33.601896 | 99 | py |
MCEq | MCEq-master/docs/conf.py | # -*- coding: utf-8 -*-
#
# Matrix Cascade Equation (MCEq) documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 21 10:13:38 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..')
sys.path.append(base_path)
import MCEq.version
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.imgmath', 'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.autosummary'
]
intersphinx_mapping = {
'python': ('http://docs.python.org/2.7', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'crflux': ('http://crfluxmodels.readthedocs.org/en/latest', None),
'particletools': ('http://particledatatool.readthedocs.org/en/latest',
None)
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MCEq'
copyright = '2019, Anatoli Fedynitch'
autodoc_member_order = 'groupwise'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = MCEq.version.__version__
# The full version, including alpha/beta/rc tags.
release = MCEq.version.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {
'css_files': [
'_static/theme_overrides.css'
]
}
else:
html_context = {
'css_files': [
'//media.readthedocs.org/css/sphinx_rtd_theme.css',
'//media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css'
]
}
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MatrixCascadeEquationMCEqdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MatrixCascadeEquationMCEq.tex',
'Matrix Cascade Equation (MCEq) Documentation', 'Anatoli Fedynitch',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', 'matrixcascadeequationmceq',
'Matrix Cascade Equation (MCEq) Documentation',
['Anatoli Fedynitch'], 1)]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'MatrixCascadeEquationMCEq',
'Matrix Cascade Equation (MCEq) Documentation', 'Anatoli Fedynitch',
'MatrixCascadeEquationMCEq', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 9,553 | 31.386441 | 83 | py |
MCEq | MCEq-master/MCEq/core.py | import os
import six
from time import time
import numpy as np
import mceq_config as config
from MCEq.misc import normalize_hadronic_model_name, info
from MCEq.particlemanager import ParticleManager
import MCEq.data
class MCEqRun(object):
"""Main class for handling the calculation.
This class is the main user interface for the caclulation. It will
handle initialization and various error/configuration checks. The
setup has to be accomplished before invoking the integration routine
is :func:`MCeqRun.solve`. Changes of configuration, such as:
- interaction model in :meth:`MCEqRun.set_interaction_model`,
- primary flux in :func:`MCEqRun.set_primary_model`,
- zenith angle in :func:`MCEqRun.set_theta_deg`,
- density profile in :func:`MCEqRun.set_density_model`,
- member particles of the special ``obs_`` group in :func:`MCEqRun.set_obs_particles`,
can be made on an active instance of this class, while calling
:func:`MCEqRun.solve` subsequently to calculate the solution
corresponding to the settings.
The result can be retrieved by calling :func:`MCEqRun.get_solution`.
Args:
interaction_model (string): PDG ID of the particle
density_model (string,sting,string): model type, location, season
primary_model (class, param_tuple): classes derived from
:class:`crflux.models.PrimaryFlux` and its parameters as tuple
theta_deg (float): zenith angle :math:`\\theta` in degrees,
measured positively from vertical direction
adv_set (dict): advanced settings, see :mod:`mceq_config`
obs_ids (list): list of particle name strings. Those lepton decay
products will be scored in the special ``obs_`` categories
"""
def __init__(self, interaction_model, primary_model, theta_deg, **kwargs):
self._mceq_db = MCEq.data.HDF5Backend()
interaction_model = normalize_hadronic_model_name(interaction_model)
# Save atmospheric parameters
self.density_config = kwargs.pop('density_model', config.density_model)
self.theta_deg = theta_deg
#: Interface to interaction tables of the HDF5 database
self._interactions = MCEq.data.Interactions(mceq_hdf_db=self._mceq_db)
#: handler for cross-section data of type :class:`MCEq.data.HadAirCrossSections`
self._int_cs = MCEq.data.InteractionCrossSections(
mceq_hdf_db=self._mceq_db)
#: handler for cross-section data of type :class:`MCEq.data.HadAirCrossSections`
self._cont_losses = MCEq.data.ContinuousLosses(mceq_hdf_db=self._mceq_db,
material=config.dedx_material)
#: Interface to decay tables of the HDF5 database
self._decays = MCEq.data.Decays(mceq_hdf_db=self._mceq_db)
#: Particle manager (initialized/updated in set_interaction_model)
self.pman = None
# Particle list to keep track of previously initialized particles
self._particle_list = None
# General Matrix dimensions and shortcuts, controlled by
# grid of yield matrices
self._energy_grid = self._mceq_db.energy_grid
# Initialize solution vector
self._solution = np.zeros(1)
# Initialize empty state (particle density) vector
self._phi0 = np.zeros(1)
# Initialize matrix builder (initialized in set_interaction_model)
self.matrix_builder = None
# Save initial condition (primary flux) to restore after dimensional resizing
self._restore_initial_condition = []
# Set interaction model and compute grids and matrices
self.set_interaction_model(
interaction_model,
particle_list = kwargs.pop('particle_list', None),
build_matrices = kwargs.pop('build_matrices', True)
)
# Default GPU device id for CUDA
self._cuda_device = kwargs.pop('cuda_gpu_id', config.cuda_gpu_id)
# Print particle list after tracking particles have been initialized
self.pman.print_particle_tables(2)
# Set atmosphere and geometry
self.integration_path, self.int_grid, self.grid_var = None, None, None
self.set_density_model(self.density_config)
# Set initial flux condition
if primary_model is not None:
self.set_primary_model(*primary_model)
@property
def e_grid(self):
"""Energy grid (bin centers)"""
return self._energy_grid.c
@property
def e_bins(self):
"""Energy grid (bin edges)"""
return self._energy_grid.b
@property
def e_widths(self):
"""Energy grid (bin widths)"""
return self._energy_grid.w
@property
def dim(self):
"""Energy grid (dimension)"""
return self._energy_grid.d
@property
def dim_states(self):
"""Number of cascade particles times dimension of grid
(dimension of the equation system)"""
return self.pman.dim_states
def closest_energy(self, kin_energy):
"""Convenience function to obtain the nearest grid energy
to the `energy` argument, provided as kinetik energy in lab. frame."""
eidx = (np.abs(self._energy_grid.c - kin_energy)).argmin()
return self._energy_grid.c[eidx]
def get_solution(self,
particle_name,
mag=0.,
grid_idx=None,
integrate=False,
return_as=config.return_as):
"""Retrieves solution of the calculation on the energy grid.
Some special prefixes are accepted for lepton names:
- the total flux of muons, muon neutrinos etc. from all sources/mothers
can be retrieved by the prefix ``total_``, i.e. ``total_numu``
- the conventional flux of muons, muon neutrinos etc. from all sources
can be retrieved by the prefix ``conv_``, i.e. ``conv_numu``
- correspondigly, the flux of leptons which originated from the decay
of a charged pion carries the prefix ``pi_`` and from a kaon ``k_``
- conventional leptons originating neither from pion nor from kaon
decay are collected in a category without any prefix, e.g. ``numu`` or
``mu+``
Args:
particle_name (str): The name of the particle such, e.g.
``total_mu+`` for the total flux spectrum of positive muons or
``pr_antinumu`` for the flux spectrum of prompt anti muon neutrinos
mag (float, optional): 'magnification factor': the solution is
multiplied by ``sol`` :math:`= \\Phi \\cdot E^{mag}`
grid_idx (int, optional): if the integrator has been configured to save
intermediate solutions on a depth grid, then ``grid_idx`` specifies
the index of the depth grid for which the solution is retrieved. If
not specified the flux at the surface is returned
integrate (bool, optional): return averge particle number instead of
flux (multiply by bin width)
Returns:
(numpy.array): flux of particles on energy grid :attr:`e_grid`
"""
res = np.zeros(self._energy_grid.d)
ref = self.pman.pname2pref
sol = None
if grid_idx is not None and len(self.grid_sol) == 0:
raise Exception(
'Solution not has not been computed on grid. Check input.')
if grid_idx is None:
sol = np.copy(self._solution)
elif grid_idx >= len(self.grid_sol) or grid_idx is None:
sol = self.grid_sol[-1, :]
else:
sol = self.grid_sol[grid_idx, :]
def sum_lr(lep_str, prefix):
result = np.zeros(self.dim)
nsuccess = 0
for ls in lep_str, lep_str + '_l', lep_str + '_r':
if prefix + ls not in ref:
info(
15, 'No separate left and right handed particles,',
'or, unavailable particle prefix {0}.'.format(prefix +
ls))
continue
result += sol[ref[prefix + ls].lidx:ref[prefix + ls].uidx]
nsuccess += 1
if nsuccess == 0 and config.excpt_on_missing_particle:
raise Exception(
'Requested particle {0} not found.'.format(particle_name))
return result
lep_str = particle_name.split(
'_')[1] if '_' in particle_name else particle_name
if particle_name.startswith('total_'):
# Note: This has changed from previous MCEq versions,
# since pi_ and k_ prefixes are mere tracking counters
# and no full particle species anymore
res = sum_lr(lep_str, prefix='')
elif particle_name.startswith('conv_'):
# Note: This changed from previous MCEq versions,
# conventional is defined as total - prompt
res = (self.get_solution('total_' + lep_str,
mag=0,
grid_idx=grid_idx,
integrate=False,
return_as='kinetic energy') -
self.get_solution('pr_' + lep_str,
mag=0,
grid_idx=grid_idx,
integrate=False,
return_as='kinetic energy'))
elif particle_name.startswith('pr_'):
if 'prcas_' + lep_str in ref:
res += sum_lr(lep_str, prefix='prcas_')
if 'prres_' + lep_str in ref:
res += sum_lr(lep_str, prefix='prres_')
if 'em_' + lep_str in ref:
res += sum_lr(lep_str, prefix='em_')
else:
try:
res = sum_lr(particle_name, prefix='')
except KeyError:
info(10,
'Requested particle {0} not found.'.format(particle_name))
# When returning in Etot, interpolate on different grid
if return_as == 'total energy':
etot_bins = self.e_bins + ref[lep_str].mass
etot_grid = np.sqrt(etot_bins[1:] * etot_bins[:-1])
if not integrate:
return etot_grid, res * etot_grid**mag
else:
return etot_grid, res * etot_grid**mag * (etot_bins[1:] -
etot_bins[:-1])
elif return_as == 'kinetic energy':
if not integrate:
return res * self._energy_grid.c**mag
else:
return res * self._energy_grid.c**mag * self._energy_grid.w
elif return_as == 'total momentum':
ptot_bins = np.sqrt((self.e_bins + ref[lep_str].mass)**2 -
ref[lep_str].mass**2)
ptot_grid = np.sqrt(ptot_bins[1:] * ptot_bins[:-1])
dEkindp = ptot_grid / np.sqrt(ptot_grid**2 + ref[lep_str].mass**2)
res *= dEkindp
if not integrate:
return ptot_grid, res * ptot_grid**mag
else:
return ptot_grid, res * ptot_grid**mag * (ptot_bins[1:] -
ptot_bins[:-1])
else:
raise Exception(
"Unknown 'return_as' variable choice.",
'the options are "kinetic energy", "total energy", "total momentum"'
)
def set_interaction_model(self,
interaction_model,
particle_list=None,
update_particle_list=True,
force=False,
build_matrices=True):
"""Sets interaction model and/or an external charm model for calculation.
Decay and interaction matrix will be regenerated automatically
after performing this call.
Args:
interaction_model (str): name of interaction model
charm_model (str, optional): name of charm model
force (bool): force loading interaction model
"""
interaction_model = normalize_hadronic_model_name(interaction_model)
info(1, interaction_model)
if not force and (self._interactions.iam == interaction_model
) and particle_list != self._particle_list:
info(2, 'Skip, since current model identical to',
interaction_model + '.')
return
self._int_cs.load(interaction_model)
# TODO: simplify this, stuff not needed anymore
if not update_particle_list and self._particle_list is not None:
info(10, 'Re-using particle list.')
self._interactions.load(interaction_model,
parent_list=self._particle_list)
self.pman.set_interaction_model(self._int_cs, self._interactions)
self.pman.set_decay_channels(self._decays)
self.pman.set_continuous_losses(self._cont_losses)
elif self._particle_list is None:
info(10, 'New initialization of particle list.')
# First initialization
if particle_list is None:
self._interactions.load(interaction_model)
else:
self._interactions.load(interaction_model,
parent_list=particle_list)
self._decays.load(parent_list=self._interactions.particles)
self._particle_list = self._interactions.particles + self._decays.particles
# Create particle database
self.pman = ParticleManager(self._particle_list, self._energy_grid,
self._int_cs)
self.pman.set_interaction_model(self._int_cs, self._interactions)
self.pman.set_decay_channels(self._decays)
self.pman.set_continuous_losses(self._cont_losses)
self.matrix_builder = MatrixBuilder(self.pman)
elif (update_particle_list and particle_list != self._particle_list):
info(10, 'Updating particle list.')
# Updated particle list received
if particle_list is None:
self._interactions.load(interaction_model)
else:
self._interactions.load(interaction_model,
parent_list=particle_list)
self._decays.load(parent_list=self._interactions.particles)
self._particle_list = self._interactions.particles + self._decays.particles
self.pman.set_interaction_model(
self._int_cs,
self._interactions,
updated_parent_list=self._particle_list)
self.pman.set_decay_channels(self._decays)
self.pman.set_continuous_losses(self._cont_losses)
else:
raise Exception('Should not happen in practice.')
self._resize_vectors_and_restore()
# initialize matrices
if not build_matrices:
return
self.int_m, self.dec_m = self.matrix_builder.construct_matrices(
skip_decay_matrix=False)
def _resize_vectors_and_restore(self):
"""Update solution and grid vectors if the number of particle species
or the interaction models change. The previous state, such as the
initial spectrum, are restored."""
# Update dimensions if particle dimensions changed
self._phi0 = np.zeros(self.dim_states)
self._solution = np.zeros(self.dim_states)
# Restore insital condition if present
if len(self._restore_initial_condition) > 0:
for con in self._restore_initial_condition:
con[0](*con[1:])
def set_primary_model(self, mclass, tag):
"""Sets primary flux model.
This functions is quick and does not require re-generation of
matrices.
Args:
interaction_model (:class:`CRFluxModel.PrimaryFlux`): reference
to primary model **class**
tag (tuple): positional argument list for model class
"""
info(1, mclass.__name__, tag if tag is not None else '')
# Save primary flux model for restauration after interaction model changes
self._restore_initial_condition = [
(self.set_primary_model, mclass, tag)]
# Initialize primary model object
self.pmodel = mclass(tag)
self.get_nucleon_spectrum = np.vectorize(self.pmodel.p_and_n_flux)
try:
self.dim_states
except AttributeError:
self.finalize_pmodel = True
# Save initial condition
minimal_energy = 3.
if (2212, 0) in self.pman:
e_tot = self._energy_grid.c + self.pman[(2212, 0)].mass
else:
info(
10,
'No protons in eqn system, quering primary flux with kinetic energy.'
)
e_tot = self._energy_grid.c
min_idx = np.argmin(np.abs(e_tot - minimal_energy))
self._phi0 *= 0
p_top, n_top = self.get_nucleon_spectrum(e_tot[min_idx:])[1:]
if (2212, 0) in self.pman:
self._phi0[min_idx + self.pman[(2212, 0)].lidx:self.pman[(
2212, 0)].uidx] = 1e-4 * p_top
else:
info(
1,
'Warning protons not part of equation system, can not set primary flux.'
)
if (2112, 0) in self.pman and not self.pman[(2112, 0)].is_resonance:
self._phi0[min_idx + self.pman[(2112, 0)].lidx:self.pman[(
2112, 0)].uidx] = 1e-4 * n_top
elif (2212, 0) in self.pman:
info(2, 'Neutrons not part of equation system,',
'substituting initial flux with protons.')
self._phi0[min_idx + self.pman[(2212, 0)].lidx:self.pman[(
2212, 0)].uidx] += 1e-4 * n_top
def set_single_primary_particle(self, E, corsika_id=None, pdg_id=None, append=False):
"""Set type and kinetic energy of a single primary nucleus to
calculation of particle yields.
The functions uses the superposition theorem, where the flux of
a nucleus with mass A and charge Z is modeled by using Z protons
and A-Z neutrons at energy :math:`E_{nucleon}= E_{nucleus} / A`
The nucleus type is defined via :math:`\\text{CORSIKA ID} = A*100 + Z`. For
example iron has the CORSIKA ID 5226.
Single leptons or hadrons can be defined by specifiying `pdg_id` instead of
`corsika_id`.
The `append` argument can be used to compose an initial state with
multiple particles. If it is `False` the initial condition is reset to zero
before adding the particle.
A continuous input energy range is allowed between
:math:`50*A~ \\text{GeV} < E_\\text{nucleus} < 10^{10}*A \\text{GeV}`.
Args:
E (float): kinetic energy of a nucleus in GeV
corsika_id (int): ID of a nucleus (see text)
pdg_id (int): PDG ID of a particle
append (bool): If True, keep previous state and append a new particle.
"""
import warnings
from scipy.linalg import solve
from MCEq.misc import getAZN_corsika, getAZN
if corsika_id and pdg_id:
raise Exception('Provide either corsika or PDG ID')
info(
2, 'CORSIKA ID {0}, PDG ID {1}, energy {2:5.3g} GeV'.format(
corsika_id, pdg_id, E))
if append == False:
self._restore_initial_condition = [(self.set_single_primary_particle, E,
corsika_id, pdg_id)]
self._phi0 *= 0.
else:
self._restore_initial_condition.append((self.set_single_primary_particle, E,
corsika_id, pdg_id))
egrid = self._energy_grid.c
ebins = self._energy_grid.b
ewidths = self._energy_grid.w
if corsika_id:
n_nucleons, n_protons, n_neutrons = getAZN_corsika(corsika_id)
elif pdg_id:
n_nucleons, n_protons, n_neutrons = getAZN(pdg_id)
En = E / float(n_nucleons) if n_nucleons > 0 else E
if En < np.min(self._energy_grid.c):
raise Exception('energy per nucleon too low for primary ' +
str(corsika_id))
info(3, ('superposition: n_protons={0}, n_neutrons={1}, ' +
'energy per nucleon={2:5.3g} GeV').format(
n_protons, n_neutrons, En))
cenbin = np.argwhere(En < ebins)[0][0] - 1
# Equalize the first three moments for 3 normalizations around the central
# bin
emat = np.vstack(
(ewidths[cenbin - 1:cenbin + 2],
ewidths[cenbin - 1:cenbin + 2] * egrid[cenbin - 1:cenbin + 2],
ewidths[cenbin - 1:cenbin + 2] * egrid[cenbin - 1:cenbin + 2]**2))
if n_nucleons == 0:
# This case handles other exotic projectiles
b_particle = np.array([1., En, En**2])
lidx = self.pman[pdg_id].lidx
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self._phi0[lidx + cenbin - 1:lidx + cenbin + 2] += solve(
emat, b_particle)
return
if n_protons > 0:
b_protons = np.array(
[n_protons, En * n_protons, En**2 * n_protons])
p_lidx = self.pman[2212].lidx
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self._phi0[p_lidx + cenbin - 1:p_lidx + cenbin + 2] += solve(
emat, b_protons)
if n_neutrons > 0:
b_neutrons = np.array(
[n_neutrons, En * n_neutrons, En**2 * n_neutrons])
n_lidx = self.pman[2112].lidx
with warnings.catch_warnings():
warnings.simplefilter('ignore')
self._phi0[n_lidx + cenbin - 1:n_lidx + cenbin + 2] += solve(
emat, b_neutrons)
def set_initial_spectrum(self, spectrum, pdg_id=None, append=False):
"""Set a user-defined spectrum for an arbitrary species as initial condition.
This function is an equivalent to :func:`set_single_primary_particle`. It
allows to define an arbitrary spectrum for each available particle species
as initial condition for the integration. Set the `append` argument to `True`
for subsequent species to define initial spectra combined from different particles.
The (differential) spectrum has to be distributed on the energy grid as dN/dptot, i.e.
divided by the bin widths and with the total momentum units in GeV(/c).
Args:
spectrum (np.array): spectrum dN/dptot
pdg_id (int): PDG ID in case of a particle
"""
from MCEq.misc import getAZN_corsika, getAZN
info(2, 'PDG ID {0}'.format(pdg_id))
if not append:
self._restore_initial_condition = [(self.set_initial_spectrum,
pdg_id, append)]
self._phi0 *= 0
else:
self._restore_initial_condition.append((self.set_initial_spectrum,
pdg_id, append))
egrid = self._energy_grid.c
ebins = self._energy_grid.b
ewidths = self._energy_grid.w
if len(spectrum) != self.dim:
raise Exception(
'Lengths of spectrum and energy grid do not match.')
self._phi0[self.pman[pdg_id].lidx:self.pman[pdg_id].uidx] += spectrum
def set_density_model(self, density_config):
"""Sets model of the atmosphere.
To choose, for example, a CORSIKA parametrization for the Southpole in January,
do the following::
mceq_instance.set_density_model(('CORSIKA', ('PL_SouthPole', 'January')))
More details about the choices can be found in :mod:`MCEq.geometry.density_profiles`. Calling
this method will issue a recalculation of the interpolation and the integration path.
From version 1.2 and above, the `density_config` parameter can be a reference to
an instance of a density class directly. The class has to be derived either from
:class:`MCEq.geometry.density_profiles.EarthsAtmosphere` or
:class:`MCEq.geometry.density_profiles.GeneralizedTarget`.
Args:
density_config (tuple of strings): (parametrization type, arguments)
"""
import MCEq.geometry.density_profiles as dprof
# Check if string arguments or an instance of the density class is provided
if not isinstance(density_config, (dprof.EarthsAtmosphere, dprof.GeneralizedTarget)):
base_model, model_config = density_config
available_models = [
'MSIS00', 'MSIS00_IC', 'CORSIKA', 'AIRS', 'Isothermal',
'GeneralizedTarget'
]
if base_model not in available_models:
info(0, 'Unknown density model. Available choices are:\n',
'\n'.join(available_models))
raise Exception('Choose a different profile.')
info(1, 'Setting density profile to', base_model, model_config)
if base_model == 'MSIS00':
self.density_model = dprof.MSIS00Atmosphere(*model_config)
elif base_model == 'MSIS00_IC':
self.density_model = dprof.MSIS00IceCubeCentered(*model_config)
elif base_model == 'CORSIKA':
self.density_model = dprof.CorsikaAtmosphere(*model_config)
elif base_model == 'AIRS':
self.density_model = dprof.AIRSAtmosphere(*model_config)
elif base_model == 'Isothermal':
self.density_model = dprof.IsothermalAtmosphere(*model_config)
elif base_model == 'GeneralizedTarget':
self.density_model = dprof.GeneralizedTarget()
else:
raise Exception('Unknown atmospheric base model.')
self.density_config = density_config
else:
self.density_model = density_config
self.density_config = density_config
if self.theta_deg is not None and isinstance(self.density_model, dprof.EarthsAtmosphere):
self.set_theta_deg(self.theta_deg)
elif isinstance(self.density_model, dprof.GeneralizedTarget):
self.integration_path = None
else:
raise Exception('Density model not supported.')
# TODO: Make the pman aware of that density might have changed and
# indices as well
# self.pmod._gen_list_of_particles()
def set_theta_deg(self, theta_deg):
"""Sets zenith angle :math:`\\theta` as seen from a detector.
Currently only 'down-going' angles (0-90 degrees) are supported.
Args:
theta_deg (float): zenith angle in the range 0-90 degrees
"""
import MCEq.geometry.density_profiles as dprof
info(2, 'Zenith angle {0:6.2f}'.format(theta_deg))
if isinstance(self.density_model, dprof.GeneralizedTarget):
raise Exception('GeneralizedTarget does not support angles.')
if self.density_model.theta_deg == theta_deg:
info(2,
'Theta selection correponds to cached value, skipping calc.')
return
self.density_model.set_theta(theta_deg)
self.integration_path = None
def set_mod_pprod(self,
prim_pdg,
sec_pdg,
x_func,
x_func_args,
delay_init=False):
"""Sets combination of projectile/secondary for error propagation.
The production spectrum of ``sec_pdg`` in interactions of
``prim_pdg`` is modified according to the function passed to
:func:`InteractionYields.init_mod_matrix`
Args:
prim_pdg (int): interacting (primary) particle PDG ID
sec_pdg (int): secondary particle PDG ID
x_func (object): reference to function
x_func_args (tuple): arguments passed to ``x_func``
delay_init (bool): Prevent init of mceq matrices if you are
planning to add more modifications
"""
info(
1, '{0}/{1}, {2}, {3}'.format(prim_pdg, sec_pdg, x_func.__name__,
str(x_func_args)))
init = self._interactions._set_mod_pprod(prim_pdg, sec_pdg, x_func,
x_func_args)
# Need to regenerate matrices completely
return int(init)
def unset_mod_pprod(self, dont_fill=False):
"""Removes modifications from :func:`MCEqRun.set_mod_pprod`.
Args:
skip_fill (bool): If `true` do not regenerate matrices
(has to be done at a later step by hand)
"""
from collections import defaultdict
info(1, 'Particle production modifications reset to defaults.')
self._interactions.mod_pprod = defaultdict(lambda: {})
# Need to regenerate matrices completely
if not dont_fill:
self.regenerate_matrices()
def regenerate_matrices(self, skip_decay_matrix=False):
"""Call this function after applying particle prod. modifications aka
Barr parameters"""
# TODO: Not all particles need to be reset and there is some performance loss
# This can be optmized by refreshing only the particles that change or through
# lazy evaluation, i.e. hadronic channels dict. calls data.int..get_matrix on demand
self.pman.set_interaction_model(self._int_cs,
self._interactions,
force=True)
self.int_m, self.dec_m = self.matrix_builder.construct_matrices(
skip_decay_matrix=skip_decay_matrix)
def solve(self, int_grid=None, grid_var='X', **kwargs):
"""Launches the solver.
The setting `integrator` in the config file decides which solver
to launch.
Args:
int_grid (list): list of depths at which results are recorded
grid_var (str): Can be depth `X` or something else (currently only `X` supported)
kwargs (dict): Arguments are passed directly to the solver methods.
"""
info(2, "Launching {0} solver".format(config.integrator))
if not kwargs.pop('skip_integration_path', False):
if int_grid is not None and np.any(np.diff(int_grid) < 0):
raise Exception('The X values in int_grid are required to be strickly',
'increasing.')
# Calculate integration path if not yet happened
self._calculate_integration_path(int_grid, grid_var)
else:
info(2,'Warning: integration path calculation skipped.')
phi0 = np.copy(self._phi0)
nsteps, dX, rho_inv, grid_idcs = self.integration_path
info(2, 'for {0} integration steps.'.format(nsteps))
import MCEq.solvers
start = time()
if config.kernel_config.lower() == 'numpy':
kernel = MCEq.solvers.solv_numpy
args = (nsteps, dX, rho_inv, self.int_m, self.dec_m, phi0,
grid_idcs)
elif (config.kernel_config.lower() == 'cuda'):
kernel = MCEq.solvers.solv_CUDA_sparse
try:
self.cuda_context.set_matrices(self.int_m, self.dec_m)
except AttributeError:
from MCEq.solvers import CUDASparseContext
self.cuda_context = CUDASparseContext(
self.int_m, self.dec_m, device_id=self._cuda_device)
args = (nsteps, dX, rho_inv, self.cuda_context, phi0, grid_idcs)
elif (config.kernel_config.lower() == 'mkl'):
kernel = MCEq.solvers.solv_MKL_sparse
args = (nsteps, dX, rho_inv, self.int_m, self.dec_m, phi0,
grid_idcs)
else:
raise Exception(
"Unsupported integrator setting '{0}'.".format(
config.kernel_config))
self._solution, self.grid_sol = kernel(*args)
info(
2, 'time elapsed during integration: {0:5.2f}sec'.format(time() -
start))
def _calculate_integration_path(self, int_grid, grid_var, force=False):
if (self.integration_path and np.alltrue(int_grid == self.int_grid)
and np.alltrue(self.grid_var == grid_var) and not force):
info(5, 'skipping calculation.')
return
self.int_grid, self.grid_var = int_grid, grid_var
if grid_var != 'X':
raise NotImplementedError(
'the choice of grid variable other than the depth X are not possible, yet.'
)
max_X = self.density_model.max_X
ri = self.density_model.r_X2rho
max_lint = self.matrix_builder.max_lint
max_ldec = self.matrix_builder.max_ldec
info(2, 'X_surface = {0:7.2f}g/cm2'.format(max_X))
dX_vec = []
rho_inv_vec = []
X = 0.0
step = 0
grid_step = 0
grid_idcs = []
if True or (max_ldec / self.density_model.max_den > max_lint
and config.leading_process == 'decays'):
info(3, "using decays as leading eigenvalues")
def delta_X(X):
return config.stability_margin / (max_ldec * ri(X))
elif config.leading_process == 'interactions':
info(2, "using interactions as leading eigenvalues")
def delta_X(X):
return config.stability_margin / max_lint
else:
def delta_X(X):
dX = min(
config.stability_margin / (max_ldec * ri(X)),
config.stability_margin / max_lint)
# if dX/self.density_model.max_X < 1e-7:
# raise Exception(
# 'Stiffness warning: dX <= 1e-7. Check configuration or' +
# 'manually call MCEqRun._calculate_integration_path(int_grid, "X", force=True).')
return dX
dXmax = config.dXmax
while X < max_X:
dX = min(delta_X(X), dXmax)
if (np.any(int_grid) and (grid_step < len(int_grid))
and (X + dX >= int_grid[grid_step])):
dX = int_grid[grid_step] - X
grid_idcs.append(step)
grid_step += 1
dX_vec.append(dX)
rho_inv_vec.append(ri(X))
X = X + dX
step += 1
# Integrate
dX_vec = np.array(dX_vec)
rho_inv_vec = np.array(rho_inv_vec)
self.integration_path = len(dX_vec), dX_vec, rho_inv_vec, grid_idcs
def n_particles(self, label, grid_idx=None, min_energy_cutoff=1e-1):
"""Returns number of particles of type `label` at a grid step above
an energy threshold for counting.
Args:
label (str): Particle name
grid_idx (int): Depth grid index (for profiles)
min_energy_cutoff (float): Energy threshold > mceq_config.e_min
"""
ie_min = np.argmin(
np.abs(self.e_bins -
self.e_bins[self.e_bins >= min_energy_cutoff][0]))
info(
10,
'Energy cutoff for particle number calculation {0:4.3e} GeV'.format(
self.e_bins[ie_min]))
info(
15,
'First bin is between {0:3.2e} and {1:3.2e} with midpoint {2:3.2e}'
.format(self.e_bins[ie_min], self.e_bins[ie_min + 1],
self.e_grid[ie_min]))
return np.sum(
self.get_solution(label, mag=0, integrate=True, grid_idx=grid_idx)[ie_min:])
def n_mu(self, grid_idx=None, min_energy_cutoff=1e-1):
"""Returns the number of positive and negative muons at a grid step above
`min_energy_cutoff`.
Args:
grid_idx (int): Depth grid index (for profiles)
min_energy_cutoff (float): Energy threshold > mceq_config.e_min
"""
return (self.n_particles('total_mu+', grid_idx=grid_idx, min_energy_cutoff=min_energy_cutoff) +
self.n_particles('total_mu-', grid_idx=grid_idx, min_energy_cutoff=min_energy_cutoff))
def n_e(self, grid_idx=None, min_energy_cutoff=1e-1):
"""Returns the number of electrons plus positrons at a grid step above
`min_energy_cutoff`.
Args:
grid_idx (int): Depth grid index (for profiles)
min_energy_cutoff (float): Energy threshold > mceq_config.e_min
"""
return (self.n_particles('e+', grid_idx=grid_idx, min_energy_cutoff=min_energy_cutoff) +
self.n_particles('e-', grid_idx=grid_idx, min_energy_cutoff=min_energy_cutoff))
def z_factor(self, projectile_pdg, secondary_pdg, definition='primary_e'):
"""Energy dependent Z-factor according to Thunman et al. (1996)"""
proj = self.pman[projectile_pdg]
sec = self.pman[secondary_pdg]
if not proj.is_projectile:
raise Exception('{0} is not a projectile particle.'.format(
proj.name))
info(
10, 'Computing e-dependent Zfactor for {0} -> {1}'.format(
proj.name, sec.name))
if not proj.is_secondary(sec):
raise Exception('{0} is not a secondary particle of {1}.'.format(
sec.name, proj.name))
if proj == 2112:
nuc_flux = self.pmodel.p_and_n_flux(self.e_grid)[2]
else:
nuc_flux = self.pmodel.p_and_n_flux(self.e_grid)[1]
zfac = np.zeros(self.dim)
smat = proj.hadr_yields[sec]
proj_cs = proj.inel_cross_section()
zfac = np.zeros_like(self.e_grid)
# Definition wrt CR energy (different from Thunman) on x-axis
if definition == 'primary_e':
min_energy = 2.
for p_eidx, e in enumerate(self.e_grid):
if e < min_energy:
min_idx = p_eidx + 1
continue
zfac[p_eidx] = np.sum(
smat[min_idx:p_eidx + 1, p_eidx] * nuc_flux[p_eidx] /
nuc_flux[min_idx:p_eidx + 1] * proj_cs[p_eidx] /
proj_cs[min_idx:p_eidx + 1])
return zfac
else:
# Like in Thunman et al. 1996
for p_eidx, _ in enumerate(self.e_grid):
zfac[p_eidx] = np.sum(smat[p_eidx, p_eidx:] *
nuc_flux[p_eidx:] / nuc_flux[p_eidx] *
proj_cs[p_eidx:] / proj_cs[p_eidx])
return zfac
def decay_z_factor(self, parent_pdg, child_pdg):
"""Energy dependent Z-factor according to Lipari (1993)."""
proj = self.pman[parent_pdg]
sec = self.pman[child_pdg]
if proj.is_stable:
raise Exception('{0} does not decay.'.format(proj.name))
info(
10, 'Computing e-dependent decay Zfactor for {0} -> {1}'.format(
proj.name, sec.name))
if not proj.is_child(sec):
raise Exception('{0} is not a a child particle of {1}.'.format(
sec.name, proj.name))
cr_gamma = self.pmodel.nucleon_gamma(self.e_grid)
zfac = np.zeros(self.dim)
zfac = np.zeros_like(self.e_grid)
for p_eidx, e in enumerate(self.e_grid):
# if e < min_energy:
# min_idx = p_eidx + 1
# continue
xlab, xdist = proj.dNdec_dxlab(e, sec)
zfac[p_eidx] = np.trapz(xlab**(-cr_gamma[p_eidx] - 2.) * xdist,
x=xlab)
return zfac
class MatrixBuilder(object):
"""This class constructs the interaction and decay matrices."""
def __init__(self, particle_manager):
self._pman = particle_manager
self._energy_grid = self._pman._energy_grid
self.int_m = None
self.dec_m = None
self._construct_differential_operator()
def construct_matrices(self, skip_decay_matrix=False):
r"""Constructs the matrices for calculation.
These are:
- :math:`\boldsymbol{M}_{int} = (-\boldsymbol{1} + \boldsymbol{C}){\boldsymbol{\Lambda}}_{int}`,
- :math:`\boldsymbol{M}_{dec} = (-\boldsymbol{1} + \boldsymbol{D}){\boldsymbol{\Lambda}}_{dec}`.
For debug_levels >= 2 some general information about matrix shape and the number of
non-zero elements is printed. The intermediate matrices :math:`\boldsymbol{C}` and
:math:`\boldsymbol{D}` are deleted afterwards to save memory.
Set the ``skip_decay_matrix`` flag to avoid recreating the decay matrix. This is not necessary
if, for example, particle production is modified, or the interaction model is changed.
Args:
skip_decay_matrix (bool): Omit re-creating D matrix
"""
from itertools import product
info(
3, "Start filling matrices. Skip_decay_matrix = {0}".format(
skip_decay_matrix))
self._fill_matrices(skip_decay_matrix=skip_decay_matrix)
cparts = self._pman.cascade_particles
# interaction part
# -I + C
# In first interaction mode it is just C
self.max_lint = 0.
for parent, child in product(cparts, cparts):
idx = (child.mceqidx, parent.mceqidx)
# Main diagonal
if child.mceqidx == parent.mceqidx and parent.can_interact:
# Subtract unity from the main diagonals
info(10, 'subtracting main C diagonal from', child.name,
parent.name)
self.C_blocks[idx][np.diag_indices(self.dim)] -= 1.
if idx in self.C_blocks:
# Multiply with Lambda_int and keep track the maximal
# interaction length for the calculation of integration steps
self.max_lint = np.max([
self.max_lint,
np.max(parent.inverse_interaction_length())
])
self.C_blocks[idx] *= parent.inverse_interaction_length()
if child.mceqidx == parent.mceqidx and parent.has_contloss:
if config.enable_muon_energy_loss and abs(
parent.pdg_id[0]) == 13:
info(5, 'Cont. loss for', parent.name)
self.C_blocks[idx] += self.cont_loss_operator(
parent.pdg_id)
if config.enable_em_ion and abs(parent.pdg_id[0]) == 11:
info(5, 'Cont. loss for', parent.name)
self.C_blocks[idx] += self.cont_loss_operator(
parent.pdg_id)
self.int_m = self._csr_from_blocks(self.C_blocks)
# -I + D
if not skip_decay_matrix or self.dec_m is None:
self.max_ldec = 0.
for parent, child in product(cparts, cparts):
idx = (child.mceqidx, parent.mceqidx)
# Main diagonal
if child.mceqidx == parent.mceqidx and not parent.is_stable:
# Subtract unity from the main diagonals
info(10, 'subtracting main D diagonal from', child.name,
parent.name)
self.D_blocks[idx][np.diag_indices(self.dim)] -= 1.
if idx not in self.D_blocks:
info(25, parent.pdg_id[0], child.pdg_id, 'not in D_blocks')
continue
# Multiply with Lambda_dec and keep track of the
# maximal decay length for the calculation of integration steps
self.max_ldec = max(
[self.max_ldec,
np.max(parent.inverse_decay_length())])
self.D_blocks[idx] *= parent.inverse_decay_length()
self.dec_m = self._csr_from_blocks(self.D_blocks)
for mname, mat in [('C', self.int_m), ('D', self.dec_m)]:
mat_density = (float(mat.nnz) / float(np.prod(mat.shape)))
info(5, "{0} Matrix info:".format(mname))
info(5, " density : {0:3.2%}".format(mat_density))
info(5, " shape : {0} x {1}".format(*mat.shape))
info(5, " nnz : {0}".format(mat.nnz))
info(10, " sum :", mat.sum())
info(3, "Done filling matrices.")
return self.int_m, self.dec_m
def _average_operator(self, op_mat):
"""Averages the continuous loss operator by performing
1/max_step explicit euler steps"""
n_steps = int(1. / config.loss_step_for_average)
info(
10,
'Averaging continuous loss using {0} intermediate steps.'.format(
n_steps))
op_step = np.eye(
self._energy_grid.d) + op_mat * config.loss_step_for_average
return np.linalg.matrix_power(op_step, n_steps) - np.eye(
self._energy_grid.d)
def cont_loss_operator(self, pdg_id):
"""Returns continuous loss operator that can be summed with appropriate
position in the C matrix."""
op_mat = -np.diag(1 / self._energy_grid.c).dot(
self.op_matrix.dot(np.diag(self._pman[pdg_id].dEdX)))
if config.average_loss_operator:
return self._average_operator(op_mat)
else:
return op_mat
@property
def dim(self):
"""Energy grid (dimension)"""
return self._pman.dim
@property
def dim_states(self):
"""Number of cascade particles times dimension of grid
(dimension of the equation system)"""
return self._pman.dim_states
def _zero_mat(self):
"""Returns a new square zero valued matrix with dimensions of grid.
"""
return np.zeros((self._pman.dim, self._pman.dim))
def _csr_from_blocks(self, blocks):
"""Construct a csr matrix from a dictionary of submatrices (blocks)
Note::
It's super pain the a** to construct a properly indexed sparse matrix
directly from the blocks, since bmat totally messes up the order.
"""
from scipy.sparse import csr_matrix
new_mat = np.zeros((self.dim_states, self.dim_states))
for (c, p), d in six.iteritems(blocks):
rc, rp = self._pman.mceqidx2pref[c], self._pman.mceqidx2pref[p]
try:
new_mat[rc.lidx:rc.uidx, rp.lidx:rp.uidx] = d
except ValueError:
raise Exception(
'Dimension mismatch: matrix {0}x{1}, p={2}:({3},{4}), c={5}:({6},{7})'
.format(self.dim_states, self.dim_states, rp.name, rp.lidx,
rp.uidx, rc.name, rc.lidx, rc.uidx))
return csr_matrix(new_mat)
def _follow_chains(self, p, pprod_mat, p_orig, idcs, propmat, reclev=0):
"""Some recursive magic.
"""
info(40, reclev * '\t', 'entering with', p.name)
# print 'orig, p', p_orig.pdg_id, p.pdg_id
for d in p.children:
info(40, reclev * '\t', 'following to', d.name)
if not d.is_resonance:
# print 'adding stuff', p_orig.pdg_id, p.pdg_id, d.pdg_id
dprop = self._zero_mat()
p._assign_decay_idx(d, idcs, d.hadridx, dprop)
propmat[(d.mceqidx, p_orig.mceqidx)] += dprop.dot(pprod_mat)
if config.debug_level >= 20:
pstr = 'res'
dstr = 'Mchain'
if idcs == p.hadridx:
pstr = 'prop'
dstr = 'Mprop'
info(
40, reclev * '\t',
'setting {0}[({1},{3})->({2},{4})]'.format(
dstr, p_orig.name, d.name, pstr, 'prop'))
if d.is_mixed or d.is_resonance:
dres = self._zero_mat()
p._assign_decay_idx(d, idcs, d.residx, dres)
reclev += 1
self._follow_chains(d, dres.dot(pprod_mat), p_orig, d.residx,
propmat, reclev)
else:
info(20, reclev * '\t', '\t terminating at', d.name)
def _fill_matrices(self, skip_decay_matrix=False):
"""Generates the interaction and decay matrices from scratch.
"""
from collections import defaultdict
# Fill decay matrix blocks
if not skip_decay_matrix or self.dec_m is None:
# Initialize empty D matrix
self.D_blocks = defaultdict(lambda: self._zero_mat())
for p in self._pman.cascade_particles:
# Fill parts of the D matrix related to p as mother
if not p.is_stable and bool(p.children) and not p.is_tracking:
self._follow_chains(p,
np.diag(np.ones((self.dim))),
p,
p.hadridx,
self.D_blocks,
reclev=0)
else:
info(20, p.name, 'stable or not added to D matrix')
# Initialize empty C blocks
self.C_blocks = defaultdict(lambda: self._zero_mat())
for p in self._pman.cascade_particles:
# if p doesn't interact, skip interaction matrices
if not p.is_projectile:
if p.is_hadron:
info(
1, 'No interactions by {0} ({1}).'.format(
p.name, p.pdg_id))
continue
for s in p.hadr_secondaries:
# if s not in self.pman.cascade_particles:
# print 'Doing nothing with', p.pdg_id, s.pdg_id
# continue
if not s.is_resonance:
cmat = self._zero_mat()
p._assign_hadr_dist_idx(s, p.hadridx, s.hadridx, cmat)
self.C_blocks[(s.mceqidx, p.mceqidx)] += cmat
cmat = self._zero_mat()
p._assign_hadr_dist_idx(s, p.hadridx, s.residx, cmat)
self._follow_chains(s,
cmat,
p,
s.residx,
self.C_blocks,
reclev=1)
def _construct_differential_operator(self):
"""Constructs a derivative operator for the contiuous losses.
This implmentation uses a 6th-order finite differences operator,
only depends on the energy grid. This is an operator for a sub-matrix
of dimension (energy grid, energy grid) for a single particle. It
can be likewise applied to all particle species. The dEdX values are
applied later in ...
"""
# First rows of operator matrix (values are truncated at the edges
# of a matrix.)
diags_leftmost = [0, 1, 2, 3]
coeffs_leftmost = [-11, 18, -9, 2]
denom_leftmost = 6
diags_left_1 = [-1, 0, 1, 2, 3]
coeffs_left_1 = [-3, -10, 18, -6, 1]
denom_left_1 = 12
diags_left_2 = [-2, -1, 0, 1, 2, 3]
coeffs_left_2 = [3, -30, -20, 60, -15, 2]
denom_left_2 = 60
# Centered diagonals
# diags = [-3, -2, -1, 1, 2, 3]
# coeffs = [-1, 9, -45, 45, -9, 1]
# denom = 60.
diags = diags_left_2
coeffs = coeffs_left_2
denom = 60.
# Last rows at the right of operator matrix
diags_right_2 = [-d for d in diags_left_2[::-1]]
coeffs_right_2 = [-d for d in coeffs_left_2[::-1]]
denom_right_2 = denom_left_2
diags_right_1 = [-d for d in diags_left_1[::-1]]
coeffs_right_1 = [-d for d in coeffs_left_1[::-1]]
denom_right_1 = denom_left_1
diags_rightmost = [-d for d in diags_leftmost[::-1]]
coeffs_rightmost = [-d for d in coeffs_leftmost[::-1]]
denom_rightmost = denom_leftmost
h = np.log(self._energy_grid.b[1:] / self._energy_grid.b[:-1])
dim_e = self._energy_grid.d
last = dim_e - 1
op_matrix = np.zeros((dim_e, dim_e))
op_matrix[0, np.asarray(diags_leftmost)] = np.asarray(
coeffs_leftmost) / (denom_leftmost * h[0])
op_matrix[1, 1 +
np.asarray(diags_left_1)] = np.asarray(coeffs_left_1) / (
denom_left_1 * h[1])
op_matrix[2, 2 +
np.asarray(diags_left_2)] = np.asarray(coeffs_left_2) / (
denom_left_2 * h[2])
op_matrix[last, last + np.asarray(diags_rightmost)] = np.asarray(
coeffs_rightmost) / (denom_rightmost * h[last])
op_matrix[last - 1, last - 1 +
np.asarray(diags_right_1)] = np.asarray(coeffs_right_1) / (
denom_right_1 * h[last - 1])
op_matrix[last - 2, last - 2 +
np.asarray(diags_right_2)] = np.asarray(coeffs_right_2) / (
denom_right_2 * h[last - 2])
for row in range(3, dim_e - 3):
op_matrix[row, row +
np.asarray(diags)] = np.asarray(coeffs) / (denom *
h[row])
self.op_matrix = op_matrix
| 54,114 | 40.626923 | 104 | py |
MCEq | MCEq-master/MCEq/particlemanager.py |
import six
from math import copysign
import numpy as np
import mceq_config as config
from MCEq.misc import info, print_in_rows, getAZN
from particletools.tables import PYTHIAParticleData
info(5, 'Initialization of PYTHIAParticleData object')
_pdata = PYTHIAParticleData()
backward_compatible_namestr = {
'nu_mu': 'numu',
'nu_mubar': 'antinumu',
'nu_e': 'nue',
'nu_ebar': 'antinue',
'nu_tau': 'nutau',
'nu_taubar': 'antinutau'
}
# Replace particle names for neutrinos with those used
# in previous MCEq versions
def _pname(pdg_id_or_name):
"""Replace some particle names from pythia database with those from previous
MCEq versions for backward compatibility."""
pythia_name = _pdata.name(pdg_id_or_name)
if pythia_name in backward_compatible_namestr:
return backward_compatible_namestr[pythia_name]
return pythia_name
class MCEqParticle(object):
"""Bundles different particle properties for simplified
availability of particle properties in :class:`MCEq.core.MCEqRun`.
Args:
pdg_id (int): PDG ID of the particle
egrid (np.array, optional): energy grid (centers)
cs_db (object, optional): reference to an instance of
:class:`InteractionYields`
"""
def __init__(self,
pdg_id,
helicity,
energy_grid=None,
cs_db=None,
init_pdata_defaults=True):
#: (bool) if it's an electromagnetic particle
self.is_em = abs(pdg_id) == 11 or pdg_id == 22
#: (int) helicity -1, 0, 1 (0 means undefined or average)
self.helicity = helicity
#: (bool) particle is a nucleus (not yet implemented)
self.is_nucleus = False
#: (bool) particle is a hadron
self.is_hadron = False
#: (bool) particle is a lepton
self.is_lepton = False
#: (float) ctau in cm
self.ctau = None
#: (float) mass in GeV
self.mass = None
#: (str) species name in string representation
self.name = None
#: Mass, charge, neutron number
self.A, self.Z, self.N = getAZN(pdg_id)
#: (bool) particle has both, hadron and resonance properties
self.is_mixed = False
#: (bool) if particle has just resonance behavior
self.is_resonance = False
#: (bool) particle is interacting projectile
self.is_projectile = False
#: (bool) particle is stable
self.is_stable = False or pdg_id in config.adv_set['disable_decays']
#: (bool) can_interact
self.can_interact = False
#: (bool) has continuous losses dE/dX defined
self.has_contloss = False
#: (np.array) continuous losses in GeV/(g/cm2)
self.dEdX = None
#: (bool) is a tracking particle
self.is_tracking = False
#: decay channels if any
self.decay_dists = {}
#: (int) Particle Data Group Monte Carlo particle ID
self.pdg_id = (pdg_id, helicity)
#: (int) Unique PDG ID that is different for tracking particles
self.unique_pdg_id = (pdg_id, helicity)
#: (int) MCEq ID
self.mceqidx = -1
#: (float) mixing energy, transition between hadron and
# resonance behavior
self.E_mix = 0
#: (int) energy grid index, where transition between
# hadron and resonance occurs
self.mix_idx = 0
#: (float) critical energy in air at the surface
self.E_crit = 0
# Energy and cross section dependent inits
self.current_cross_sections = None
self._energy_grid = energy_grid
# Variables for hadronic interaction
self.current_hadronic_model = None
self.hadr_secondaries = []
self.hadr_yields = {}
# Variables for decays
self.children = []
self.decay_dists = {}
# A_target
self.A_target = config.A_target
if init_pdata_defaults:
self._init_defaults_from_pythia_database()
if self._energy_grid is not None and cs_db is not None:
#: interaction cross section in 1/cm2
self.set_cs(cs_db)
def _init_defaults_from_pythia_database(self):
"""Init some particle properties from :mod:`particletools.tables`."""
#: (bool) particle is a nucleus (not yet implemented)
self.is_nucleus = _pdata.is_nucleus(self.pdg_id[0])
#: (bool) particle is a hadron
self.is_hadron = _pdata.is_hadron(self.pdg_id[0])
#: (bool) particle is a hadron
self.is_lepton = _pdata.is_lepton(self.pdg_id[0])
#: Mass, charge, neutron number
self.A, self.Z, self.N = getAZN(self.pdg_id[0])
#: (float) ctau in cm
self.ctau = _pdata.ctau(self.pdg_id[0])
#: (float) mass in GeV
self.mass = _pdata.mass(self.pdg_id[0])
#: (str) species name in string representation
name = _pname(self.pdg_id[0]) if self.name is None else self.name
if self.helicity == -1:
name += '_l'
elif self.helicity == +1:
name += '_r'
self.name = name
#: (bool) particle is stable
#: TODO the exclusion of neutron decays is a hotfix
self.is_stable = (not self.ctau < np.inf or
self.pdg_id[0] in config.adv_set['disable_decays'])
def init_custom_particle_data(self, name, pdg_id, helicity, ctau, mass,
**kwargs):
"""Add custom particle type. (Incomplete and not debugged)"""
#: (int) Particle Data Group Monte Carlo particle ID
self.pdg_id = (pdg_id, helicity)
#: (bool) if it's an electromagnetic particle
self.is_em = kwargs.pop('is_em', abs(pdg_id) == 11 or pdg_id == 22)
#: (bool) particle is a nucleus (not yet implemented)
self.is_nucleus = kwargs.pop('is_nucleus',
_pdata.is_nucleus(self.pdg_id[0]))
#: (bool) particle is a hadron
self.is_hadron = kwargs.pop('is_hadron',
_pdata.is_hadron(self.pdg_id[0]))
#: (bool) particle is a hadron
self.is_lepton = kwargs.pop('is_lepton',
_pdata.is_lepton(self.pdg_id[0]))
#: Mass, charge, neutron number
self.A, self.Z, self.N = getAZN(self.pdg_id[0])
#: (float) ctau in cm
self.ctau = ctau
#: (float) mass in GeV
self.mass = mass
#: (str) species name in string representation
self.name = name
#: (bool) particle is stable
self.is_stable = not self.ctau < np.inf
def set_cs(self, cs_db):
"""Set cross section adn recalculate the dependent variables"""
info(11, 'Obtaining cross sections for', self.pdg_id)
self.current_cross_sections = cs_db.iam
self.cs = cs_db[self.pdg_id[0]]
if sum(self.cs) > 0:
self.can_interact = True
else:
self.can_interact = False
self._critical_energy()
self._calculate_mixing_energy()
def set_hadronic_channels(self, hadronic_db, pmanager):
"""Changes the hadronic interaction model.
Replaces indexing of the yield dictionary from PDG IDs
with references from partilcle manager.
"""
self.current_hadronic_model = hadronic_db.iam
# Collect MCEqParticle references to children
# instead of PDG ID as index
if self.pdg_id in hadronic_db.parents and not self.is_tracking:
self.is_projectile = True
self.hadr_secondaries = [
pmanager.pdg2pref[pid]
for pid in hadronic_db.relations[self.pdg_id]
]
self.hadr_yields = {}
for s in self.hadr_secondaries:
self.hadr_yields[s] = hadronic_db.get_matrix(
self.pdg_id, s.pdg_id)
else:
self.is_projectile = False
self.hadr_secondaries = []
self.hadr_yields = {}
def add_hadronic_production_channel(self, child, int_matrix):
"""Add a new particle that is produced in hadronic interactions.
The int_matrix is expected to be in the correct shape and scale
as the other interaction (dN/dE(i,j)) matrices. Energy conservation
is not checked.
"""
if not self.is_projectile:
raise Exception('The particle should be a projectile.')
if child in self.hadr_secondaries:
info(1, 'Child {0} has been already added.'.format(child.name))
return
self.hadr_secondaries.append(child)
self.hadr_yields[child] = int_matrix
def add_decay_channel(self, child, dec_matrix, force=False):
"""Add a decay channel.
The branching ratios are not renormalized and one needs to take care
of this externally.
"""
if self.is_stable:
raise Exception('Cannot add decay channel to stable particle.')
if child in self.children and not force:
info(1, 'Child {0} has been already added.'.format(child.name))
return
elif child in self.children and force:
info(1, 'Overwriting decay matrix of child {0}.'.format(child.name))
self.decay_dists[child] = dec_matrix
return
self.children.append(child)
self.decay_dists[child] = dec_matrix
def set_decay_channels(self, decay_db, pmanager):
"""Populates decay channel and energy distributions"""
if self.is_stable or self.is_tracking:
# Variables for decays
self.children = []
self.decay_dists = {}
return
if self.pdg_id not in decay_db.parents:
raise Exception('Unstable particle without decay distribution:',
self.pdg_id, self.name)
self.children = []
self.children = [pmanager[d] for d in decay_db.children(self.pdg_id)]
self.decay_dists = {}
for c in self.children:
self.decay_dists[c] = decay_db.get_matrix(self.pdg_id, c.pdg_id)
def track_decays(self, tracking_particle):
children_d = {}
for c in self.children:
children_d[c.pdg_id] = c
if tracking_particle.pdg_id not in list(children_d):
info(
17, 'Parent particle {0} does not decay into {1}'.format(
self.name, tracking_particle.name))
return False
# Copy the decay distribution from original PDG
self.children.append(tracking_particle)
self.decay_dists[tracking_particle] = self.decay_dists[children_d[
tracking_particle.pdg_id]]
return True
def track_interactions(self, tracking_particle):
secondaries_d = {}
for s in self.hadr_secondaries:
secondaries_d[s.pdg_id] = s
if tracking_particle.pdg_id not in list(secondaries_d):
info(
17, 'Parent particle {0} does not produce {1} at the vertex'.
format(self.name, tracking_particle.name))
return False
# Copy the decay distribution from original PDG
self.hadr_secondaries.append(tracking_particle)
self.hadr_yields[tracking_particle] = self.hadr_yields[secondaries_d[
tracking_particle.pdg_id]]
return True
def is_secondary(self, particle_ref):
"""`True` if this projectile and produces particle `particle_ref`."""
if not isinstance(particle_ref, self.__class__):
raise Exception('Argument not of MCEqParticle type.')
return particle_ref in self.hadr_secondaries
def is_child(self, particle_ref):
"""`True` if this particle decays into `particle_ref`."""
if not isinstance(particle_ref, self.__class__):
raise Exception('Argument not of MCEqParticle type.')
return particle_ref in self.children
@property
def hadridx(self):
"""Returns index range where particle behaves as hadron.
Returns:
:func:`tuple` (int,int): range on energy grid
"""
return (self.mix_idx, self._energy_grid.d)
@property
def residx(self):
"""Returns index range where particle behaves as resonance.
Returns:
:func:`tuple` (int,int): range on energy grid
"""
return (0, self.mix_idx)
@property
def lidx(self):
"""Returns lower index of particle range in state vector.
Returns:
(int): lower index in state vector :attr:`MCEqRun.phi`
"""
return self.mceqidx * self._energy_grid.d
@property
def uidx(self):
"""Returns upper index of particle range in state vector.
Returns:
(int): upper index in state vector :attr:`MCEqRun.phi`
"""
return (self.mceqidx + 1) * self._energy_grid.d
def inverse_decay_length(self, cut=True):
r"""Returns inverse decay length (or infinity (np.inf), if
particle is stable), where the air density :math:`\rho` is
factorized out.
Args:
E (float) : energy in laboratory system in GeV
cut (bool): set to zero in 'resonance' regime
Returns:
(float): :math:`\frac{\rho}{\lambda_{dec}}` in 1/cm
"""
try:
dlen = self.mass / self.ctau / (self._energy_grid.c + self.mass)
if cut:
dlen[0:self.mix_idx] = 0.
# Correction for bin average, since dec. length is a steep falling
# function. This factor averages the value over bin length for
# 10 bins per decade.
# return 0.989 * dlen
return dlen
except ZeroDivisionError:
return np.ones_like(self._energy_grid.d) * np.inf
def inel_cross_section(self, mbarn=False):
"""Returns inelastic cross section.
Args:
mbarn (bool) : if True cross section in mb otherwise in cm**2
Returns:
(float): :math:`\\sigma_{\\rm inel}` in mb or cm**2
"""
#: unit - :math:`\text{GeV} \cdot \text{fm}`
GeVfm = 0.19732696312541853
#: unit - :math:`\text{GeV} \cdot \text{cm}`
GeVcm = GeVfm * 1e-13
#: unit - :math:`\text{GeV}^2 \cdot \text{mbarn}`
GeV2mbarn = 10.0 * GeVfm**2
#: unit conversion - :math:`\text{mbarn} \to \text{cm}^2`
mbarn2cm2 = GeV2mbarn / GeVcm**2
if mbarn:
return mbarn2cm2 * self.cs
return self.cs
def inverse_interaction_length(self):
"""Returns inverse interaction length for A_target given by config.
Returns:
(float): :math:`\\frac{1}{\\lambda_{int}}` in cm**2/g
"""
m_target = self.A_target * 1.672621 * 1e-24 # <A> * m_proton [g]
return self.cs / m_target
def _assign_hadr_dist_idx(self, child, projidx, chidx, cmat):
"""Copies a subset, defined between indices ``projidx`` and ``chiidx``
from the ``hadr_yields`` into ``cmat``
Args:
child (int): PDG ID of final state child/secondary particle
projidx (int,int): tuple containing index range relative
to the projectile's energy grid
dtridx (int,int): tuple containing index range relative
to the child's energy grid
cmat (numpy.array): array reference to the interaction matrix
"""
cmat[chidx[0]:chidx[1], projidx[0]:projidx[1]] = self.hadr_yields[
child][chidx[0]:chidx[1], projidx[0]:projidx[1]]
def _assign_decay_idx(self, child, projidx, chidx, cmat):
"""Copies a subset, defined between indices ``projidx`` and ``chiidx``
from the ``hadr_yields`` into ``cmat``
Args:
child (int): PDG ID of final state child/secondary particle
projidx (int,int): tuple containing index range relative
to the projectile's energy grid
dtridx (int,int): tuple containing index range relative
to the child's energy grid
cmat (numpy.array): array reference to the interaction matrix
"""
cmat[chidx[0]:chidx[1], projidx[0]:projidx[1]] = self.decay_dists[
child][chidx[0]:chidx[1], projidx[0]:projidx[1]]
def dN_dxlab(self, kin_energy, sec_pdg, verbose=True, **kwargs):
r"""Returns :math:`dN/dx_{\rm Lab}` for interaction energy close
to ``kin_energy`` for hadron-air collisions.
The function respects modifications applied via :func:`_set_mod_pprod`.
Args:
kin_energy (float): approximate interaction kin_energy
prim_pdg (int): PDG ID of projectile
sec_pdg (int): PDG ID of secondary particle
verbose (bool): print out the closest enerkin_energygy
Returns:
(numpy.array, numpy.array): :math:`x_{\rm Lab}`, :math:`dN/dx_{\rm Lab}`
"""
eidx = (np.abs(self._energy_grid.c - kin_energy)).argmin()
en = self._energy_grid.c[eidx]
info(10, 'Nearest energy, index: ', en, eidx, condition=verbose)
m = self.hadr_yields[sec_pdg]
xl_grid = (self._energy_grid.c[:eidx + 1]) / en
xl_dist = en * xl_grid * m[:eidx +
1, eidx] / self._energy_grid.w[:eidx + 1]
return xl_grid, xl_dist
def dNdec_dxlab(self, kin_energy, sec_pdg, verbose=True, **kwargs):
r"""Returns :math:`dN/dx_{\rm Lab}` for interaction energy close
to ``kin_energy`` for hadron-air collisions.
The function respects modifications applied via :func:`_set_mod_pprod`.
Args:
kin_energy (float): approximate interaction energy
prim_pdg (int): PDG ID of projectile
sec_pdg (int): PDG ID of secondary particle
verbose (bool): print out the closest energy
Returns:
(numpy.array, numpy.array): :math:`x_{\rm Lab}`, :math:`dN/dx_{\rm Lab}`
"""
eidx = (np.abs(self._energy_grid.c - kin_energy)).argmin()
en = self._energy_grid.c[eidx]
info(10, 'Nearest energy, index: ', en, eidx, condition=verbose)
m = self.decay_dists[sec_pdg]
xl_grid = (self._energy_grid.c[:eidx + 1]) / en
xl_dist = en * xl_grid * m[:eidx +
1, eidx] / self._energy_grid.w[:eidx + 1]
return xl_grid, xl_dist
def dN_dEkin(self, kin_energy, sec_pdg, verbose=True, **kwargs):
r"""Returns :math:`dN/dE_{\rm Kin}` in lab frame for an interaction energy
close to ``kin_energy`` (total) for hadron-air collisions.
The function respects modifications applied via :func:`_set_mod_pprod`.
Args:
kin_energy (float): approximate interaction energy
prim_pdg (int): PDG ID of projectile
sec_pdg (int): PDG ID of secondary particle
verbose (bool): print out the closest energy
Returns:
(numpy.array, numpy.array): :math:`x_{\rm Lab}`, :math:`dN/dx_{\rm Lab}`
"""
eidx = (np.abs(self._energy_grid.c - kin_energy)).argmin()
en = self._energy_grid.c[eidx]
info(10, 'Nearest energy, index: ', en, eidx, condition=verbose)
m = self.hadr_yields[sec_pdg]
ekin_grid = self._energy_grid.c
elab_dist = m[:eidx + 1, eidx] / self._energy_grid.w[eidx]
return ekin_grid[:eidx + 1], elab_dist
def dN_dxf(self,
energy,
prim_pdg,
sec_pdg,
pos_only=True,
verbose=True,
**kwargs):
r"""Returns :math:`dN/dx_{\rm F}` in c.m. for interaction energy close
to ``energy`` (lab. not kinetic) for hadron-air collisions.
The function respects modifications applied via :func:`_set_mod_pprod`.
Args:
energy (float): approximate interaction lab. energy
prim_pdg (int): PDG ID of projectile
sec_pdg (int): PDG ID of secondary particle
verbose (bool): print out the closest energy
Returns:
(numpy.array, numpy.array): :math:`x_{\rm F}`, :math:`dN/dx_{\rm F}`
"""
if not hasattr(self, '_ptav_sib23c'):
# Load spline of average pt distribution as a funtion of log(E_lab) from sib23c
import pickle
from os.path import join
self._ptav_sib23c = pickle.load(
open(join(config.data_dir, 'sibyll23c_aux.ppd'), 'rb'))[0]
def xF(xL, Elab, ppdg):
m = {2212: 0.938, 211: 0.139, 321: 0.493}
mp = m[2212]
Ecm = np.sqrt(2 * Elab * mp + 2 * mp**2)
Esec = xL * Elab
betacm = np.sqrt((Elab - mp) / (Elab + mp))
gammacm = (Elab + mp) / Ecm
avpt = self._ptav_sib23c[ppdg](
np.log(np.sqrt(Elab**2) - m[np.abs(ppdg)]**2))
xf = 2 * (-betacm * gammacm * Esec + gammacm *
np.sqrt(Esec**2 - m[np.abs(ppdg)]**2 - avpt**2)) / Ecm
dxl_dxf = 1. / (
2 *
(-betacm * gammacm * Elab + xL * Elab**2 * gammacm / np.sqrt(
(xL * Elab)**2 - m[np.abs(ppdg)]**2 - avpt**2)) / Ecm)
return xf, dxl_dxf
eidx = (np.abs(self._energy_grid.c + self.mass - energy)).argmin()
en = self._energy_grid.c[eidx] + self.mass
info(2, 'Nearest energy, index: ', en, eidx, condition=verbose)
m = self.hadr_yields[sec_pdg]
xl_grid = (self._energy_grid.c[:eidx + 1] + self.mass) / en
xl_dist = xl_grid * en * m[:eidx + 1, eidx] / np.diag(
self._energy_grid.w)[:eidx + 1]
xf_grid, dxl_dxf = xF(xl_grid, en, sec_pdg)
xf_dist = xl_dist * dxl_dxf
if pos_only:
xf_dist = xf_dist[xf_grid >= 0]
xf_grid = xf_grid[xf_grid >= 0]
return xf_grid, xf_dist
return xf_grid, xf_dist
def _critical_energy(self):
"""Returns critical energy where decay and interaction
are balanced.
Approximate value in Air.
Returns:
(float): :math:`\\frac{m\\ 6.4 \\text{km}}{c\\tau}` in GeV
"""
if self.is_stable or self.ctau <= 0.:
self.E_crit = np.inf
else:
self.E_crit = self.mass * 6.4e5 / self.ctau
def _calculate_mixing_energy(self):
"""Calculates interaction/decay length in Air and decides if
the particle has resonance and/or hadron behavior.
Class attributes :attr:`is_mixed`, :attr:`E_mix`, :attr:`mix_idx`,
:attr:`is_resonance` are calculated here. If the option `no_mixing`
is set in config.adv_config particle is forced to be a resonance
or hadron behavior.
Args:
e_grid (numpy.array): energy grid of size :attr:`d`
max_density (float): maximum density on the integration path (largest
decay length)
"""
cross_over = config.hybrid_crossover
max_density = config.max_density
d = self._energy_grid.d
inv_intlen = self.inverse_interaction_length()
inv_declen = self.inverse_decay_length()
# If particle is stable, no "mixing" necessary
if (not np.any(np.nan_to_num(inv_declen) > 0.)
or abs(self.pdg_id[0]) in config.adv_set["exclude_from_mixing"]
or config.adv_set['no_mixing']
or self.pdg_id[0] in config.adv_set['disable_decays']):
self.mix_idx = 0
self.is_mixed = False
self.is_resonance = False
return
# If particle is forced to be a "resonance"
if (np.abs(self.pdg_id[0]) in config.adv_set["force_resonance"]):
self.mix_idx = d - 1
self.E_mix = self._energy_grid.c[self.mix_idx]
self.is_mixed = False
self.is_resonance = True
# Particle can interact and decay
elif self.can_interact and not self.is_stable:
# This is lambda_dec / lambda_int
threshold = np.zeros_like(inv_intlen)
mask = inv_declen != 0.
threshold[mask] = inv_intlen[mask] * max_density / inv_declen[mask]
del mask
self.mix_idx = np.where(threshold > cross_over)[0][0]
self.E_mix = self._energy_grid.c[self.mix_idx]
self.is_mixed = True
self.is_resonance = False
# These particles don't interact but can decay (e.g. tau leptons)
elif not self.can_interact and not self.is_stable:
mask = inv_declen != 0.
self.mix_idx = np.where(
max_density / inv_declen > config.dXmax)[0][0]
self.E_mix = self._energy_grid.c[self.mix_idx]
self.is_mixed = True
self.is_resonance = False
# Particle is stable but that should be handled above
else:
print(self.name, "This case shouldn't occur.")
threshold = np.inf
self.mix_idx = 0
self.is_mixed = False
self.is_resonance = False
def __eq__(self, other):
"""Checks name for equality"""
if isinstance(other, MCEqParticle):
return self.name == other.name
else:
return NotImplemented
def __neq__(self, other):
"""Checks name for equality"""
if isinstance(other, MCEqParticle):
return self.name != other.name
else:
return NotImplemented
def __hash__(self):
"""Instruction for comuting the hash"""
return hash(self.name)
def __repr__(self):
a_string = ("""
{0}:
is_hadron : {1}
is_lepton : {2}
is_nucleus : {3}
is_stable : {4}
is_mixed : {5}
is_resonance : {6}
is_tracking : {7}
is_projectile : {8}
mceqidx : {9}
E_mix : {10:2.1e} GeV\n""").format(
self.name, self.is_hadron, self.is_lepton, self.is_nucleus,
self.is_stable, self.is_mixed, self.is_resonance, self.is_tracking,
self.is_projectile, self.mceqidx, self.E_mix)
return a_string
class ParticleManager(object):
"""Database for objects of :class:`MCEqParticle`.
Authors:
Anatoli Fedynitch (DESY)
Jonas Heinze (DESY)
"""
def __init__(self, pdg_id_list, energy_grid, cs_db, mod_table=None):
# (dict) Dimension of primary grid
self._energy_grid = energy_grid
# Particle index shortcuts
#: (dict) Converts PDG ID to index in state vector
self.pdg2mceqidx = {}
#: (dict) Converts particle name to index in state vector
self.pname2mceqidx = {}
#: (dict) Converts PDG ID to reference of
# :class:`particlemanager.MCEqParticle`
self.pdg2pref = {}
#: (dict) Converts particle name to reference of
#: class:`particlemanager.MCEqParticle`
self.pname2pref = {}
#: (dict) Converts MCEq index to reference of
#: class:`particlemanager.MCEqParticle`
self.mceqidx2pref = {}
#: (dict) Converts index in state vector to PDG ID
self.mceqidx2pdg = {}
#: (dict) Converts index in state vector to reference
# of :class:`particlemanager.MCEqParticle`
self.mceqidx2pname = {}
# Save setup of tracked particles to reapply the relations
# when models change
self.tracking_relations = {}
#: (int) Total number of species
self.nspec = 0
# save currently applied cross section model
self.current_cross_sections = None
# save currently applied hadronic model
self.current_hadronic_model = None
# Cross section database
self._cs_db = cs_db
# Dictionary to save te tracking particle config
self.tracking_relations = []
# Save the tracking relations requested by default tracking
self._tracking_requested = []
self._init_categories(particle_pdg_list=pdg_id_list)
self.print_particle_tables(10)
def set_cross_sections_db(self, cs_db):
"""Sets the inelastic cross section to each interacting particle.
This applies to most of the hadrons and does not imply that the
particle becomes a projectile. parents need in addition defined
hadronic channels.
"""
info(5, 'Setting cross section particle variables.')
if self.current_cross_sections == cs_db.iam:
info(10, 'Same cross section model not applied to particles.')
return
for p in self.cascade_particles:
p.set_cs(cs_db)
self.current_cross_sections = cs_db.iam
self._update_particle_tables()
def set_decay_channels(self, decay_db):
"""Attaches the references to the decay yield tables to
each unstable particle"""
info(5, 'Setting decay info for particles.')
for p in self.all_particles:
p.set_decay_channels(decay_db, self)
self._restore_tracking_setup()
self._update_particle_tables()
def set_interaction_model(self,
cs_db,
hadronic_db,
updated_parent_list=None,
force=False):
"""Attaches the references to the hadronic yield tables to
each projectile particle"""
info(5, 'Setting hadronic secondaries for particles.')
if (self.current_hadronic_model == hadronic_db.iam and
not force and updated_parent_list is None):
info(10, 'Same hadronic model not applied to particles.')
return
if updated_parent_list is not None:
self._init_categories(updated_parent_list)
for p in self.cascade_particles:
p.set_cs(cs_db)
p.set_hadronic_channels(hadronic_db, self)
self.current_hadronic_model = hadronic_db.iam
self._update_particle_tables()
def set_continuous_losses(self, contloss_db):
"""Set continuous losses terms to particles with ionization
and radiation losses."""
for p in self.cascade_particles:
if p.pdg_id in contloss_db:
p.has_contloss = True
p.dEdX = contloss_db[p.pdg_id]
def add_tracking_particle(self,
parent_list,
child_pdg,
alias_name,
from_interactions=False):
"""Allows tracking decay and particle production chains.
Replaces previous ``obs_particle`` function that allowed to track
only leptons from decays certain particles. This present feature
removes the special PDG IDs 71XX, 72XX, etc and allows to define
any channel like::
$ particleManagerInstance.add_tracking_particle([211], 14, 'pi_numu')
This will store muon neutrinos from pion decays under the alias 'pi_numu'.
Multiple parents are allowed::
$ particleManagerInstance.add_tracking_particle(
[411, 421, 431], 14, 'D_numu')
Args:
alias (str): Name alias under which the result is accessible in get_solution
parents (list): list of parent particle PDG ID's
child (int): Child particle
from_interactions (bool): track particles from interactions
"""
from copy import copy
info(10, 'requested for', parent_list, child_pdg, alias_name)
for p in parent_list:
if (p, child_pdg, alias_name,
from_interactions) in self._tracking_requested:
continue
self._tracking_requested.append(
(p, child_pdg, alias_name, from_interactions))
# Check if tracking particle with the alias not yet defined
# and create new one of necessary
if alias_name in self.pname2pref:
info(15, 'Re-using tracking particle', alias_name)
tracking_particle = self.pname2pref[alias_name]
elif child_pdg not in self.pdg2pref:
info(15, 'Tracking child not a available',
'for this interaction model, skipping.')
return
else:
info(10, 'Creating new tracking particle')
# Copy all preferences of the original particle
tracking_particle = copy(self.pdg2pref[child_pdg])
tracking_particle.is_tracking = True
tracking_particle.name = alias_name
# Find a unique PDG ID for the new tracking particle
# print child_pdg[0], int(copysign(1000000, child_pdg[0]))
unique_child_pdg = (child_pdg[0] +
int(copysign(1000000, child_pdg[0])),
tracking_particle.helicity)
for i in range(100):
if unique_child_pdg not in list(self.pdg2pref):
break
info(
20, '{0}: trying to find unique_pdg ({1}) for {2}'.format(
i, tracking_particle.name, unique_child_pdg))
unique_child_pdg = (unique_child_pdg[0] +
int(copysign(10000, child_pdg[0])),
tracking_particle.helicity)
tracking_particle.unique_pdg_id = unique_child_pdg
# Track if attempt to add the tracking particle succeeded at least once
track_success = False
# Include antiparticle
for parent_pdg in list(
set(parent_list + [(-p, h) for (p, h) in parent_list])):
if parent_pdg not in self.pdg2pref:
info(15,
'Parent particle {0} does not exist.'.format(parent_pdg))
continue
if (parent_pdg, child_pdg, alias_name,
from_interactions) in self.tracking_relations:
info(
20, 'Tracking of {0} from {1} already activated.'.format(
tracking_particle.name,
self.pdg2pref[parent_pdg].name))
continue
if not from_interactions:
track_method = self.pdg2pref[parent_pdg].track_decays
else:
track_method = self.pdg2pref[parent_pdg].track_interactions
# Check if the tracking is successful. If not the particle is not
# a child of the parent particle
if track_method(tracking_particle):
info(
15, 'Parent particle {0} tracking scheduled.'.format(
parent_pdg))
self.tracking_relations.append(
(parent_pdg, child_pdg, alias_name, from_interactions))
track_success = True
if track_success and tracking_particle.name not in list(
self.pname2pref):
tracking_particle.mceqidx = np.max(list(self.mceqidx2pref)) + 1
self.all_particles.append(tracking_particle)
self.cascade_particles.append(tracking_particle)
self._update_particle_tables()
info(
10, 'tracking particle {0} successfully added.'.format(
tracking_particle.name))
def track_leptons_from(self,
parent_pdg_list,
prefix,
exclude_em=True,
from_interactions=False,
use_helicities=False):
"""Adds tracking particles for all leptons coming from decays of parents
in `parent_pdg_list`.
"""
leptons = [
p for p in self.all_particles if p.is_lepton
and not (p.is_em == exclude_em) and not p.is_tracking
]
for lepton in leptons:
if not use_helicities and lepton.pdg_id[1] != 0:
continue
self.add_tracking_particle(parent_pdg_list, lepton.pdg_id,
prefix + lepton.name, from_interactions)
def _init_categories(self, particle_pdg_list):
"""Determines the list of particles for calculation and
returns lists of instances of :class:`data.MCEqParticle` .
The particles which enter this list are those, which have a
defined index in the SIBYLL 2.3 interaction model. Included are
most relevant baryons and mesons and some of their high mass states.
More details about the particles which enter the calculation can
be found in :mod:`particletools`.
Returns:
(tuple of lists of :class:`data.MCEqParticle`): (all particles,
cascade particles, resonances)
"""
from MCEq.particlemanager import MCEqParticle
info(5, "Generating particle list.")
if particle_pdg_list is not None:
particles = particle_pdg_list
else:
from particletools.tables import SibyllParticleTable
modtab = SibyllParticleTable()
particles = modtab.baryons + modtab.mesons + modtab.leptons
# Remove duplicates
particles = sorted(list(set(particles)))
# Initialize particle objects
particle_list = [
MCEqParticle(pdg, hel, self._energy_grid, self._cs_db)
for pdg, hel in particles
]
# Sort by critical energy (= int_len ~== dec_length ~ int_cs/tau)
particle_list.sort(key=lambda x: x.E_crit, reverse=False)
# Cascade particles will "live" on the grid and have an mceqidx assigned
self.cascade_particles = [
p for p in particle_list if not p.is_resonance
]
self.cascade_particles = sorted(self.cascade_particles,
key=lambda p: abs(p.pdg_id[0]))
# These particles will only exist implicitely and integated out
self.resonances = [p for p in particle_list if p.is_resonance]
# Assign an mceqidx (position in state vector) to each explicit particle
# Resonances will kepp the default mceqidx = -1
for mceqidx, h in enumerate(self.cascade_particles):
h.mceqidx = mceqidx
self.all_particles = self.cascade_particles + self.resonances
self._update_particle_tables()
def add_new_particle(self, new_mceq_particle):
if new_mceq_particle in self.all_particles:
info(0, 'Particle {0}/{1} has already been added. Use it.'.format(
new_mceq_particle.name, new_mceq_particle.pdg_id
))
return
if not new_mceq_particle.is_resonance:
info(2, 'New particle {0}/{1} is not a resonance.'.format(
new_mceq_particle.name, new_mceq_particle.pdg_id
))
new_mceq_particle.mceqidx = len(self.cascade_particles)
self.cascade_particles.append(new_mceq_particle)
else:
info(2, 'New particle {0}/{1} is a resonance.'.format(
new_mceq_particle.name, new_mceq_particle.pdg_id
))
self.resonances.append(new_mceq_particle)
self.all_particles = self.cascade_particles + self.resonances
self._update_particle_tables()
def _update_particle_tables(self):
"""Update internal mapping tables after changes to the particle
list occur."""
self.n_cparticles = len(self.cascade_particles)
self.dim = self._energy_grid.d
self.dim_states = self._energy_grid.d * self.n_cparticles
# Clean all dictionaries
[
d.clear() for d in [
self.pdg2mceqidx, self.pname2mceqidx, self.mceqidx2pdg,
self.mceqidx2pname, self.mceqidx2pref, self.pdg2pref,
self.pname2pref
]
]
for p in self.all_particles:
self.pdg2mceqidx[p.unique_pdg_id] = p.mceqidx
self.pname2mceqidx[p.name] = p.mceqidx
self.mceqidx2pdg[p.mceqidx] = p.unique_pdg_id
self.mceqidx2pname[p.mceqidx] = p.name
self.mceqidx2pref[p.mceqidx] = p
self.pdg2pref[p.unique_pdg_id] = p
self.pname2pref[p.name] = p
self.print_particle_tables(20)
def _restore_tracking_setup(self):
"""Restores the setup of tracking particles after model changes."""
info(10, 'Restoring tracking particle setup')
if not self.tracking_relations and config.enable_default_tracking:
self._init_default_tracking()
return
# Clear tracking_relations for this initialization
self.tracking_relations = []
for pid, cid, alias, int_dec in self._tracking_requested:
if pid not in self.pdg2pref:
info(15, 'Can not restore {0}, since not in particle list.')
continue
self.add_tracking_particle([pid], cid, alias, int_dec)
def _init_default_tracking(self):
"""Add default tracking particles for leptons from pi, K, and mu"""
# Init default tracking particles
info(1, 'Initializing default tracking categories (pi, K, mu)')
self._tracking_requested_by_default = []
for parents, prefix, with_helicity in [([(211, 0)], 'pi_', True),
([(321, 0)], 'k_', True),
([(13, -1),
(13, 1)], 'mulr_', False),
([(13, 0)], 'mu_h0_', False),
([(13, -1), (13, 0),
(13, 1)], 'mu_', False),
([(310, 0),
(130, 0)], 'K0_', False)]:
self.track_leptons_from(parents,
prefix,
exclude_em=True,
use_helicities=with_helicity)
# Track prompt leptons
self.track_leptons_from([
p.pdg_id for p in self.all_particles if p.ctau < config.prompt_ctau
],
'prcas_',
exclude_em=True,
use_helicities=False)
# Track leptons from interaction vertices (also prompt)
self.track_leptons_from(
[p.pdg_id for p in self.all_particles if p.is_projectile],
'prres_',
exclude_em=True,
from_interactions=True,
use_helicities=False)
self.track_leptons_from(
[p.pdg_id for p in self.all_particles if p.is_em],
'em_',
exclude_em=True,
from_interactions=True,
use_helicities=False)
def __contains__(self, pdg_id_or_name):
"""Defines the `in` operator to look for particles"""
if isinstance(pdg_id_or_name, six.integer_types):
pdg_id_or_name = (pdg_id_or_name, 0)
elif isinstance(pdg_id_or_name, six.string_types):
pdg_id_or_name = (_pdata.pdg_id(pdg_id_or_name), 0)
return pdg_id_or_name in list(self.pdg2pref)
def __getitem__(self, pdg_id_or_name):
"""Returns reference to particle object."""
if isinstance(pdg_id_or_name, tuple):
return self.pdg2pref[pdg_id_or_name]
elif isinstance(pdg_id_or_name, six.integer_types):
return self.pdg2pref[(pdg_id_or_name, 0)]
else:
return self.pdg2pref[(_pdata.pdg_id(pdg_id_or_name), 0)]
def keys(self):
"""Returns pdg_ids of all particles"""
return [p.pdg_id for p in self.all_particles]
def __repr__(self):
str_out = ""
ident = 3 * ' '
for s in self.all_particles:
str_out += s.name + '\n' + ident
str_out += 'PDG id : ' + str(s.pdg_id) + '\n' + ident
str_out += 'MCEq idx : ' + str(s.mceqidx) + '\n\n'
return str_out
def print_particle_tables(self, min_dbg_lev=2):
info(min_dbg_lev, "Hadrons and stable particles:", no_caller=True)
print_in_rows(min_dbg_lev, [
p.name for p in self.all_particles
if p.is_hadron and not p.is_resonance and not p.is_mixed
])
info(min_dbg_lev, "\nMixed:", no_caller=True)
print_in_rows(min_dbg_lev,
[p.name for p in self.all_particles if p.is_mixed])
info(min_dbg_lev, "\nResonances:", no_caller=True)
print_in_rows(min_dbg_lev,
[p.name for p in self.all_particles if p.is_resonance])
info(min_dbg_lev, "\nLeptons:", no_caller=True)
print_in_rows(min_dbg_lev, [
p.name
for p in self.all_particles if p.is_lepton and not p.is_tracking
])
info(min_dbg_lev, "\nTracking:", no_caller=True)
print_in_rows(min_dbg_lev,
[p.name for p in self.all_particles if p.is_tracking])
info(min_dbg_lev,
"\nTotal number of species:",
self.n_cparticles,
no_caller=True)
# list particle indices
if False:
info(10, "Particle matrix indices:", no_caller=True)
some_index = 0
for p in self.cascade_particles:
for i in range(self._energy_grid.d):
info(10, p.name + '_' + str(i), some_index, no_caller=True)
some_index += 1
| 45,993 | 38.244027 | 91 | py |
MCEq | MCEq-master/MCEq/misc.py |
from __future__ import print_function
from collections import namedtuple
import numpy as np
import mceq_config as config
#: Energy grid (centers, bind widths, dimension)
energy_grid = namedtuple("energy_grid", ("c", "b", "w", "d"))
#: Matrix with x_lab=E_child/E_parent values
_xmat = None
def normalize_hadronic_model_name(name):
import re
"""Converts hadronic model name into standard form"""
return re.sub('[-.]', '', name).upper()
def theta_deg(cos_theta):
"""Converts :math:`\\cos{\\theta}` to :math:`\\theta` in degrees.
"""
return np.rad2deg(np.arccos(cos_theta))
def theta_rad(theta):
"""Converts :math:`\\theta` from rad to degrees.
"""
return np.deg2rad(theta)
def gen_xmat(energy_grid):
"""Generates x_lab matrix for a given energy grid"""
global _xmat
dims = (energy_grid.d, energy_grid.d)
if _xmat is None or _xmat.shape != dims:
_xmat = np.zeros(dims)
for eidx in range(energy_grid.d):
xvec = energy_grid.c[:eidx + 1] / energy_grid.c[eidx]
_xmat[:eidx + 1, eidx] = xvec
return _xmat
def print_in_rows(min_dbg_level, str_list, n_cols=5):
"""Prints contents of a list in rows `n_cols`
entries per row.
"""
if min_dbg_level > config.debug_level:
return
ls = len(str_list)
n_full_length = int(ls / n_cols)
n_rest = ls % n_cols
print_str = '\n'
for i in range(n_full_length):
print_str += ('"{:}", ' * n_cols).format(*str_list[i * n_cols:(i + 1) *
n_cols]) + '\n'
print_str += ('"{:}", ' * n_rest).format(*str_list[-n_rest:])
print(print_str.strip()[:-1])
def is_charm_pdgid(pdgid):
"""Returns True if particle ID belongs to a heavy (charm) hadron."""
return ((abs(pdgid) > 400 and abs(pdgid) < 500)
or (abs(pdgid) > 4000 and abs(pdgid) < 5000))
def _get_closest(value, in_list):
"""Returns the closes value to 'value' from given list."""
minindex = np.argmin(np.abs(in_list - value * np.ones(len(in_list))))
return minindex, in_list[minindex]
def getAZN(pdg_id):
"""Returns mass number :math:`A`, charge :math:`Z` and neutron
number :math:`N` of ``pdg_id``.
Note::
PDG ID for nuclei is coded according to 10LZZZAAAI.
For iron-52 it is 1000260520.
Args:
pdgid (int): PDG ID of nucleus/mass group
Returns:
(int,int,int): (Z,A) tuple
"""
Z, A = 1, 1
if pdg_id < 2000:
return 0, 0, 0
elif pdg_id == 2112:
return 1, 0, 1
elif pdg_id == 2212:
return 1, 1, 0
elif pdg_id > 1000000000:
A = pdg_id % 1000 / 10
Z = pdg_id % 1000000 / 10000
return A, Z, A - Z
else:
return 1, 0, 0
def getAZN_corsika(corsikaid):
"""Returns mass number :math:`A`, charge :math:`Z` and neutron
number :math:`N` of ``corsikaid``.
Args:
corsikaid (int): corsika id of nucleus/mass group
Returns:
(int,int,int): (Z,A) tuple
"""
Z, A = 1, 1
if corsikaid == 14:
return getAZN(2212)
if corsikaid >= 100:
Z = corsikaid % 100
A = (corsikaid - Z) / 100
else:
Z, A = 0, 0
return A, Z, A - Z
def corsikaid2pdg(corsika_id):
"""Conversion of CORSIKA nuclear code to PDG nuclear code"""
if corsika_id in [101, 14]:
return 2212
elif corsika_id in [100, 13]:
return 2112
else:
A, Z, _ = getAZN_corsika(corsika_id)
# 10LZZZAAAI
pdg_id = 1000000000
pdg_id += 10 * A
pdg_id += 10000 * Z
return pdg_id
def pdg2corsikaid(pdg_id):
"""Conversion from nuclear PDG ID to CORSIKA ID.
Note::
PDG ID for nuclei is coded according to 10LZZZAAAI.
For iron-52 it is 1000260520.
"""
if pdg_id == 2212:
return 14
A = pdg_id % 1000 / 10
Z = pdg_id % 1000000 / 10000
return A * 100 + Z
def caller_name(skip=2):
"""Get a name of a caller in the format module.class.method
`skip` specifies how many levels of stack to skip while getting caller
name. skip=1 means "who calls me", skip=2 "who calls my caller" etc.
An empty string is returned if skipped levels exceed stack height.abs
From https://gist.github.com/techtonik/2151727
"""
import inspect
stack = inspect.stack()
start = 0 + skip
if len(stack) < start + 1:
return ''
parentframe = stack[start][0]
name = []
if config.print_module:
module = inspect.getmodule(parentframe)
# `modname` can be None when frame is executed directly in console
if module:
name.append(module.__name__ + '.')
# detect classname
if 'self' in parentframe.f_locals:
# I don't know any way to detect call from the object method
# there seems to be no way to detect static method call - it will
# be just a function call
name.append(parentframe.f_locals['self'].__class__.__name__ + '::')
codename = parentframe.f_code.co_name
if codename != '<module>': # top level usually
name.append(codename + '(): ') # function or a method
else:
name.append(': ') # If called from module scope
del parentframe
return "".join(name)
def info(min_dbg_level, *message, **kwargs):
"""Print to console if `min_debug_level <= config.debug_level`
The fuction determines automatically the name of caller and appends
the message to it. Message can be a tuple of strings or objects
which can be converted to string using `str()`.
Args:
min_dbg_level (int): Minimum debug level in config for printing
message (tuple): Any argument or list of arguments that casts to str
condition (bool): Print only if condition is True
blank_caller (bool): blank the caller name (for multiline output)
no_caller (bool): don't print the name of the caller
Authors:
Anatoli Fedynitch (DESY)
Jonas Heinze (DESY)
"""
condition = kwargs.pop('condition', True)
blank_caller = kwargs.pop('blank_caller', False)
no_caller = kwargs.pop('no_caller', False)
if config.override_debug_fcn and min_dbg_level < config.override_max_level:
fcn_name = caller_name(skip=2).split('::')[-1].split('():')[0]
if fcn_name in config.override_debug_fcn:
min_dbg_level = 0
if condition and min_dbg_level <= config.debug_level:
message = [str(m) for m in message]
cname = caller_name() if not no_caller else ''
if blank_caller:
cname = len(cname) * ' '
print(cname + " ".join(message))
| 6,702 | 27.402542 | 79 | py |
MCEq | MCEq-master/MCEq/data.py |
import six
import numpy as np
import h5py
from collections import defaultdict
import mceq_config as config
from os.path import join, isfile
from .misc import normalize_hadronic_model_name, info
# TODO: Convert this to some functional generic class. Very erro prone to
# enter stuff by hand
equivalences = {
'SIBYLL23': {
-4132: 4122,
-4122: 4122,
-3334: -3312,
-3322: -2112,
-3212: -3122,
-413: -411,
113: 211,
221: 211,
111: 211,
310: 130,
413: 411,
3212: 3122,
3334: 3312
},
'SIBYLL21': {
-3322: 2112,
-3312: 2212,
-3222: 2212,
-3212: 2112,
-3122: 2112,
-3112: 2212,
-2212: 2212,
-2112: 2112,
310: 130,
111: 211,
3112: 2212,
3122: 2112,
3212: 2112,
3222: 2212,
3312: 2212,
3322: 2112
},
'QGSJET01': {
-4122: 2212,
-3322: 2212,
-3312: 2212,
-3222: 2212,
-3122: 2212,
-3112: 2212,
-2212: 2212,
-2112: 2212,
-421: 321,
-411: 321,
-211: 211,
-321: 321,
111: 211,
221: 211,
130: 321,
310: 321,
411: 321,
421: 321,
2112: 2212,
3112: 2212,
3122: 2212,
3222: 2212,
3312: 2212,
3322: 2212,
4122: 2212
},
'QGSJETII': {
-3122: -2112,
111: 211,
113: 211,
221: 211,
310: 130,
3122: 2112,
},
'DPMJET': {
-4122: -3222,
-3334: -3312,
-3212: -3122,
-431: -321,
-421: -321,
-413: -321,
-411: -321,
310: 130,
113: 211,
221: 211,
111: 211,
411: 321,
413: 321,
421: 321,
431: 321,
3212: 3122,
3334: 3312,
4122: 3222,
},
'EPOSLHC': {
-3334: 2212,
-3322: -3122,
-3312: 2212,
-3222: -2212,
-3212: -3122,
-3112: 2212,
111: 211,
113: 211,
221: 211,
310: 130,
3112: -2212,
3212: 3122,
3222: 2212,
3312: -2212,
3322: 3122,
3334: -2212
},
'PYTHIA8': {
-3122: -2112,
-431: -321,
-421: -321,
-413: -321,
-411: -321,
111: 211,
113: 211,
221: 211,
310: 321,
130: 321,
411: 321,
413: 321,
421: 321,
431: 321,
3122: 2112,
}
}
class HDF5Backend(object):
"""Provides access to tabulated data stored in an HDF5 file.
The file contains all necessary ingredients to run MCEq, i.e. no
other files are required. This database is not maintained in git
and it will change infrequently.
"""
def __init__(self):
info(2, 'Opening HDF5 file', config.mceq_db_fname)
self.had_fname = join(config.data_dir, config.mceq_db_fname)
if not isfile(self.had_fname):
raise Exception(
'MCEq DB file {0} not found in "data" directory.'.format(
config.mceq_db_fname))
self.em_fname = join(config.data_dir, config.em_db_fname)
if config.enable_em and not isfile(self.had_fname):
raise Exception(
'Electromagnetic DB file {0} not found in "data" directory.'.
format(config.em_db_fname))
with h5py.File(self.had_fname, 'r') as mceq_db:
from MCEq.misc import energy_grid
ca = mceq_db['common'].attrs
self.version = (mceq_db.attrs['version']
if 'version' in mceq_db.attrs else '1.0.0')
self.min_idx, self.max_idx, self._cuts = self._eval_energy_cuts(
ca['e_grid'])
self._energy_grid = energy_grid(
ca['e_grid'][self._cuts],
ca['e_bins'][self.min_idx:self.max_idx + 1],
ca['widths'][self._cuts], self.max_idx - self.min_idx)
self.dim_full = ca['e_dim']
@property
def energy_grid(self):
return self._energy_grid
def _eval_energy_cuts(self, e_centers):
min_idx, max_idx = 0, len(e_centers)
slice0, slice1 = None, None
if config.e_min is not None:
min_idx = slice0 = np.argmin(np.abs(e_centers - config.e_min))
if config.e_max is not None:
max_idx = slice1 = np.argmin(
np.abs(e_centers - config.e_max)) + 1
return min_idx, max_idx, slice(slice0, slice1)
def _gen_db_dictionary(self, hdf_root, indptrs, equivalences={}):
from scipy.sparse import csr_matrix
index_d = {}
relations = defaultdict(lambda: [])
particle_list = []
if 'description' in hdf_root.attrs:
description = hdf_root.attrs['description']
else:
description = None
mat_data = hdf_root[:, :]
indptr_data = indptrs[:]
len_data = hdf_root.attrs['len_data']
if hdf_root.attrs['tuple_idcs'].shape[1] == 4:
model_particles = sorted(
list(
set(hdf_root.attrs['tuple_idcs'][:,
(0,
2)].flatten().tolist())))
else:
model_particles = sorted(
list(set(hdf_root.attrs['tuple_idcs'].flatten().tolist())))
exclude = config.adv_set["disabled_particles"]
read_idx = 0
available_parents = [
(pdg, parity)
for (pdg, parity) in (hdf_root.attrs['tuple_idcs'][:, :2])
]
available_parents = sorted(list(set(available_parents)))
# Reverse equivalences
eqv_lookup = defaultdict(lambda: [])
for k in equivalences:
eqv_lookup[(equivalences[k], 0)].append((k, 0))
for tupidx, tup in enumerate(hdf_root.attrs['tuple_idcs']):
if len(tup) == 4:
parent_pdg, child_pdg = tuple(tup[:2]), tuple(tup[2:])
elif len(tup) == 2:
parent_pdg, child_pdg = (tup[0], 0), (tup[1], 0)
else:
raise Exception('Failed decoding parent-child relation.')
if (abs(parent_pdg[0]) in exclude) or (abs(
child_pdg[0]) in exclude):
read_idx += len_data[tupidx]
continue
parent_pdg = int(parent_pdg[0]), (parent_pdg[1])
child_pdg = int(child_pdg[0]), (child_pdg[1])
particle_list.append(parent_pdg)
particle_list.append(child_pdg)
index_d[(parent_pdg, child_pdg)] = (csr_matrix(
(mat_data[0, read_idx:read_idx + len_data[tupidx]],
mat_data[1, read_idx:read_idx + len_data[tupidx]],
indptr_data[tupidx, :]),
shape=(self.dim_full, self.dim_full
))[self._cuts, self.min_idx:self.max_idx]).toarray()
relations[parent_pdg].append(child_pdg)
info(20,
'This parent {0} is used for interactions of'.format(
parent_pdg[0]), [p[0] for p in eqv_lookup[parent_pdg]],
condition=len(equivalences) > 0)
if config.assume_nucleon_interactions_for_exotics:
for eqv_parent in eqv_lookup[parent_pdg]:
if eqv_parent[0] not in model_particles:
info(10, 'No equiv. replacement needed of', eqv_parent, 'for',
parent_pdg, 'parent.')
continue
elif eqv_parent in available_parents:
info(
10, 'Parent {0} has dedicated simulation.'.format(
eqv_parent[0]))
continue
particle_list.append(eqv_parent)
index_d[(eqv_parent, child_pdg)] = index_d[(parent_pdg,
child_pdg)]
relations[eqv_parent] = relations[parent_pdg]
info(
15, 'equivalence of {0} and {1} interactions'.format(
eqv_parent[0], parent_pdg[0]))
read_idx += len_data[tupidx]
return {
'parents': sorted(list(relations)),
'particles': sorted(list(set(particle_list))),
'relations': dict(relations),
'index_d': dict(index_d),
'description': description
}
def _check_subgroup_exists(self, subgroup, mname):
available_models = list(subgroup)
if mname not in available_models:
info(0, 'Invalid choice/model', mname)
info(0, 'Choose from:\n', '\n'.join(available_models))
raise Exception('Unknown selections.')
def interaction_db(self, interaction_model_name):
mname = normalize_hadronic_model_name(interaction_model_name)
info(10, 'Generating interaction db. mname={0}'.format(mname))
with h5py.File(self.had_fname, 'r') as mceq_db:
self._check_subgroup_exists(mceq_db['hadronic_interactions'],
mname)
if 'SIBYLL21' in mname:
eqv = equivalences['SIBYLL21']
elif 'SIBYLL23' in mname:
eqv = equivalences['SIBYLL23']
elif 'QGSJET01' in mname:
eqv = equivalences['QGSJET01']
elif 'QGSJETII' in mname:
eqv = equivalences['QGSJETII']
elif 'DPMJET' in mname:
eqv = equivalences['DPMJET']
elif 'EPOSLHC' in mname:
eqv = equivalences['EPOSLHC']
elif 'PYTHIA8' in mname:
eqv = equivalences['PYTHIA8']
int_index = self._gen_db_dictionary(
mceq_db['hadronic_interactions'][mname],
mceq_db['hadronic_interactions'][mname + '_indptrs'],
equivalences=eqv)
# Append electromagnetic interaction matrices from EmCA
if config.enable_em:
with h5py.File(self.em_fname, 'r') as em_db:
info(2, 'Injecting EmCA matrices into interaction_db.')
self._check_subgroup_exists(em_db, 'electromagnetic')
em_index = self._gen_db_dictionary(
em_db['electromagnetic']['emca_mats'],
em_db['electromagnetic']['emca_mats' + '_indptrs'])
int_index['parents'] = sorted(int_index['parents'] +
em_index['parents'])
int_index['particles'] = sorted(
list(set(int_index['particles'] + em_index['particles'])))
int_index['relations'].update(em_index['relations'])
int_index['index_d'].update(em_index['index_d'])
if int_index['description'] is not None:
int_index['description'] += '\nInteraction model name: ' + mname
else:
int_index['description'] = 'Interaction model name: ' + mname
return int_index
def decay_db(self, decay_dset_name):
info(10, 'Generating decay db. dset_name={0}'.format(decay_dset_name))
with h5py.File(self.had_fname, 'r') as mceq_db:
self._check_subgroup_exists(mceq_db['decays'], decay_dset_name)
dec_index = self._gen_db_dictionary(
mceq_db['decays'][decay_dset_name],
mceq_db['decays'][decay_dset_name + '_indptrs'])
if config.muon_helicity_dependence:
info(2, 'Using helicity dependent decays.')
custom_index = self._gen_db_dictionary(
mceq_db['decays']['custom_decays'],
mceq_db['decays']['custom_decays' + '_indptrs'])
info(5, 'Replacing decay from custom decay_db.')
dec_index['index_d'].update(custom_index['index_d'])
# Remove manually TODO: Kaon decays to muons assumed
# only two-body
_ = dec_index['index_d'].pop(((211, 0), (-13, 0)))
_ = dec_index['index_d'].pop(((-211, 0), (13, 0)))
_ = dec_index['index_d'].pop(((321, 0), (-13, 0)))
_ = dec_index['index_d'].pop(((-321, 0), (13, 0)))
# _ = dec_index['index_d'].pop(((211,0),(14,0)))
# _ = dec_index['index_d'].pop(((-211,0),(-14,0)))
# _ = dec_index['index_d'].pop(((321,0),(14,0)))
# _ = dec_index['index_d'].pop(((-321,0),(-14,0)))
dec_index['relations'] = defaultdict(lambda: [])
dec_index['particles'] = []
for idx_tup in dec_index['index_d']:
parent, child = idx_tup
dec_index['relations'][parent].append(child)
dec_index['particles'].append(parent)
dec_index['particles'].append(child)
dec_index['parents'] = sorted(list(dec_index['relations']))
dec_index['particles'] = sorted(
list(set(dec_index['particles'])))
return dec_index
def cs_db(self, interaction_model_name):
mname = normalize_hadronic_model_name(interaction_model_name)
with h5py.File(self.had_fname, 'r') as mceq_db:
self._check_subgroup_exists(mceq_db['cross_sections'], mname)
cs_db = mceq_db['cross_sections'][mname]
cs_data = cs_db[:]
index_d = {}
parents = list(cs_db.attrs['projectiles'])
for ip, p in enumerate(parents):
index_d[p] = cs_data[self._cuts, ip]
# Append electromagnetic interaction cross sections from EmCA
if config.enable_em:
with h5py.File(self.em_fname, 'r') as em_db:
info(2, 'Injecting EmCA matrices into interaction_db.')
self._check_subgroup_exists(em_db, 'electromagnetic')
em_cs = em_db["electromagnetic"]['cs'][:]
em_parents = list(
em_db["electromagnetic"]['cs'].attrs['projectiles'])
for ip, p in enumerate(em_parents):
if p in index_d:
raise Exception(
'EM cross sections already in database?')
index_d[p] = em_cs[ip, self._cuts]
parents += em_parents
return {'parents': parents, 'index_d': index_d}
def continuous_loss_db(self, medium='air'):
with h5py.File(self.had_fname, 'r') as mceq_db:
self._check_subgroup_exists(mceq_db['continuous_losses'], medium)
cl_db = mceq_db['continuous_losses'][medium]
index_d = {}
for pstr in list(cl_db):
for hel in [0, 1, -1]:
index_d[(int(pstr), hel)] = cl_db[pstr][self._cuts]
if config.enable_em:
with h5py.File(self.em_fname, 'r') as em_db:
info(2, 'Injecting EmCA matrices into interaction_db.')
self._check_subgroup_exists(em_db, 'electromagnetic')
for hel in [0, 1, -1]:
index_d[(11,
hel)] = em_db["electromagnetic"]['dEdX 11'][
self._cuts]
index_d[(-11,
hel)] = em_db["electromagnetic"]['dEdX -11'][
self._cuts]
return {'parents': sorted(list(index_d)), 'index_d': index_d}
class Interactions(object):
"""Class for managing the dictionary of interaction yield matrices.
Args:
mceq_hdf_db (object): instance of :class:`MCEq.data.HDF5Backend`
"""
def __init__(self, mceq_hdf_db):
from collections import defaultdict
#: MCEq HDF5Backend reference
self.mceq_db = mceq_hdf_db
#: reference to energy grid
self.energy_grid = mceq_hdf_db.energy_grid
#: List of active parents
self.parents = None
#: List of all known particles
self.particles = None
#: Dictionary parent/child relations
self.relations = None
#: Dictionary containing the distribuiton matrices
self.index_d = None
#: String containing the desciption of the model
self.description = None
#: (str) Interaction Model name
self.iam = None
# #: (tuple) selection of a band of coeffictients (in xf)
# self.band = None
#: (tuple) modified particle combination for error prop.
self.mod_pprod = defaultdict(lambda: {})
def load(self, interaction_model, parent_list=None):
from MCEq.misc import is_charm_pdgid
self.iam = normalize_hadronic_model_name(interaction_model)
# Load tables and index from file
index = self.mceq_db.interaction_db(self.iam)
disabled_particles = config.adv_set['disabled_particles']
self.parents = [p for p in index['parents']
if p[0] not in disabled_particles]
self.relations = index['relations']
self.index_d = index['index_d']
self.description = index['description']
# Advanced options
if parent_list is not None:
self.parents = [p for p in self.parents if p in parent_list and p[0]
not in disabled_particles]
if (config.adv_set['disable_charm_pprod']):
self.parents = [
p for p in self.parents if not is_charm_pdgid(p[0])
]
if (config.adv_set['disable_interactions_of_unstable']):
self.parents = [
p for p in self.parents
if p[0] not in [2212, 2112, -2212, -2112]
]
if (config.adv_set['allowed_projectiles']):
self.parents = [
p for p in self.parents
if p[0] in config.adv_set['allowed_projectiles']
]
self.particles = []
for p in list(self.relations):
if p not in self.parents:
_ = self.relations.pop(p, None)
continue
self.particles.append(p)
self.particles += [d for d in self.relations[p]
if d not in disabled_particles]
self.particles = sorted(list(set(self.particles)))
if config.adv_set['disable_direct_leptons']:
for p in list(self.relations):
self.relations[p] = [
c for c in self.relations[p] if not 10 < abs(c[0]) < 20
]
if len(disabled_particles) > 0:
for p in list(self.relations):
self.relations[p] = [
c for c in self.relations[p] if c[0] not in
disabled_particles
]
if not self.particles:
info(2, 'None of the parent_list particles interact. Returning custom list.')
self.particles = parent_list
def __getitem__(self, key):
return self.get_matrix(*key)
def __contains__(self, key):
"""Defines the `in` operator to look for particles"""
return key in self.parents
def _gen_mod_matrix(self, x_func, *args):
"""Creates modification matrix using an (x,E)-dependent function.
:math:`x = \\frac{E_{\\rm primary}}{E_{\\rm secondary}}` is the
fraction of secondary particle energy. ``x_func`` can be an
arbitrary function modifying the :math:`x_\\text{lab}` distribution.
Run this method each time you change ``x_func``, or its parameters,
not each time you change modified particle.
The ``args`` are passed to the function.
Args:
x_func (object): reference to function
args (tuple): arguments of `x_func`
Returns:
(numpy.array): modification matrix
"""
from MCEq.misc import gen_xmat
info(2, 'Generating modification matrix for', x_func.__name__, args)
xmat = gen_xmat(self.energy_grid)
# select the relevant slice of interaction matrix
modmat = x_func(xmat, self.energy_grid.c, *args)
# Set lower triangular indices to 0. (should be not necessary)
modmat[np.tril_indices(self.energy_grid.d, -1)] = 0.
return modmat
def _set_mod_pprod(self, prim_pdg, sec_pdg, x_func, args):
"""Sets combination of parent/secondary for error propagation.
The production spectrum of ``sec_pdg`` in interactions of
``prim_pdg`` is modified according to the function passed to
:func:`InteractionYields.init_mod_matrix`
Args:
prim_pdg (int): interacting (primary) particle PDG ID
sec_pdg (int): secondary particle PDG ID
"""
# Short cut for the pprod list
mpli = self.mod_pprod
pstup = (prim_pdg, sec_pdg)
if config.use_isospin_sym and prim_pdg not in [2212, 2112]:
raise Exception('Unsupported primary for isospin symmetries.')
if (x_func.__name__, args) in mpli[(pstup)]:
info(
5, ' no changes to particle production' +
' modification matrix of {0}/{1} for {2},{3}'.format(
prim_pdg, sec_pdg, x_func.__name__, args))
return False
# Check function with same mode but different parameter is supplied
for (xf_name, fargs) in list(mpli[pstup]):
if (xf_name == x_func.__name__) and (fargs[0] == args[0]):
info(1, 'Warning. If you modify only the value of a function,',
'unset and re-apply all changes')
return False
info(
2, 'modifying modify particle production' +
' matrix of {0}/{1} for {2},{3}'.format(prim_pdg, sec_pdg,
x_func.__name__, args))
kmat = self._gen_mod_matrix(x_func, *args)
mpli[pstup][(x_func.__name__, args)] = kmat
info(5, 'modification "strength"',
np.sum(kmat) / np.count_nonzero(kmat))
if not config.use_isospin_sym:
return True
prim_pdg, symm_pdg = 2212, 2112
if prim_pdg == 2112:
prim_pdg = 2112
symm_pdg = 2212
# p->pi+ = n-> pi-, p->pi- = n-> pi+
if abs(sec_pdg) == 211:
# Add the same mod to the isospin symmetric particle combination
mpli[(symm_pdg, -sec_pdg)][('isospin', args)] = kmat
# Assumption: Unflavored production coupled to the average
# of pi+ and pi- production
if np.any([p in self.parents for p in [221, 223, 333]]):
unflv_arg = None
if (prim_pdg, -sec_pdg) not in mpli:
# Only pi+ or pi- (not both) have been modified
unflv_arg = (args[0], 0.5 * args[1])
if (prim_pdg, -sec_pdg) in mpli:
# Compute average of pi+ and pi- modification matrices
# Save the 'average' argument (just for meaningful output)
for arg_name, arg_val in mpli[(prim_pdg, -sec_pdg)]:
if arg_name == args[0]:
unflv_arg = (args[0], 0.5 * (args[1] + arg_val))
unflmat = self._gen_mod_matrix(x_func, *unflv_arg)
# modify eta, omega, phi, 221, 223, 333
for t in [(prim_pdg, 221), (prim_pdg, 223), (prim_pdg, 333),
(symm_pdg, 221), (symm_pdg, 223), (symm_pdg, 333)]:
mpli[t][('isospin', unflv_arg)] = unflmat
# Charged and neutral kaons
elif abs(sec_pdg) == 321:
# approx.: p->K+ ~ n-> K+, p->K- ~ n-> K-
mpli[(symm_pdg, sec_pdg)][('isospin', args)] = kmat
k0_arg = (args[0], 0.5 * args[1])
if (prim_pdg, -sec_pdg) in mpli:
# Compute average of K+ and K- modification matrices
# Save the 'average' argument (just for meaningful printout)
for arg_name, arg_val in mpli[(prim_pdg, -sec_pdg)]:
if arg_name == args[0]:
k0_arg = (args[0], 0.5 * (args[1] + arg_val))
k0mat = self._gen_mod_matrix(x_func, *k0_arg)
# modify K0L/S
for t in [(prim_pdg, 310), (prim_pdg, 130), (symm_pdg, 310),
(symm_pdg, 130)]:
mpli[t][('isospin', k0_arg)] = k0mat
elif abs(sec_pdg) == 411:
ssec = np.sign(sec_pdg)
mpli[(prim_pdg, ssec * 421)][('isospin', args)] = kmat
mpli[(prim_pdg, ssec * 431)][('isospin', args)] = kmat
mpli[(symm_pdg, sec_pdg)][('isospin', args)] = kmat
mpli[(symm_pdg, ssec * 421)][('isospin', args)] = kmat
mpli[(symm_pdg, ssec * 431)][('isospin', args)] = kmat
# Leading particles
elif abs(sec_pdg) == prim_pdg:
mpli[(symm_pdg, symm_pdg)][('isospin', args)] = kmat
elif abs(sec_pdg) == symm_pdg:
mpli[(symm_pdg, prim_pdg)][('isospin', args)] = kmat
else:
raise Exception('No isospin relation found for secondary' +
str(sec_pdg))
# Tell MCEqRun to regenerate the matrices if something has changed
return True
def print_mod_pprod(self):
"""Prints the active particle production modification.
"""
for i, (prim_pdg, sec_pdg) in enumerate(sorted(self.mod_pprod)):
for j, (argname, argv) in enumerate(self.mod_pprod[(prim_pdg,
sec_pdg)]):
info(2,
'{0}: {1} -> {2}, func: {3}, arg: {4}'.format(
i + j, prim_pdg, sec_pdg, argname, argv),
no_caller=True)
def get_matrix(self, parent, child):
"""Returns a ``DIM x DIM`` yield matrix.
Args:
parent (int): PDG ID of parent particle
child (int): PDG ID of final state child/secondary particle
Returns:
numpy.array: yield matrix
"""
info(10, 'Called for', parent, child)
if child not in self.relations[parent]:
raise Exception(("trying to get empty matrix {0} -> {1}").format(
parent, child))
m = self.index_d[(parent, child)]
if config.adv_set['disable_leading_mesons'] and abs(child) < 2000 \
and (parent, -child) in list(self.index_d):
m_anti = self.index_d[(parent, -child)]
ie = 50
info(5, 'sum in disable_leading_mesons',
(np.sum(m[:, ie - 30:ie]) - np.sum(m_anti[:, ie - 30:ie])))
if (np.sum(m[:, ie - 30:ie]) - np.sum(m_anti[:, ie - 30:ie])) > 0:
info(5, 'inverting meson due to leading particle veto.', child,
'->', -child)
m = m_anti
else:
info(5, 'no inversion since child not leading', child)
else:
info(20, 'no meson inversion in leading particle veto.', parent,
child)
if (parent[0], child[0]) in list(self.mod_pprod):
info(
5, 'using modified particle production for {0}/{1}'.format(
parent[0], child[0]))
i = 0
m = np.copy(m)
for args, mmat in six.iteritems(self.mod_pprod[(parent[0], child[0])]):
info(10, i, (parent[0], child[0]), args, np.sum(mmat),
np.sum(m))
i += 1
m *= mmat
return m
class Decays(object):
"""Class for managing the dictionary of decay yield matrices.
Args:
mceq_hdf_db (object): instance of :class:`MCEq.data.HDF5Backend`
"""
def __init__(self, mceq_hdf_db, default_decay_dset='full_decays'):
#: MCEq HDF5Backend reference
self.mceq_db = mceq_hdf_db
#: (list) List of particles in the decay matrices
self.parent_list = []
self._default_decay_dset = default_decay_dset
def load(self, parent_list=None, decay_dset=None):
# Load tables and index from file
if decay_dset is None:
decay_dset = self._default_decay_dset
index = self.mceq_db.decay_db(decay_dset)
self.parents = index['parents']
self.particles = index['particles']
self.relations = index['relations']
self.index_d = index['index_d']
self.description = index['description']
# Advanced options
regenerate_index = False
if (parent_list):
# Take only the parents provided by the list
# Add the decay products, which can become new parents
def _follow_decay_chain(p, plist):
if p in self.relations:
plist.append(p)
for d in self.relations[p]:
_follow_decay_chain(d, plist)
else:
return plist
plist = []
for p in parent_list:
_follow_decay_chain(p, plist)
self.parents = sorted(list(set(plist)))
regenerate_index = True
if regenerate_index:
self.particles = []
for p in list(self.relations):
if p not in self.parents:
_ = self.relations.pop(p, None)
continue
self.particles.append(p)
self.particles += self.relations[p]
self.particles = sorted(list(set(self.particles)))
def __getitem__(self, key):
return self.get_matrix(*key)
def __contains__(self, key):
"""Defines the `in` operator to look for particles"""
return key in self.parents
def children(self, parent_pdg):
if parent_pdg not in self.relations:
raise Exception(
'Parent {0} not in decay database.'.format(parent_pdg))
return self.relations[parent_pdg]
def get_matrix(self, parent, child):
"""Returns a ``DIM x DIM`` decay matrix.
Args:
parent (int): PDG ID of parent particle
child (int): PDG ID of final state child particle
Returns:
numpy.array: decay matrix
"""
info(20, 'entering with', parent, child)
if child not in self.relations[parent]:
raise Exception(("trying to get empty matrix {0} -> {1}").format(
parent, child))
return self.index_d[(parent, child)]
class InteractionCrossSections(object):
"""Class for managing the dictionary of hadron-air cross-sections.
Args:
mceq_hdf_db (object): instance of :class:`MCEq.data.HDF5Backend`
interaction_model (str): name of the interaction model
"""
#: unit - :math:`\text{GeV} \cdot \text{fm}`
GeVfm = 0.19732696312541853
#: unit - :math:`\text{GeV} \cdot \text{cm}`
GeVcm = GeVfm * 1e-13
#: unit - :math:`\text{GeV}^2 \cdot \text{mbarn}`
GeV2mbarn = 10.0 * GeVfm**2
#: unit conversion - :math:`\text{mbarn} \to \text{cm}^2`
mbarn2cm2 = GeVcm**2 / GeV2mbarn
def __init__(self, mceq_hdf_db, interaction_model='SIBYLL2.3c'):
#: MCEq HDF5Backend reference
self.mceq_db = mceq_hdf_db
#: reference to energy grid
self.energy_grid = mceq_hdf_db.energy_grid
#: List of active parents
self.parents = None
#: Dictionary containing the distribuiton matrices
self.index_d = None
#: (str) Interaction Model name
self.iam = normalize_hadronic_model_name(interaction_model)
# Load defaults
self.load(interaction_model)
def __getitem__(self, parent):
"""Return the cross section in :math:`\\text{cm}^2` as a dictionary
lookup."""
return self.get_cs(parent)
def __contains__(self, key):
"""Defines the `in` operator to look for particles"""
return key in self.parents
def load(self, interaction_model):
#: (str) Interaction Model name
self.iam = normalize_hadronic_model_name(interaction_model)
# Load tables and index from file
index = self.mceq_db.cs_db(self.iam)
self.parents = index['parents']
self.index_d = index['index_d']
def get_cs(self, parent, mbarn=False):
"""Returns inelastic ``parent``-air cross-section
:math:`\\sigma_{inel}^{proj-Air}(E)` as vector spanned over
the energy grid.
Args:
parent (int): PDG ID of parent particle
mbarn (bool,optional): if ``True``, the units of the cross-section
will be :math:`mbarn`,
else :math:`\\text{cm}^2`
Returns:
numpy.array: cross-section in :math:`mbarn` or :math:`\\text{cm}^2`
"""
message_templ = 'replacing {0} with {1} cross section'
if isinstance(parent, tuple):
parent = parent[0]
if parent in list(self.index_d):
cs = self.index_d[parent]
elif abs(parent) in list(self.index_d):
cs = self.index_d[abs(parent)]
elif 100 < abs(parent) < 300 and abs(parent) != 130:
cs = self.index_d[211]
elif 300 < abs(parent) < 1000 or abs(parent) in [130, 10313, 10323]:
info(15, message_templ.format(parent, 'K+-'))
cs = self.index_d[321]
elif abs(parent) > 1000 and abs(parent) < 5000:
info(15, message_templ.format(parent, 'nucleon'))
cs = self.index_d[2212]
elif 5 < abs(parent) < 23:
info(15, 'returning 0 cross-section for lepton', parent)
return np.zeros(self.energy_grid.d)
else:
info(
1,
'Strange case for parent, using zero cross section.')
cs = 0.
if not mbarn:
return self.mbarn2cm2 * cs
else:
return cs
class ContinuousLosses(object):
"""Class for managing the dictionary of hadron-air cross-sections.
Args:
mceq_hdf_db (object): instance of :class:`MCEq.data.HDF5Backend`
material (str): name of the material (not fully implemented)
"""
def __init__(self, mceq_hdf_db, material=config.dedx_material):
#: MCEq HDF5Backend reference
self.mceq_db = mceq_hdf_db
#: reference to energy grid
self.energy_grid = mceq_hdf_db.energy_grid
#: List of active parents
self.parents = None
#: Dictionary containing the distribuiton matrices
self.index_d = None
# Load defaults
self.load_db(material)
def __getitem__(self, parent):
"""Return the cross section in :math:`\\text{cm}^2` as
a dictionary lookup."""
return self.index_d[parent]
def __contains__(self, key):
"""Defines the `in` operator to look for particles"""
return key in self.parents
def load_db(self, material):
# Load tables and index from file
index = self.mceq_db.continuous_loss_db(material)
self.parents = index['parents']
self.index_d = index['index_d']
| 35,586 | 35.954309 | 89 | py |
MCEq | MCEq-master/MCEq/charm_models.py | # -*- coding: utf-8 -*-
"""
:mod:`MCEq.charm_models` --- charmed particle production
========================================================
This module includes classes for custom charmed particle
production. Currently only the MRS model is implemented
as the class :class:`MRS_charm`. The abstract class
:class:`CharmModel` guides the implementation of custom
classes.
The :class:`Yields` instantiates derived classes of
:class:`CharmModel` and calls :func:`CharmModel.get_yield_matrix`
when overwriting a model yield file in
:func:`Yields.set_custom_charm_model`.
"""
import numpy as np
from MCEq.core import config
from MCEq.misc import info
from abc import ABCMeta, abstractmethod
from six import with_metaclass
class CharmModel(with_metaclass(ABCMeta)):
"""Abstract class, from which implemeted charm models can inherit.
Note:
Do not instantiate this class directly.
"""
@abstractmethod
def get_yield_matrix(self, proj, sec):
"""The implementation of this abstract method returns
the yield matrix spanned over the energy grid of the calculation.
Args:
proj (int): PDG ID of the interacting particle (projectile)
sec (int): PDG ID of the final state charmed meson (secondary)
Returns:
np.array: yield matrix
Raises:
NotImplementedError:
"""
raise NotImplementedError("CharmModel::get_yield_matrix(): " +
"Base class called.")
class MRS_charm(CharmModel):
"""Martin-Ryskin-Stasto charm model.
The model is described in A. D. Martin, M. G. Ryskin,
and A. M. Stasto, Acta Physica Polonica B 34, 3273 (2003).
The parameterization of the inclusive :math:`c\\bar{c}`
cross-section is given in the appendix of the paper.
This formula provides the behavior of the cross-section,
while fragmentation functions and certain scales are
needed to obtain meson and baryon fluxes as a function
of the kinematic variable :math:`x_F`. At high energies
and :math:`x_F > 0.05`, where this model is valid,
:math:`x_F \\approx x=E_c/E_{proj}`.
Here, these fragmentation functions are used:
- :math:`D`-mesons :math:`\\frac{4}{3} x`
- :math:`\\Lambda`-baryons :math:`\\frac{1}{1.47} x`
The production ratios between the different types of
:math:`D`-mesons are stored in the attribute :attr:`cs_scales`
and :attr:`D0_scale`, where :attr:`D0_scale` is the
:math:`c\\bar{c}` to :math:`D^0` ratio and :attr:`cs_scales`
stores the production ratios of :math:`D^\\pm/D^0`,
:math:`D_s/D^0` and :math:`\\Lambda_c/D^0`.
Since the model employs only perturbartive production of
charm, the charge conjugates are symmetric, i.e.
:math:`\\sigma_{D^+} = \\sigma_{D^-}` etc.
Args:
e_grid (np.array): energy grid as it is defined in
:class:`MCEqRun`.
csm (np.array): inelastic cross-sections as used in
:class:`MCEqRun`.
"""
#: fractions of cross-section wrt to D0 cross-section
cs_scales = {421: 1., 411: 0.5, 431: 0.15, 4122: 0.45}
#: D0 cross-section wrt to the ccbar cross-section
D0_scale = 1. / 2.1
#: hadron projectiles, which are allowed to produce charm
allowed_proj = [2212, -2212, 2112, -2112, 211, -211, 321, -321]
#: charm secondaries, which are predicted by this model
allowed_sec = [411, 421, 431, 4122]
def __init__(self, e_grid, csm):
# inverted fragmentation functions
self.lambda_c_frag = lambda xhad: 1 / 1.47 * xhad
self.d_frag = lambda xhad: 4. / 3. * xhad
self.e_grid = e_grid
self.d = e_grid.size
self.no_prod = np.zeros(self.d**2).reshape(self.d, self.d)
self.siginel = csm.get_cs(2212, mbarn=True)
def sigma_cc(self, E):
"""Returns the integrated ccbar cross-section in mb.
Note:
Integration is not going over complete phase-space due to
limitations of the parameterization.
"""
from scipy.integrate import quad
E = np.asarray(E)
if E.size > 1:
return 2 * np.array(
[quad(self.dsig_dx, 0.05, 0.6, args=Ei)[0] for Ei in E])
else:
return 2 * quad(self.dsig_dx, 0.05, 0.6, args=E)[0]
def dsig_dx(self, x, E):
"""Returns the Feynman-:math:`x_F` distribution
of :math:`\\sigma_{c\\bar{c}}` in mb
Args:
x (float or np.array): :math:`x_F`
E (float): center-of-mass energy in GeV
Returns:
float: :math:`\\sigma_{c\\bar{c}}` in mb
"""
x = np.asarray(x)
E = np.asarray(E)
beta = 0.05 - 0.016 * np.log(E / 10e4)
n, A = None, None
if E < 1e4:
return 0.
elif E >= 1e4 and E < 1e8:
n = 7.6 + 0.025 * np.log(E / 1e4)
A = 140 + (11. * np.log(E / 1e2))**1.65
elif E >= 1e8 and E <= 1e11:
n = 7.6 + 0.012 * np.log(E / 1e4)
A = 4100. + 245. * np.log(E / 1e8)
else:
raise Exception("MRS_charm()::out of range")
res = np.zeros_like(x)
ran = (x > 0.01) & (x < 0.7)
res[ran] = np.array(A * x[ran]**
(beta - 1.) * (1 - x[ran]**1.2)**n / 1e3)
return res
def D_dist(self, x, E, mes):
"""Returns the Feynman-:math:`x_F` distribution
of :math:`\\sigma_{D-mesons}` in mb
Args:
x (float or np.array): :math:`x_F`
E (float): center-of-mass energy in GeV
mes (int): PDG ID of D-meson: :math:`\\pm421, \\pm431, \\pm411`
Returns:
float: :math:`\\sigma_{D-mesons}` in mb
"""
xc = self.d_frag(x)
return self.dsig_dx(xc, E) * self.D0_scale * self.cs_scales[mes]
def LambdaC_dist(self, x, E):
"""Returns the Feynman-:math:`x_F` distribution
of :math:`\\sigma_{\\Lambda_C}` in mb
Args:
x (float or np.array): :math:`x_F`
E (float): center-of-mass energy in GeV
mes (int): PDG ID of D-meson: :math:`\\pm421, \\pm431, \\pm411`
Returns:
float: :math:`\\sigma_{D-mesons}` in mb
"""
xc = self.lambda_c_frag(x)
return self.dsig_dx(xc, E) * self.D0_scale * self.cs_scales[4122]
def get_yield_matrix(self, proj, sec):
"""Returns the yield matrix in proper format for :class:`MCEqRun`.
Args:
proj (int): projectile PDG ID :math:`\\pm` [2212, 211, 321]
sec (int): charmed particle PDG ID :math:`\\pm` [411, 421, 431, 4122]
Returns:
np.array: yield matrix if (proj,sec) combination allowed,
else zero matrix
"""
# TODO: Make this function a member of the base class!
if (proj not in self.allowed_proj) or (
abs(sec) not in self.allowed_sec):
return self.no_prod
self.xdist = None
if abs(sec) == 4122 and \
((np.sign(proj) != np.sign(sec)) or abs(proj) < 1000):
return self.no_prod
else:
self.xdist = lambda e: self.LambdaC_dist(self.e_grid / e, e) / e
if abs(sec) != 4122:
self.xdist = lambda e: self.D_dist(self.e_grid / e, e, abs(sec)) / e
m_out = np.zeros_like(self.no_prod)
# convert x distribution to E_sec distribution and distribute on the grid
for i, e in enumerate(self.e_grid):
m_out[:, i] = self.xdist(e) / self.siginel[i]
info(3, 'returning matrix for ({0},{1})'.format(proj, sec))
return m_out
def test(self):
"""Plots the meson, baryon and charm quark distribution as shown in
the plot below.
.. figure:: graphics/MRS_test.png
:scale: 50 %
:alt: output of test function
"""
import matplotlib.pyplot as plt
xvec = np.linspace(0.0001, 1., 20)
# Energy for plotting inclusive cross-sections
eprobe = 1e7
plt.figure(figsize=(8.5, 4))
plt.subplot(121)
plt.semilogy(
xvec,
xvec * self.dsig_dx(xvec, eprobe),
lw=1.5,
label=r'$c$-quark')
plt.semilogy(
xvec,
xvec * self.D_dist(xvec, eprobe, 421),
lw=1.5,
label=r'$D^0$')
plt.semilogy(
xvec,
xvec * self.D_dist(xvec, eprobe, 411),
lw=1.5,
label=r'$D^+$')
plt.semilogy(
xvec,
xvec * self.D_dist(xvec, eprobe, 431),
lw=1.5,
label=r'$Ds^+$')
plt.semilogy(
xvec,
xvec * self.LambdaC_dist(xvec, 1e4),
lw=1.5,
label=r'$\Lambda_C^+$')
plt.legend()
plt.xlabel(r'$x_F$')
plt.ylabel(r'inclusive $\sigma$ [mb]')
plt.subplot(122)
evec = np.logspace(4, 11, 100)
plt.loglog(
np.sqrt(evec),
self.sigma_cc(evec),
lw=1.5,
label=r'$\sigma_{c\bar{c}}$')
plt.legend()
plt.xlabel(r'$\sqrt{s}$ [GeV]')
plt.ylabel(r'$\sigma_{c\bar{c}}$ [mb]')
plt.tight_layout()
class WHR_charm(MRS_charm):
"""Logan Wille, Francis Halzen, Hall Reno.
The approach is the same as in A. D. Martin, M. G. Ryskin,
and A. M. Stasto, Acta Physica Polonica B 34, 3273 (2003).
The parameterization of the inclusive :math:`c\\bar{c}`
cross-section is replaced by interpolated tables from the
calculation. Fragmentation functions and certain scales are
needed to obtain meson and baryon fluxes as a function
of the kinematic variable :math:`x_F`. At high energies
and :math:`x_F > 0.05`, where this model is valid,
:math:`x_F \\approx x=E_c/E_{proj}`.
Here, these fragmentation functions are used:
- :math:`D`-mesons :math:`\\frac{4}{3} x`
- :math:`\\Lambda`-baryons :math:`\\frac{1}{1.47} x`
The production ratios between the different types of
:math:`D`-mesons are stored in the attribute :attr:`cs_scales`
and :attr:`D0_scale`, where :attr:`D0_scale` is the
:math:`c\\bar{c}` to :math:`D^0` ratio and :attr:`cs_scales`
stores the production ratios of :math:`D^\\pm/D^0`,
:math:`D_s/D^0` and :math:`\\Lambda_c/D^0`.
Since the model employs only perturbartive production of
charm, the charge conjugates are symmetric, i.e.
:math:`\\sigma_{D^+} = \\sigma_{D^-}` etc.
Args:
e_grid (np.array): energy grid as it is defined in
:class:`MCEqRun`.
csm (np.array): inelastic cross-sections as used in
:class:`MCEqRun`.
"""
def __init__(self, e_grid, csm):
import pickle
self.sig_table = pickle.load(open('references/logan_charm.ppl', 'rb'))
self.e_idcs = {}
for i, e in enumerate(e_grid):
self.e_idcs[e] = i
MRS_charm.__init__(self, e_grid, csm)
def dsig_dx(self, x, E):
"""Returns the Feynman-:math:`x_F` distribution
of :math:`\\sigma_{c\\bar{c}}` in mb
Args:
x (float or np.array): :math:`x_F`
E (float): center-of-mass energy in GeV
Returns:
float: :math:`\\sigma_{c\\bar{c}}` in mb
"""
res = self.sig_table[self.e_idcs[E]](x) * 1e-3 #mub -> mb
res[res < 0] = 0.
return res
| 11,491 | 32.602339 | 81 | py |
MCEq | MCEq-master/MCEq/version.py | __version__ = '1.2.6'
| 22 | 10.5 | 21 | py |
MCEq | MCEq-master/MCEq/__init__.py | 0 | 0 | 0 | py |
|
MCEq | MCEq-master/MCEq/solvers.py |
import numpy as np
import mceq_config as config
from MCEq.misc import info
def solv_numpy(nsteps, dX, rho_inv, int_m, dec_m, phi, grid_idcs):
""":mod:`numpy` implementation of forward-euler integration.
Args:
nsteps (int): number of integration steps
dX (numpy.array[nsteps]): vector of step-sizes :math:`\\Delta X_i` in g/cm**2
rho_inv (numpy.array[nsteps]): vector of density values :math:`\\frac{1}{\\rho(X_i)}`
int_m (numpy.array): interaction matrix :eq:`int_matrix` in dense or sparse representation
dec_m (numpy.array): decay matrix :eq:`dec_matrix` in dense or sparse representation
phi (numpy.array): initial state vector :math:`\\Phi(X_0)`
Returns:
numpy.array: state vector :math:`\\Phi(X_{nsteps})` after integration
"""
grid_sol = []
grid_step = 0
imc = int_m
dmc = dec_m
dxc = dX
ric = rho_inv
phc = phi
dXaccum = 0.
from time import time
start = time()
for step in range(nsteps):
phc += (imc.dot(phc) + dmc.dot(ric[step] * phc)) * dxc[step]
dXaccum += dxc[step]
if (grid_idcs and grid_step < len(grid_idcs)
and grid_idcs[grid_step] == step):
grid_sol.append(np.copy(phc))
grid_step += 1
info(
2, "Performance: {0:6.2f}ms/iteration".format(1e3 * (time() - start) /
float(nsteps)))
return phc, np.array(grid_sol)
class CUDASparseContext(object):
"""This class handles the transfer between CPU and GPU memory, and the calling
of GPU kernels. Initialized by :class:`MCEq.core.MCEqRun` and used by
:func:`solv_CUDA_sparse`.
"""
def __init__(self, int_m, dec_m, device_id=0):
if config.cuda_fp_precision == 32:
self.fl_pr = np.float32
elif config.cuda_fp_precision == 64:
self.fl_pr = np.float64
else:
raise Exception(
"CUDASparseContext(): Unknown precision specified.")
# Setup GPU stuff and upload data to it
try:
import cupy as cp
import cupyx.scipy as cpx
self.cp = cp
self.cpx = cpx
self.cubl = cp.cuda.cublas
except ImportError:
raise Exception("solv_CUDA_sparse(): Numbapro CUDA libaries not " +
"installed.\nCan not use GPU.")
cp.cuda.Device(config.cuda_gpu_id).use()
self.cubl_handle = self.cubl.create()
self.set_matrices(int_m, dec_m)
def set_matrices(self, int_m, dec_m):
"""Upload sparce matrices to GPU memory"""
self.cu_int_m = self.cpx.sparse.csr_matrix(int_m, dtype=self.fl_pr)
self.cu_dec_m = self.cpx.sparse.csr_matrix(dec_m, dtype=self.fl_pr)
self.cu_delta_phi = self.cp.zeros(self.cu_int_m.shape[0],
dtype=self.fl_pr)
def alloc_grid_sol(self, dim, nsols):
"""Allocates memory for intermediate if grid solution requested."""
self.curr_sol_idx = 0
self.grid_sol = self.cp.zeros((nsols, dim))
def dump_sol(self):
"""Saves current solution to a new index in grid solution memory."""
self.cp.copyto(self.grid_sol[self.curr_sol_idx, :], self.cu_curr_phi)
self.curr_sol_idx += 1
# self.grid_sol[self.curr_sol, :] = self.cu_curr_phi
def get_gridsol(self):
"""Downloads grid solution to main memory."""
return self.cp.asnumpy(self.grid_sol)
def set_phi(self, phi):
"""Uploads initial condition to GPU memory."""
self.cu_curr_phi = self.cp.asarray(phi, dtype=self.fl_pr)
def get_phi(self):
"""Downloads current solution from GPU memory."""
return self.cp.asnumpy(self.cu_curr_phi)
def solve_step(self, rho_inv, dX):
"""Makes one solver step on GPU using cuSparse (BLAS)"""
self.cp.cusparse.csrmv(a=self.cu_int_m,
x=self.cu_curr_phi,
y=self.cu_delta_phi,
alpha=1.,
beta=0.)
self.cp.cusparse.csrmv(a=self.cu_dec_m,
x=self.cu_curr_phi,
y=self.cu_delta_phi,
alpha=rho_inv,
beta=1.)
self.cubl.saxpy(self.cubl_handle, self.cu_delta_phi.shape[0], dX,
self.cu_delta_phi.data.ptr, 1,
self.cu_curr_phi.data.ptr, 1)
def solv_CUDA_sparse(nsteps, dX, rho_inv, context, phi, grid_idcs):
"""`NVIDIA CUDA cuSPARSE <https://developer.nvidia.com/cusparse>`_ implementation
of forward-euler integration.
Function requires a working :mod:`accelerate` installation.
Args:
nsteps (int): number of integration steps
dX (numpy.array[nsteps]): vector of step-sizes :math:`\\Delta X_i` in g/cm**2
rho_inv (numpy.array[nsteps]): vector of density values :math:`\\frac{1}{\\rho(X_i)}`
int_m (numpy.array): interaction matrix :eq:`int_matrix` in dense or sparse representation
dec_m (numpy.array): decay matrix :eq:`dec_matrix` in dense or sparse representation
phi (numpy.array): initial state vector :math:`\\Phi(X_0)`
mu_loss_handler (object): object of type :class:`SemiLagrangianEnergyLosses`
Returns:
numpy.array: state vector :math:`\\Phi(X_{nsteps})` after integration
"""
c = context
c.set_phi(phi)
grid_step = 0
from time import time
start = time()
if len(grid_idcs) > 0:
c.alloc_grid_sol(phi.shape[0], len(grid_idcs))
for step in range(nsteps):
c.solve_step(rho_inv[step], dX[step])
if (grid_idcs and grid_step < len(grid_idcs)
and grid_idcs[grid_step] == step):
c.dump_sol()
grid_step += 1
info(
2, "Performance: {0:6.2f}ms/iteration".format(1e3 * (time() - start) /
float(nsteps)))
return c.get_phi(), c.get_gridsol() if len(grid_idcs) > 0 else []
def solv_MKL_sparse(nsteps, dX, rho_inv, int_m, dec_m, phi, grid_idcs):
# mu_loss_handler):
"""`Intel MKL sparse BLAS
<https://software.intel.com/en-us/articles/intel-mkl-sparse-blas-overview?language=en>`_
implementation of forward-euler integration.
Function requires that the path to the MKL runtime library ``libmkl_rt.[so/dylib]``
defined in the config file.
Args:
nsteps (int): number of integration steps
dX (numpy.array[nsteps]): vector of step-sizes :math:`\\Delta X_i` in g/cm**2
rho_inv (numpy.array[nsteps]): vector of density values :math:`\\frac{1}{\\rho(X_i)}`
int_m (numpy.array): interaction matrix :eq:`int_matrix` in dense or sparse representation
dec_m (numpy.array): decay matrix :eq:`dec_matrix` in dense or sparse representation
phi (numpy.array): initial state vector :math:`\\Phi(X_0)`
grid_idcs (list): indices at which longitudinal solutions have to be saved.
Returns:
numpy.array: state vector :math:`\\Phi(X_{nsteps})` after integration
"""
from ctypes import c_int, c_char, POINTER, byref
from mceq_config import mkl
gemv = None
axpy = None
np_fl = None
from ctypes import c_double as fl_pr
# sparse CSR-matrix x dense vector
gemv = mkl.mkl_dcsrmv
# dense vector + dense vector
axpy = mkl.cblas_daxpy
np_fl = np.float64
# Prepare CTYPES pointers for MKL sparse CSR BLAS
int_m_data = int_m.data.ctypes.data_as(POINTER(fl_pr))
int_m_ci = int_m.indices.ctypes.data_as(POINTER(c_int))
int_m_pb = int_m.indptr[:-1].ctypes.data_as(POINTER(c_int))
int_m_pe = int_m.indptr[1:].ctypes.data_as(POINTER(c_int))
dec_m_data = dec_m.data.ctypes.data_as(POINTER(fl_pr))
dec_m_ci = dec_m.indices.ctypes.data_as(POINTER(c_int))
dec_m_pb = dec_m.indptr[:-1].ctypes.data_as(POINTER(c_int))
dec_m_pe = dec_m.indptr[1:].ctypes.data_as(POINTER(c_int))
npphi = np.copy(phi).astype(np_fl)
phi = npphi.ctypes.data_as(POINTER(fl_pr))
npdelta_phi = np.zeros_like(npphi)
delta_phi = npdelta_phi.ctypes.data_as(POINTER(fl_pr))
trans = c_char(b'n')
npmatd = np.chararray(6)
npmatd[0] = b'G'
npmatd[3] = b'C'
matdsc = npmatd.ctypes.data_as(POINTER(c_char))
m = c_int(int_m.shape[0])
cdzero = fl_pr(0.)
cdone = fl_pr(1.)
cione = c_int(1)
grid_step = 0
grid_sol = []
from time import time
start = time()
for step in range(nsteps):
# delta_phi = int_m.dot(phi)
gemv(byref(trans), byref(m), byref(m), byref(cdone),
matdsc, int_m_data, int_m_ci, int_m_pb, int_m_pe, phi,
byref(cdzero), delta_phi)
# delta_phi = rho_inv * dec_m.dot(phi) + delta_phi
gemv(byref(trans), byref(m), byref(m), byref(fl_pr(rho_inv[step])),
matdsc, dec_m_data, dec_m_ci, dec_m_pb, dec_m_pe, phi,
byref(cdone), delta_phi)
# phi = delta_phi * dX + phi
axpy(m, fl_pr(dX[step]), delta_phi, cione, phi, cione)
if (grid_idcs and grid_step < len(grid_idcs)
and grid_idcs[grid_step] == step):
grid_sol.append(np.copy(npphi))
grid_step += 1
info(
2, "Performance: {0:6.2f}ms/iteration".format(1e3 * (time() - start) /
float(nsteps)))
return npphi, np.asarray(grid_sol)
# # TODO: Debug this and transition to BDF
# def _odepack(dXstep=.1,
# initial_depth=0.0,
# int_grid=None,
# grid_var='X',
# *args,
# **kwargs):
# """Solves the transport equations with solvers from ODEPACK.
# Args:
# dXstep (float): external step size (adaptive sovlers make more steps internally)
# initial_depth (float): starting depth in g/cm**2
# int_grid (list): list of depths at which results are recorded
# grid_var (str): Can be depth `X` or something else (currently only `X` supported)
# """
# from scipy.integrate import ode
# ri = self.density_model.r_X2rho
# if config.enable_muon_energy_loss:
# raise NotImplementedError(
# 'Energy loss not imlemented for this solver.')
# # Functional to solve
# def dPhi_dX(X, phi, *args):
# return self.int_m.dot(phi) + self.dec_m.dot(ri(X) * phi)
# # Jacobian doesn't work with sparse matrices, and any precision
# # or speed advantage disappear if used with dense algebra
# def jac(X, phi, *args):
# # print 'jac', X, phi
# return (self.int_m + self.dec_m * ri(X)).todense()
# # Initial condition
# phi0 = np.copy(self.phi0)
# # Initialize variables
# grid_sol = []
# # Setup solver
# r = ode(dPhi_dX).set_integrator(
# with_jacobian=False, **config.ode_params)
# if int_grid is not None:
# initial_depth = int_grid[0]
# int_grid = int_grid[1:]
# max_X = int_grid[-1]
# grid_sol.append(phi0)
# else:
# max_X = self.density_model.max_X
# info(
# 1,
# 'your X-grid is shorter then the material',
# condition=max_X < self.density_model.max_X)
# info(
# 1,
# 'your X-grid exceeds the dimentsions of the material',
# condition=max_X > self.density_model.max_X)
# # Initial value
# r.set_initial_value(phi0, initial_depth)
# info(
# 2, 'initial depth: {0:3.2e}, maximal depth {1:}'.format(
# initial_depth, max_X))
# start = time()
# if int_grid is None:
# i = 0
# while r.successful() and (r.t + dXstep) < max_X - 1:
# info(5, "Solving at depth X =", r.t, condition=(i % 5000) == 0)
# r.integrate(r.t + dXstep)
# i += 1
# if r.t < max_X:
# r.integrate(max_X)
# # Do last step to make sure the rational number max_X is reached
# r.integrate(max_X)
# else:
# for i, Xi in enumerate(int_grid):
# info(5, 'integrating at X =', Xi, condition=i % 10 == 0)
# while r.successful() and (r.t + dXstep) < Xi:
# r.integrate(r.t + dXstep)
# # Make sure the integrator arrives at requested step
# r.integrate(Xi)
# # Store the solution on grid
# grid_sol.append(r.y)
# info(2,
# 'time elapsed during integration: {1} sec'.format(time() - start))
# self.solution = r.y
# self.grid_sol = grid_sol
| 12,678 | 34.317549 | 96 | py |
MCEq | MCEq-master/MCEq/tests/test_densities.py | import numpy as np
def test_corsika_atm():
from MCEq.geometry.density_profiles import CorsikaAtmosphere
# Depth at surface and density at X=100 g/cm2
cka_surf_100 = [
(1036.099233683902, 0.00015623258808300557),
(1033.8094962133184, 0.00015782685585891685),
(1055.861981113731, 0.00016209949387937668),
(986.9593811082788, 0.00015529574727367941),
(988.4293864278521, 0.0001589317236294479),
(1032.7184058861765, 0.00016954131888323744),
(1039.3697214845179, 0.00016202068935405075),
(1018.1547240905948, 0.0001609490344992944),
(1011.4568036341923, 0.00014626903051217024),
(1019.974568696789, 0.0001464549375212421),
(1019.9764946890782, 0.0001685608228906579)
]
for iatm, (loc, season) in enumerate([
("USStd", None),
("BK_USStd", None),
("Karlsruhe", None),
("ANTARES/KM3NeT-ORCA", 'Summer'),
("ANTARES/KM3NeT-ORCA", 'Winter'),
("KM3NeT-ARCA", 'Summer'),
("KM3NeT-ARCA", 'Winter'),
("KM3NeT", None),
('SouthPole', 'December'),
('PL_SouthPole', 'January'),
('PL_SouthPole', 'August'),
]):
cka_obj = CorsikaAtmosphere(loc, season)
assert np.allclose([cka_obj.max_X, 1. /
cka_obj.r_X2rho(100.)], cka_surf_100[iatm])
def test_msis_atm():
from MCEq.geometry.density_profiles import MSIS00Atmosphere
msis_surf_100 = [
(1022.6914983678925, 0.00014380042112573175), (1041.2180457811605, 0.00016046129606232836),
(1044.6608866969684, 0.00016063221634835724), (1046.427667371285, 0.00016041531186210874),
(1048.6505423154006, 0.00016107650347480857), (1050.6431802896034, 0.00016342084740033518),
(1050.2145039327452, 0.00016375664772178006), (1033.3640270683418, 0.00015614485659072835),
(1045.785578319159, 0.00015970449150213374), (1019.9475650272982, 0.000153212909250962),
(1020.3640351872195, 0.00015221038616604717), (1047.964376368261, 0.00016218804771381842)
]
for iatm, (loc, season) in enumerate([
('SouthPole', "January"),
('Karlsruhe', "January"),
('Geneva', "January"),
('Tokyo', "January"),
('SanGrasso', "January"),
('TelAviv', "January"),
('KSC', "January"),
('SoudanMine', "January"),
('Tsukuba', "January"),
('LynnLake', "January"),
('PeaceRiver', "January"),
('FtSumner', "January")
]):
msis_obj = MSIS00Atmosphere(loc, season)
assert np.allclose([msis_obj.max_X, 1. /
msis_obj.r_X2rho(100.)], msis_surf_100[iatm])
| 2,689 | 37.985507 | 99 | py |
MCEq | MCEq-master/MCEq/tests/test_msis.py | from __future__ import print_function
result_expected = \
"""6.665177E+05 1.138806E+08 1.998211E+07 4.022764E+05 3.557465E+03 4.074714E-15 3.475312E+04 4.095913E+06 2.667273E+04 1.250540E+03 1.241416E+03
3.407293E+06 1.586333E+08 1.391117E+07 3.262560E+05 1.559618E+03 5.001846E-15 4.854208E+04 4.380967E+06 6.956682E+03 1.166754E+03 1.161710E+03
1.123767E+05 6.934130E+04 4.247105E+01 1.322750E-01 2.618848E-05 2.756772E-18 2.016750E+04 5.741256E+03 2.374394E+04 1.239892E+03 1.239891E+03
5.411554E+07 1.918893E+11 6.115826E+12 1.225201E+12 6.023212E+10 3.584426E-10 1.059880E+07 2.615737E+05 2.819879E-42 1.027318E+03 2.068878E+02
1.851122E+06 1.476555E+08 1.579356E+07 2.633795E+05 1.588781E+03 4.809630E-15 5.816167E+04 5.478984E+06 1.264446E+03 1.212396E+03 1.208135E+03
8.673095E+05 1.278862E+08 1.822577E+07 2.922214E+05 2.402962E+03 4.355866E-15 3.686389E+04 3.897276E+06 2.667273E+04 1.220146E+03 1.212712E+03
5.776251E+05 6.979139E+07 1.236814E+07 2.492868E+05 1.405739E+03 2.470651E-15 5.291986E+04 1.069814E+06 2.667273E+04 1.116385E+03 1.112999E+03
3.740304E+05 4.782720E+07 5.240380E+06 1.759875E+05 5.501649E+02 1.571889E-15 8.896776E+04 1.979741E+06 9.121815E+03 1.031247E+03 1.024848E+03
6.748339E+05 1.245315E+08 2.369010E+07 4.911583E+05 4.578781E+03 4.564420E-15 3.244595E+04 5.370833E+06 2.667273E+04 1.306052E+03 1.293374E+03
5.528601E+05 1.198041E+08 3.495798E+07 9.339618E+05 1.096255E+04 4.974543E-15 2.686428E+04 4.889974E+06 2.805445E+04 1.361868E+03 1.347389E+03
1.375488E+14 0.000000E+00 2.049687E+19 5.498695E+18 2.451733E+17 1.261066E-03 0.000000E+00 0.000000E+00 0.000000E+00 1.027318E+03 2.814648E+02
4.427443E+13 0.000000E+00 6.597567E+18 1.769929E+18 7.891680E+16 4.059139E-04 0.000000E+00 0.000000E+00 0.000000E+00 1.027318E+03 2.274180E+02
2.127829E+12 0.000000E+00 3.170791E+17 8.506280E+16 3.792741E+15 1.950822E-05 0.000000E+00 0.000000E+00 0.000000E+00 1.027318E+03 2.374389E+02
1.412184E+11 0.000000E+00 2.104370E+16 5.645392E+15 2.517142E+14 1.294709E-06 0.000000E+00 0.000000E+00 0.000000E+00 1.027318E+03 2.795551E+02
1.254884E+10 0.000000E+00 1.874533E+15 4.923051E+14 2.239685E+13 1.147668E-07 0.000000E+00 0.000000E+00 0.000000E+00 1.027318E+03 2.190732E+02
5.196477E+05 1.274494E+08 4.850450E+07 1.720838E+06 2.354487E+04 5.881940E-15 2.500078E+04 6.279210E+06 2.667273E+04 1.426412E+03 1.408608E+03
4.260860E+07 1.241342E+11 4.929562E+12 1.048407E+12 4.993465E+10 2.914304E-10 8.831229E+06 2.252516E+05 2.415246E-42 1.027318E+03 1.934071E+02
DAY 172 81 172 172 172
UT 29000 29000 75000 29000 29000
ALT 400 400 1000 100 400
LAT 60 60 60 60 0
LONG -70 -70 -70 -70 -70
LST 16 16 16 16 16
F107A 150 150 150 150 150
F107 150 150 150 150 150
TINF 1250.54 1166.75 1239.89 1027.32 1212.40
TG 1241.42 1161.71 1239.89 206.89 1208.14
HE 6.665e+05 3.407e+06 1.124e+05 5.412e+07 1.851e+06
O 1.139e+08 1.586e+08 6.934e+04 1.919e+11 1.477e+08
N2 1.998e+07 1.391e+07 4.247e+01 6.116e+12 1.579e+07
O2 4.023e+05 3.263e+05 1.323e-01 1.225e+12 2.634e+05
AR 3.557e+03 1.560e+03 2.619e-05 6.023e+10 1.589e+03
H 3.475e+04 4.854e+04 2.017e+04 1.060e+07 5.816e+04
N 4.096e+06 4.381e+06 5.741e+03 2.616e+05 5.479e+06
ANM 0 2.667e+04 6.957e+03 2.374e+04 2.820e-42 1.264e+03
RHO 4.075e-15 5.002e-15 2.757e-18 3.584e-10 4.810e-15
DAY 172 172 172 172 172
UT 29000 29000 29000 29000 29000
ALT 400 400 400 400 400
LAT 60 60 60 60 60
LONG 0 -70 -70 -70 -70
LST 16 4 16 16 16
F107A 150 150 70 150 150
F107 150 150 150 180 150
TINF 1220.15 1116.39 1031.25 1306.05 1361.87
TG 1212.71 1113.00 1024.85 1293.37 1347.39
HE 8.673e+05 5.776e+05 3.740e+05 6.748e+05 5.529e+05
O 1.279e+08 6.979e+07 4.783e+07 1.245e+08 1.198e+08
N2 1.823e+07 1.237e+07 5.240e+06 2.369e+07 3.496e+07
O2 2.922e+05 2.493e+05 1.760e+05 4.912e+05 9.340e+05
AR 2.403e+03 1.406e+03 5.502e+02 4.579e+03 1.096e+04
H 3.686e+04 5.292e+04 8.897e+04 3.245e+04 2.686e+04
N 3.897e+06 1.070e+06 1.980e+06 5.371e+06 4.890e+06
ANM 0 2.667e+04 2.667e+04 9.122e+03 2.667e+04 2.805e+04
RHO 4.356e-15 2.471e-15 1.572e-15 4.564e-15 4.975e-15
DAY 172 172 172 172 172
UT 29000 29000 29000 29000 29000
ALT 0 10 30 50 70
LAT 60 60 60 60 60
LONG -70 -70 -70 -70 -70
LST 16 16 16 16 16
F107A 150 150 150 150 150
F107 150 150 150 150 150
TINF 1027.32 1027.32 1027.32 1027.32 1027.32
TG 281.46 227.42 237.44 279.56 219.07
HE 1.375e+14 4.427e+13 2.128e+12 1.412e+11 1.255e+10
O 0.000e+00 0.000e+00 0.000e+00 0.000e+00 0.000e+00
N2 2.050e+19 6.598e+18 3.171e+17 2.104e+16 1.875e+15
O2 5.499e+18 1.770e+18 8.506e+16 5.645e+15 4.923e+14
AR 2.452e+17 7.892e+16 3.793e+15 2.517e+14 2.240e+13
H 0.000e+00 0.000e+00 0.000e+00 0.000e+00 0.000e+00
N 0.000e+00 0.000e+00 0.000e+00 0.000e+00 0.000e+00
ANM 0 0.000e+00 0.000e+00 0.000e+00 0.000e+00 0.000e+00
RHO 1.261e-03 4.059e-04 1.951e-05 1.295e-06 1.148e-07
"""
def test_msis():
from ctypes import (c_int, c_double, pointer, byref)
from MCEq.geometry.nrlmsise00.nrlmsise00 import (
msis, nrlmsise_output, nrlmsise_input, nrlmsise_flags, ap_array)
output = [nrlmsise_output() for i in range(17)]
inp = [nrlmsise_input() for i in range(17)]
flags = nrlmsise_flags()
aph = ap_array()
# Inp values
for i in range(7):
aph.a[i] = c_double(100.)
flags.switches[0] = c_int(0)
for i in range(1, 24):
flags.switches[i] = c_int(1)
for i in range(17):
inp[i].doy = c_int(172) # Day of year
inp[i].year = c_int(0) # No effect
inp[i].sec = c_double(29000.)
inp[i].alt = c_double(400.)
inp[i].g_lat = c_double(60.)
inp[i].g_long = c_double(-70.)
inp[i].lst = c_double(16.)
inp[i].f107A = c_double(150.)
inp[i].f107 = c_double(150.)
inp[i].ap = c_double(4.)
inp[1].doy = c_int(81)
inp[2].sec = c_double(75000.)
inp[2].alt = c_double(1000.)
inp[3].alt = c_double(100.)
inp[10].alt = c_double(0.)
inp[11].alt = c_double(10.)
inp[12].alt = c_double(30.)
inp[13].alt = c_double(50.)
inp[14].alt = c_double(70.)
inp[16].alt = c_double(100.)
inp[4].g_lat = c_double(0.)
inp[5].g_long = c_double(0.)
inp[6].lst = c_double(4.)
inp[7].f107A = c_double(70.)
inp[8].f107 = c_double(180.)
inp[9].ap = c_double(40.)
inp[15].ap_a = pointer(aph)
inp[16].ap_a = pointer(aph)
for i in range(15):
# msis.gtd7(byref(inp[i]), byref(flags), byref(output[i]))
msis.gtd7_py(inp[i].year, inp[i].doy, inp[i].sec, inp[i].alt, inp[i].g_lat,
inp[i].g_long, inp[i].lst, inp[i].f107A, inp[i].f107,
inp[i].ap, inp[15].ap_a, byref(flags), byref(output[i]))
flags.switches[9] = -1
for i in range(15, 17):
msis.gtd7_py(inp[i].year, inp[i].doy, inp[i].sec, inp[i].alt, inp[i].g_lat,
inp[i].g_long, inp[i].lst, inp[i].f107A, inp[i].f107,
inp[i].ap, inp[15].ap_a, byref(flags), byref(output[i]))
# msis.gtd7(byref(inp[i]), byref(flags), byref(output[i]))
# output type 1
outbuf = ""
for i in range(17):
for j in range(9):
outbuf += '{0:E} '.format(output[i].d[j])
outbuf += '{0:E} '.format(output[i].t[0])
outbuf += '{0:E} \n'.format(output[i].t[1])
# output type 2
for i in range(3):
outbuf += "\n"
outbuf += "\nDAY "
for j in range(5):
outbuf += " {0:3}".format(inp[i * 5 + j].doy.value)
outbuf += "\nUT "
for j in range(5):
outbuf += " {0:5.0f}".format(inp[i * 5 + j].sec.value)
outbuf += "\nALT "
for j in range(5):
outbuf += " {0:4.0f}".format(inp[i * 5 + j].alt.value)
outbuf += "\nLAT "
for j in range(5):
outbuf += " {0:3.0f}".format(inp[i * 5 + j].g_lat.value)
outbuf += "\nLONG "
for j in range(5):
outbuf += " {0:3.0f}".format(inp[i * 5 + j].g_long.value)
outbuf += "\nLST "
for j in range(5):
outbuf += " {0:5.0f}".format(inp[i * 5 + j].lst.value)
outbuf += "\nF107A "
for j in range(5):
outbuf += " {0:3.0f}".format(inp[i * 5 + j].f107A.value)
outbuf += "\nF107 "
for j in range(5):
outbuf += " {0:3.0f}".format(inp[i * 5 + j].f107.value)
outbuf += "\n\n"
outbuf += "\nTINF "
for j in range(5):
outbuf += " {0:7.2f}".format(output[i * 5 + j].t[0])
outbuf += "\nTG "
for j in range(5):
outbuf += " {0:7.2f}".format(output[i * 5 + j].t[1])
outbuf += "\nHE "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[0])
outbuf += "\nO "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[1])
outbuf += "\nN2 "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[2])
outbuf += "\nO2 "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[3])
outbuf += "\nAR "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[4])
outbuf += "\nH "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[6])
outbuf += "\nN "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[7])
outbuf += "\nANM 0 "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[8])
outbuf += "\nRHO "
for j in range(5):
outbuf += " {0:1.3e}".format(output[i * 5 + j].d[5])
outbuf += "\n"
outbuf += "\n"
# print(outbuf)
assert outbuf.strip() == result_expected.strip() | 11,398 | 49.438053 | 146 | py |
MCEq | MCEq-master/MCEq/tests/test_mceq.py | from __future__ import print_function
import mceq_config as config
from MCEq.core import MCEqRun
import crflux.models as pm
import numpy as np
import pytest
import sys
if sys.platform.startswith("win") and sys.maxsize <= 2**32:
pytest.skip("Skip model test on 32-bit Windows.", allow_module_level=True)
def format_8_digits(a_list):
return ["%.8e" % member for member in a_list]
config.debug_level = 1
config.kernel_config = 'numpy'
config.cuda_gpu_id = 0
if config.has_mkl:
config.set_mkl_threads(2)
mceq = MCEqRun(
interaction_model='SIBYLL23C',
theta_deg=0.,
primary_model=(pm.HillasGaisser2012, 'H3a'))
def test_config_and_file_download():
import mceq_config as config
import os
# Import of config triggers data download
assert config.mceq_db_fname in os.listdir(config.data_dir)
def test_some_angles():
nmu = []
for theta in [0., 30., 60., 90]:
mceq.set_theta_deg(theta)
mceq.solve()
nmu.append(
np.sum(
mceq.get_solution('mu+', mag=0, integrate=True) +
mceq.get_solution('mu-', mag=0, integrate=True)))
assert format_8_digits(nmu), ['5.62504370e-03', '4.20479234e-03', '1.36630552e-03', '8.20255259e-06']
def test_switch_interaction_models():
mlist = [
'DPMJETIII191',
'DPMJETIII306',
'QGSJET01C',
'QGSJETII03',
'QGSJETII04',
'SIBYLL21',
'SIBYLL23',
'SIBYLL23C03',
'SIBYLL23C',
'SIBYLL23CPP']
count_part = []
for m in mlist:
mceq.set_interaction_model(m)
count_part.append(len(mceq._particle_list))
assert(count_part == [64, 64, 58, 44, 44, 48, 62, 62, 62, 62])
def test_single_primary():
energies = [1e3, 1e6, 1e9, 5e10]
nmu, nnumu, nnue = [], [], []
mceq.set_interaction_model('SIBYLL23C')
mceq.set_theta_deg(0.)
for e in energies:
mceq.set_single_primary_particle(E=e, pdg_id=2212)
mceq.solve()
nmu.append(
np.sum(
mceq.get_solution('mu+', mag=0, integrate=True) +
mceq.get_solution('mu-', mag=0, integrate=True)))
nnumu.append(
np.sum(
mceq.get_solution('numu', mag=0, integrate=True) +
mceq.get_solution('antinumu', mag=0, integrate=True)))
nnue.append(
np.sum(
mceq.get_solution('nue', mag=0, integrate=True) +
mceq.get_solution('antinue', mag=0, integrate=True)))
assert format_8_digits(nmu) == ['2.03134720e+01', '1.20365838e+04', '7.09254150e+06', '2.63982133e+08']
assert format_8_digits(nnumu) == ['6.80367347e+01', '2.53158948e+04', '1.20884925e+07', '4.14935240e+08']
assert format_8_digits(nnue) == ['2.36908717e+01', '6.91213253e+03', '2.87396649e+06', '9.27683105e+07']
| 2,845 | 29.276596 | 109 | py |
MCEq | MCEq-master/MCEq/geometry/density_profiles.py | from abc import ABCMeta, abstractmethod
from six import with_metaclass
from os.path import join
import numpy as np
from MCEq.misc import theta_rad
from MCEq.misc import info
import mceq_config as config
class EarthsAtmosphere(with_metaclass(ABCMeta)):
"""
Abstract class containing common methods on atmosphere.
You have to inherit from this class and implement the virtual method
:func:`get_density`.
Note:
Do not instantiate this class directly.
Attributes:
thrad (float): current zenith angle :math:`\\theta` in radiants
theta_deg (float): current zenith angle :math:`\\theta` in degrees
max_X (float): Slant depth at the surface according to the geometry
defined in the :mod:`MCEq.geometry`
geometry (object): Can be a custom instance of EarthGeometry
"""
def __init__(self, *args, **kwargs):
from MCEq.geometry.geometry import EarthGeometry
self.geom = kwargs.pop("geometry", EarthGeometry())
self.thrad = None
self.theta_deg = None
self._max_den = config.max_density
self.max_theta = 90.0
self.location = None
self.season = None
@abstractmethod
def get_density(self, h_cm):
"""Abstract method which implementation should return the density in g/cm**3.
Args:
h_cm (float): height in cm
Returns:
float: density in g/cm**3
Raises:
NotImplementedError:
"""
raise NotImplementedError("Base class called.")
def calculate_density_spline(self, n_steps=2000):
"""Calculates and stores a spline of :math:`\\rho(X)`.
Args:
n_steps (int, optional): number of :math:`X` values
to use for interpolation
Raises:
Exception: if :func:`set_theta` was not called before.
"""
from scipy.integrate import cumtrapz
from time import time
from scipy.interpolate import UnivariateSpline
if self.theta_deg is None:
raise Exception("zenith angle not set")
else:
info(
5,
"Calculating spline of rho(X) for zenith {0:4.1f} degrees.".format(
self.theta_deg
),
)
thrad = self.thrad
path_length = self.geom.l(thrad)
vec_rho_l = np.vectorize(
lambda delta_l: self.get_density(self.geom.h(delta_l, thrad))
)
dl_vec = np.linspace(0, path_length, n_steps)
now = time()
# Calculate integral for each depth point
X_int = cumtrapz(vec_rho_l(dl_vec), dl_vec) #
dl_vec = dl_vec[1:]
info(5, ".. took {0:1.2f}s".format(time() - now))
# Save depth value at h_obs
self._max_X = X_int[-1]
self._max_den = self.get_density(self.geom.h(0, thrad))
# Interpolate with bi-splines without smoothing
h_intp = [self.geom.h(dl, thrad) for dl in reversed(dl_vec[1:])]
X_intp = [X for X in reversed(X_int[1:])]
self._s_h2X = UnivariateSpline(h_intp, np.log(X_intp), k=2, s=0.0)
self._s_X2rho = UnivariateSpline(X_int, vec_rho_l(dl_vec), k=2, s=0.0)
self._s_lX2h = UnivariateSpline(np.log(X_intp)[::-1], h_intp[::-1], k=2, s=0.0)
@property
def max_X(self):
"""Depth at altitude 0."""
if not hasattr(self, "_max_X"):
self.set_theta(0)
return self._max_X
@property
def max_den(self):
"""Density at altitude 0."""
if not hasattr(self, "_max_den"):
self.set_theta(0)
return self._max_den
@property
def s_h2X(self):
"""Spline for conversion from altitude to depth."""
if not hasattr(self, "_s_h2X"):
self.set_theta(0)
return self._s_h2X
@property
def s_X2rho(self):
"""Spline for conversion from depth to density."""
if not hasattr(self, "_s_X2rho"):
self.set_theta(0)
return self._s_X2rho
@property
def s_lX2h(self):
"""Spline for conversion from depth to altitude."""
if not hasattr(self, "_s_lX2h"):
self.set_theta(0)
return self._s_lX2h
def set_theta(self, theta_deg):
"""Configures geometry and initiates spline calculation for
:math:`\\rho(X)`.
If the option 'use_atm_cache' is enabled in the config, the
function will check, if a corresponding spline is available
in the cache and use it. Otherwise it will call
:func:`calculate_density_spline`, make the function
:func:`r_X2rho` available to the core code and store the spline
in the cache.
Args:
theta_deg (float): zenith angle :math:`\\theta` at detector
"""
if theta_deg < 0.0 or theta_deg > self.max_theta:
raise Exception("Zenith angle not in allowed range.")
self.thrad = theta_rad(theta_deg)
self.theta_deg = theta_deg
self.calculate_density_spline()
def r_X2rho(self, X):
"""Returns the inverse density :math:`\\frac{1}{\\rho}(X)`.
The spline `s_X2rho` is used, which was calculated or retrieved
from cache during the :func:`set_theta` call.
Args:
X (float): slant depth in g/cm**2
Returns:
float: :math:`1/\\rho` in cm**3/g
"""
return 1.0 / self.s_X2rho(X)
def h2X(self, h):
"""Returns the depth along path as function of height above
surface.
The spline `s_X2rho` is used, which was calculated or retrieved
from cache during the :func:`set_theta` call.
Args:
h (float): vertical height above surface in cm
Returns:
float: X slant depth in g/cm**2
"""
return np.exp(self.s_h2X(h))
def X2h(self, X):
"""Returns the height above surface as a function of slant depth
for currently selected zenith angle.
The spline `s_lX2h` is used, which was calculated or retrieved
from cache during the :func:`set_theta` call.
Args:
X (float): slant depth in g/cm**2
Returns:
float h: height above surface in cm
"""
return self.s_lX2h(np.log(X))
def X2rho(self, X):
"""Returns the density :math:`\\rho(X)`.
The spline `s_X2rho` is used, which was calculated or retrieved
from cache during the :func:`set_theta` call.
Args:
X (float): slant depth in g/cm**2
Returns:
float: :math:`\\rho` in cm**3/g
"""
return self.s_X2rho(X)
def moliere_air(self, h_cm):
"""Returns the Moliere unit of air for US standard atmosphere."""
return 9.3 / (self.get_density(h_cm) * 100.0)
def nref_rel_air(self, h_cm):
"""Returns the refractive index - 1 in air (density parametrization
as in CORSIKA).
"""
return 0.000283 * self.get_density(h_cm) / self.get_density(0)
def gamma_cherenkov_air(self, h_cm):
"""Returns the Lorentz factor gamma of Cherenkov threshold in air (MeV)."""
nrel = self.nref_rel_air(h_cm)
return (1.0 + nrel) / np.sqrt(2.0 * nrel + nrel**2)
def theta_cherenkov_air(self, h_cm):
"""Returns the Cherenkov angle in air (degrees)."""
return np.arccos(1.0 / (1.0 + self.nref_rel_air(h_cm))) * 180.0 / np.pi
class CorsikaAtmosphere(EarthsAtmosphere):
"""Class, holding the parameters of a Linsley type parameterization
similar to the Air-Shower Monte Carlo
`CORSIKA <https://web.ikp.kit.edu/corsika/>`_.
The parameters pre-defined parameters are taken from the CORSIKA
manual. If new sets of parameters are added to :func:`init_parameters`,
the array _thickl can be calculated using :func:`calc_thickl` .
Attributes:
_atm_param (numpy.array): (5x5) Stores 5 atmospheric parameters
_aatm, _batm, _catm, _thickl, _hlay
for each of the 5 layers
Args:
location (str): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
_atm_param = None
def __init__(self, location, season=None):
cka_atmospheres = [
("USStd", None),
("BK_USStd", None),
("Karlsruhe", None),
("ANTARES/KM3NeT-ORCA", "Summer"),
("ANTARES/KM3NeT-ORCA", "Winter"),
("KM3NeT-ARCA", "Summer"),
("KM3NeT-ARCA", "Winter"),
("KM3NeT", None),
("SouthPole", "December"),
("PL_SouthPole", "January"),
("PL_SouthPole", "August"),
]
assert (
location,
season,
) in cka_atmospheres, "{0}/{1} not available for CorsikaAtmsophere".format(
location, season
)
self.init_parameters(location, season)
import MCEq.geometry.corsikaatm.corsikaatm as corsika_acc
self.corsika_acc = corsika_acc
EarthsAtmosphere.__init__(self)
def init_parameters(self, location, season):
"""Initializes :attr:`_atm_param`. Parameters from ANTARES/KM3NET
are based on the work of T. Heid
(`see this issue <https://github.com/afedynitch/MCEq/issues/12>`_)
+---------------------+-------------------+------------------------------+
| location | CORSIKA Table | Description/season |
+=====================+===================+==============================+
| "USStd" | 23 | US Standard atmosphere |
+---------------------+-------------------+------------------------------+
| "BK_USStd" | 37 | Bianca Keilhauer's USStd |
+---------------------+-------------------+------------------------------+
| "Karlsruhe" | 24 | AT115 / Karlsruhe |
+---------------------+-------------------+------------------------------+
| "SouthPole" | 26 and 28 | MSIS-90-E for Dec and June |
+---------------------+-------------------+------------------------------+
|"PL_SouthPole" | 29 and 30 | P. Lipari's Jan and Aug |
+---------------------+-------------------+------------------------------+
|"ANTARES/KM3NeT-ORCA"| NA | PhD T. Heid |
+---------------------+-------------------+------------------------------+
| "KM3NeT-ARCA" | NA | PhD T. Heid |
+---------------------+-------------------+------------------------------+
Args:
location (str): see table
season (str, optional): choice of season for supported locations
Raises:
Exception: if parameter set not available
"""
_aatm, _batm, _catm, _thickl, _hlay = None, None, None, None, None
if location == "USStd":
_aatm = np.array([-186.5562, -94.919, 0.61289, 0.0, 0.01128292])
_batm = np.array([1222.6562, 1144.9069, 1305.5948, 540.1778, 1.0])
_catm = np.array([994186.38, 878153.55, 636143.04, 772170.0, 1.0e9])
_thickl = np.array(
[1036.102549, 631.100309, 271.700230, 3.039494, 0.001280]
)
_hlay = np.array([0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
elif location == "BK_USStd":
_aatm = np.array(
[-149.801663, -57.932486, 0.63631894, 4.3545369e-4, 0.01128292]
)
_batm = np.array([1183.6071, 1143.0425, 1322.9748, 655.69307, 1.0])
_catm = np.array([954248.34, 800005.34, 629568.93, 737521.77, 1.0e9])
_thickl = np.array(
[1033.804941, 418.557770, 216.981635, 4.344861, 0.001280]
)
_hlay = np.array([0.0, 7.0e5, 1.14e6, 3.7e6, 1.0e7])
elif location == "Karlsruhe":
_aatm = np.array([-118.1277, -154.258, 0.4191499, 5.4094056e-4, 0.01128292])
_batm = np.array([1173.9861, 1205.7625, 1386.7807, 555.8935, 1.0])
_catm = np.array([919546.0, 963267.92, 614315.0, 739059.6, 1.0e9])
_thickl = np.array(
[1055.858707, 641.755364, 272.720974, 2.480633, 0.001280]
)
_hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
elif location == "KM3NeT": # averaged over detector and season
_aatm = np.array(
[
-141.31449999999998,
-8.256029999999999,
0.6132505,
-0.025998975,
0.4024275,
]
)
_batm = np.array(
[
1153.0349999999999,
1263.3325,
1257.0724999999998,
404.85974999999996,
1.0,
]
)
_catm = np.array([967990.75, 668591.75, 636790.0, 814070.75, 21426175.0])
_thickl = np.array(
[
1011.8521512499999,
275.84507575000003,
51.0230705,
2.983134,
0.21927724999999998,
]
)
_hlay = np.array([0.0, 993750.0, 2081250.0, 4150000.0, 6877500.0])
elif location == "ANTARES/KM3NeT-ORCA":
if season == "Summer":
_aatm = np.array([-158.85, -5.38682, 0.889893, -0.0286665, 0.50035])
_batm = np.array([1145.62, 1176.79, 1248.92, 415.543, 1.0])
_catm = np.array([998469.0, 677398.0, 636790.0, 823489.0, 16090500.0])
_thickl = np.array(
[986.951713, 306.4668, 40.546793, 4.288721, 0.277182]
)
_hlay = np.array([0, 9.0e5, 22.0e5, 38.0e5, 68.2e5])
elif season == "Winter":
_aatm = np.array([-132.16, -2.4787, 0.298031, -0.0220264, 0.348021])
_batm = np.array([1120.45, 1203.97, 1163.28, 360.027, 1.0])
_catm = np.array([933697.0, 643957.0, 636790.0, 804486.0, 23109000.0])
_thickl = np.array(
[988.431172, 273.033464, 37.185105, 1.162987, 0.192998]
)
_hlay = np.array([0, 9.5e5, 22.0e5, 47.0e5, 68.2e5])
elif location == "KM3NeT-ARCA":
if season == "Summer":
_aatm = np.array([-157.857, -28.7524, 0.790275, -0.0286999, 0.481114])
_batm = np.array([1190.44, 1171.0, 1344.78, 445.357, 1.0])
_catm = np.array([1006100.0, 758614.0, 636790.0, 817384.0, 16886800.0])
_thickl = np.array(
[1032.679434, 328.978681, 80.601135, 4.420745, 0.264112]
)
_hlay = np.array([0, 9.0e5, 18.0e5, 38.0e5, 68.2e5])
elif season == "Winter":
_aatm = np.array([-116.391, 3.5938, 0.474803, -0.0246031, 0.280225])
_batm = np.array([1155.63, 1501.57, 1271.31, 398.512, 1.0])
_catm = np.array([933697.0, 594398.0, 636790.0, 810924.0, 29618400.0])
_thickl = np.array(
[1039.346286, 194.901358, 45.759249, 2.060083, 0.142817]
)
_hlay = np.array([0, 12.25e5, 21.25e5, 43.0e5, 70.5e5])
elif location == "SouthPole":
if season == "December":
_aatm = np.array([-128.601, -39.5548, 1.13088, -0.00264960, 0.00192534])
_batm = np.array([1139.99, 1073.82, 1052.96, 492.503, 1.0])
_catm = np.array([861913.0, 744955.0, 675928.0, 829627.0, 5.8587010e9])
_thickl = np.array(
[1011.398804, 588.128367, 240.955360, 3.964546, 0.000218]
)
_hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
elif season == "June":
_aatm = np.array(
[-163.331, -65.3713, 0.402903, -0.000479198, 0.00188667]
)
_batm = np.array([1183.70, 1108.06, 1424.02, 207.595, 1.0])
_catm = np.array([875221.0, 753213.0, 545846.0, 793043.0, 5.9787908e9])
_thickl = np.array(
[1020.370363, 586.143464, 228.374393, 1.338258, 0.000214]
)
_hlay = np.array([0.0, 4.0e5, 1.0e6, 4.0e6, 1.0e7])
else:
raise Exception(
'CorsikaAtmosphere(): Season "'
+ season
+ '" not parameterized for location SouthPole.'
)
elif location == "PL_SouthPole":
if season == "January":
_aatm = np.array([-113.139, -7930635, -54.3888, -0.0, 0.00421033])
_batm = np.array([1133.10, 1101.20, 1085.00, 1098.00, 1.0])
_catm = np.array([861730.0, 826340.0, 790950.0, 682800.0, 2.6798156e9])
_thickl = np.array(
[1019.966898, 718.071682, 498.659703, 340.222344, 0.000478]
)
_hlay = np.array([0.0, 2.67e5, 5.33e5, 8.0e5, 1.0e7])
elif season == "August":
_aatm = np.array([-59.0293, -21.5794, -7.14839, 0.0, 0.000190175])
_batm = np.array([1079.0, 1071.90, 1182.0, 1647.1, 1.0])
_catm = np.array([764170.0, 699910.0, 635650.0, 551010.0, 59.329575e9])
_thickl = np.array(
[1019.946057, 391.739652, 138.023515, 43.687992, 0.000022]
)
_hlay = np.array([0.0, 6.67e5, 13.33e5, 2.0e6, 1.0e7])
else:
raise Exception(
'CorsikaAtmosphere(): Season "'
+ season
+ '" not parameterized for location SouthPole.'
)
else:
raise Exception(
"CorsikaAtmosphere:init_parameters(): Location "
+ str(location)
+ " not parameterized."
)
self._atm_param = np.array([_aatm, _batm, _catm, _thickl, _hlay])
self.location, self.season = location, season
# Clear cached theta value to force spline recalculation
self.theta_deg = None
def depth2height(self, x_v):
"""Converts column/vertical depth to height.
Args:
x_v (float): column depth :math:`X_v` in g/cm**2
Returns:
float: height in cm
"""
_aatm, _batm, _catm, _thickl, _hlay = self._atm_param
if x_v >= _thickl[1]:
height = _catm[0] * np.log(_batm[0] / (x_v - _aatm[0]))
elif x_v >= _thickl[2]:
height = _catm[1] * np.log(_batm[1] / (x_v - _aatm[1]))
elif x_v >= _thickl[3]:
height = _catm[2] * np.log(_batm[2] / (x_v - _aatm[2]))
elif x_v >= _thickl[4]:
height = _catm[3] * np.log(_batm[3] / (x_v - _aatm[3]))
else:
height = (_aatm[4] - x_v) * _catm[4]
return height
def get_density(self, h_cm):
"""Returns the density of air in g/cm**3.
Uses the optimized module function :func:`corsika_get_density_jit`.
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
return self.corsika_acc.corsika_get_density(h_cm, *self._atm_param)
# return corsika_get_density_jit(h_cm, self._atm_param)
def get_mass_overburden(self, h_cm):
"""Returns the mass overburden in atmosphere in g/cm**2.
Uses the optimized module function :func:`corsika_get_m_overburden_jit`
Args:
h_cm (float): height in cm
Returns:
float: column depth :math:`T(h_{cm})` in g/cm**2
"""
return self.corsika_acc.corsika_get_m_overburden(h_cm, *self._atm_param)
# return corsika_get_m_overburden_jit(h_cm, self._atm_param)
def rho_inv(self, X, cos_theta):
"""Returns reciprocal density in cm**3/g using planar approximation.
This function uses the optimized function :func:`planar_rho_inv_jit`
Args:
h_cm (float): height in cm
Returns:
float: :math:`\\frac{1}{\\rho}(X,\\cos{\\theta})` cm**3/g
"""
return self.corsika_acc.planar_rho_inv(X, cos_theta, *self._atm_param)
# return planar_rho_inv_jit(X, cos_theta, self._atm_param)
def calc_thickl(self):
"""Calculates thickness layers for :func:`depth2height`
The analytical inversion of the CORSIKA parameterization
relies on the knowledge about the depth :math:`X`, where
trasitions between layers/exponentials occur.
Example:
Create a new set of parameters in :func:`init_parameters`
inserting arbitrary values in the _thikl array::
$ cor_atm = CorsikaAtmosphere(new_location, new_season)
$ cor_atm.calc_thickl()
Replace _thickl values with printout.
"""
from scipy.integrate import quad
thickl = []
for h in self._atm_param[4]:
thickl.append(
"{0:4.6f}".format(quad(self.get_density, h, 112.8e5, epsrel=1e-4)[0])
)
info(5, "_thickl = np.array([" + ", ".join(thickl) + "])")
return thickl
class IsothermalAtmosphere(EarthsAtmosphere):
"""Isothermal model of the atmosphere.
This model is widely used in semi-analytical calculations. The isothermal
approximation is valid in a certain range of altitudes and usually
one adjust the parameters to match a more realistic density profile
at altitudes between 10 - 30 km, where the high energy muon production
rate peaks. Such parametrizations are given in the book "Cosmic Rays and
Particle Physics", Gaisser, Engel and Resconi (2016). The default values
are from M. Thunman, G. Ingelman, and P. Gondolo, Astropart. Physics 5,
309 (1996).
Args:
location (str): no effect
season (str): no effect
hiso_km (float): isothermal scale height in km
X0 (float): Ground level overburden
"""
def __init__(self, location, season, hiso_km=6.3, X0=1300.0):
self.hiso_cm = hiso_km * 1e5
self.X0 = X0
self.location = location
self.season = season
EarthsAtmosphere.__init__(self)
def get_density(self, h_cm):
"""Returns the density of air in g/cm**3.
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
return self.X0 / self.hiso_cm * np.exp(-h_cm / self.hiso_cm)
def get_mass_overburden(self, h_cm):
"""Returns the mass overburden in atmosphere in g/cm**2.
Args:
h_cm (float): height in cm
Returns:
float: column depth :math:`T(h_{cm})` in g/cm**2
"""
return self.X0 * np.exp(-h_cm / self.hiso_cm)
class MSIS00Atmosphere(EarthsAtmosphere):
"""Wrapper class for a python interface to the NRLMSISE-00 model.
`NRLMSISE-00 <http://ccmc.gsfc.nasa.gov/modelweb/atmos/nrlmsise00.html>`_
is an empirical model of the Earth's atmosphere. It is available as
a FORTRAN 77 code or as a verson traslated into
`C by Dominik Borodowski <http://www.brodo.de/english/pub/nrlmsise/>`_.
Here a PYTHON wrapper has been used.
Attributes:
_msis : NRLMSISE-00 python wrapper object handler
Args:
location (str): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
def __init__(self, location, season=None, doy=None, use_loc_altitudes=False):
from MCEq.geometry.nrlmsise00_mceq import cNRLMSISE00
msis_atmospheres = [
"SouthPole",
"Karlsruhe",
"Geneva",
"Tokyo",
"SanGrasso",
"TelAviv",
"KSC",
"SoudanMine",
"Tsukuba",
"LynnLake",
"PeaceRiver",
"FtSumner",
]
assert (
location in msis_atmospheres
), "{0} not available for MSIS00Atmosphere".format(location)
self._msis = cNRLMSISE00()
self.init_parameters(location, season, doy, use_loc_altitudes)
EarthsAtmosphere.__init__(self)
def init_parameters(self, location, season, doy, use_loc_altitudes):
"""Sets location and season in :class:`NRLMSISE-00`.
Translates location and season into day of year
and geo coordinates.
Args:
location (str): Supported are "SouthPole" and "Karlsruhe"
season (str): months of the year: January, February, etc.
use_loc_altitudes (bool): If to use default altitudes from location
"""
self._msis.set_location(location)
if season is not None:
self._msis.set_season(season)
else:
self._msis.set_doy(doy)
self.location, self.season = location, season
# Clear cached value to force spline recalculation
self.theta_deg = None
if use_loc_altitudes:
info(0, "Using loc altitude", self._msis.alt_surface, "cm")
self.geom.h_obs = self._msis.alt_surface
def _clear_cache(self):
"""Clears the density model cache so that density profiles can be recalculated
It is a private method to wrap the logic of cache cleaning
"""
self.theta_deg = None
def update_parameters(self, **kwargs):
"""Updates parameters of the density model
Args:
location_coord (tuple of str): (longitude, latitude)
season (str): months of the year: January, February, etc.
doy (int): day of the year. 'doy' takes precedence over 'season' if both are set
"""
self._clear_cache()
if not kwargs:
return
if "location_coord" in kwargs:
self.set_location_coord(*kwargs.get("location_coord"))
if "season" in kwargs:
self.set_season(kwargs.get("season"))
if "doy" in kwargs:
self.set_doy(kwargs.get("doy"))
if "season" in kwargs:
info(2, "Both 'season' and 'doy' are set in parameter list.\n'doy' takes precedence over 'season'")
def get_density(self, h_cm):
"""Returns the density of air in g/cm**3.
Wraps around ctypes calls to the NRLMSISE-00 C library.
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
return self._msis.get_density(h_cm)
def set_location(self, location):
"""Changes MSIS location by strings defined in _msis_wrapper.
Args:
location (str): location as defined in :class:`NRLMSISE-00.`
"""
self._msis.set_location(location)
self._clear_cache()
def set_location_coord(self, longitude, latitude):
"""Changes MSIS location by longitude, latitude in _msis_wrapper
Args:
longitude (float): longitude of the location with abs(longitude) <= 180
latitude (float): latitude of the location with abs(latitude) <= 90
"""
self._msis.set_location_coord(longitude, latitude)
self._clear_cache()
def set_season(self, month):
"""Changes MSIS location by month strings defined in _msis_wrapper.
Args:
location (str): month as defined in :class:`NRLMSISE-00.`
"""
self._msis.set_season(month)
self._clear_cache()
def set_doy(self, day_of_year):
"""Changes MSIS season by day of year.
Args:
day_of_year (int): 1. Jan.=0, 1.Feb=32
"""
self._msis.set_doy(day_of_year)
self._clear_cache()
def get_temperature(self, h_cm):
"""Returns the temperature of air in K.
Wraps around ctypes calls to the NRLMSISE-00 C library.
Args:
h_cm (float): height in cm
Returns:
float: density :math:`T(h_{cm})` in K
"""
return self._msis.get_temperature(h_cm)
class AIRSAtmosphere(EarthsAtmosphere):
"""Interpolation class for tabulated atmospheres.
This class is intended to read preprocessed AIRS Satellite data.
Args:
location (str): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
def __init__(self, location, season, extrapolate=True, *args, **kwargs):
if location != "SouthPole":
raise Exception(
self.__class__.__name__
+ "(): Only South Pole location supported. "
+ location
)
self.extrapolate = extrapolate
self.month2doy = {
"January": 1,
"February": 32,
"March": 60,
"April": 91,
"May": 121,
"June": 152,
"July": 182,
"August": 213,
"September": 244,
"October": 274,
"November": 305,
"December": 335,
}
self.season = season
self.init_parameters(location, **kwargs)
EarthsAtmosphere.__init__(self)
def init_parameters(self, location, **kwargs):
"""Loads tables and prepares interpolation.
Args:
location (str): supported is only "SouthPole"
doy (int): Day Of Year
"""
# from time import strptime
from matplotlib.dates import datestr2num, num2date
from os import path
def bytespdate2num(b):
return datestr2num(b.decode("utf-8"))
data_path = join(
path.expanduser("~"), "OneDrive/Dokumente/projects/atmospheric_variations/"
)
if "table_path" in kwargs:
data_path = kwargs["table_path"]
files = [
("dens", "airs_amsu_dens_180_daily.txt"),
("temp", "airs_amsu_temp_180_daily.txt"),
("alti", "airs_amsu_alti_180_daily.txt"),
]
data_collection = {}
# limit SouthPole pressure to <= 600
min_press_idx = 4
IC79_idx_1 = None
IC79_idx_2 = None
for d_key, fname in files:
fname = data_path + "tables/" + fname
# tabf = open(fname).read()
tab = np.loadtxt(
fname, converters={0: bytespdate2num}, usecols=[0] + list(range(2, 27))
)
# with open(fname, 'r') as f:
# comline = f.readline()
# p_levels = [
# float(s.strip()) for s in comline.split(' ')[3:] if s != ''
# ][min_press_idx:]
dates = num2date(tab[:, 0])
for di, date in enumerate(dates):
if date.month == 6 and date.day == 1:
if date.year == 2010:
IC79_idx_1 = di
elif date.year == 2011:
IC79_idx_2 = di
surf_val = tab[:, 1]
cols = tab[:, min_press_idx + 2 :]
data_collection[d_key] = (dates, surf_val, cols)
self.interp_tab_d = {}
self.interp_tab_t = {}
self.dates = {}
dates = data_collection["alti"][0]
msis = MSIS00Atmosphere(location, "January")
for didx, date in enumerate(dates):
h_vec = np.array(data_collection["alti"][2][didx, :] * 1e2)
d_vec = np.array(data_collection["dens"][2][didx, :])
t_vec = np.array(data_collection["temp"][2][didx, :])
if self.extrapolate:
# Extrapolate using msis
h_extra = np.linspace(h_vec[-1], self.geom.h_atm * 1e2, 250)
msis._msis.set_doy(self._get_y_doy(date)[1] - 1)
msis_extra_d = np.array([msis.get_density(h) for h in h_extra])
msis_extra_t = np.array([msis.get_temperature(h) for h in h_extra])
# Interpolate last few altitude bins
ninterp = 5
for ni in range(ninterp):
cl = 1 - np.exp(-ninterp + ni + 1)
ch = 1 - np.exp(-ni)
norm = 1.0 / (cl + ch)
d_vec[-ni - 1] = (
d_vec[-ni - 1] * cl * norm
+ msis.get_density(h_vec[-ni - 1]) * ch * norm
)
t_vec[-ni - 1] = (
t_vec[-ni - 1] * cl * norm
+ msis.get_temperature(h_vec[-ni - 1]) * ch * norm
)
# Merge the two datasets
h_vec = np.hstack([h_vec[:-1], h_extra])
d_vec = np.hstack([d_vec[:-1], msis_extra_d])
t_vec = np.hstack([t_vec[:-1], msis_extra_t])
self.interp_tab_d[self._get_y_doy(date)] = (h_vec, d_vec)
self.interp_tab_t[self._get_y_doy(date)] = (h_vec, t_vec)
self.dates[self._get_y_doy(date)] = date
self.IC79_start = self._get_y_doy(dates[IC79_idx_1])
self.IC79_end = self._get_y_doy(dates[IC79_idx_2])
self.IC79_days = (dates[IC79_idx_2] - dates[IC79_idx_1]).days
self.location = location
if self.season is None:
self.set_IC79_day(0)
else:
self.set_season(self.season)
# Clear cached value to force spline recalculation
self.theta_deg = None
def set_date(self, year, doy):
self.h, self.dens = self.interp_tab_d[(year, doy)]
_, self.temp = self.interp_tab_t[(year, doy)]
self.date = self.dates[(year, doy)]
# Compatibility with caching
self.season = self.date
def _set_doy(self, doy, year=2010):
self.h, self.dens = self.interp_tab_d[(year, doy)]
_, self.temp = self.interp_tab_t[(year, doy)]
self.date = self.dates[(year, doy)]
def set_season(self, month):
self.season = month
self._set_doy(self.month2doy[month])
self.season = month
def set_IC79_day(self, IC79_day):
import datetime
if IC79_day > self.IC79_days:
raise Exception(
self.__class__.__name__ + "::set_IC79_day(): IC79_day above range."
)
target_day = self._get_y_doy(
self.dates[self.IC79_start] + datetime.timedelta(days=IC79_day)
)
info(2, "setting IC79_day", IC79_day)
self.h, self.dens = self.interp_tab_d[target_day]
_, self.temp = self.interp_tab_t[target_day]
self.date = self.dates[target_day]
# Compatibility with caching
self.season = self.date
def _get_y_doy(self, date):
return date.timetuple().tm_year, date.timetuple().tm_yday
def get_density(self, h_cm):
"""Returns the density of air in g/cm**3.
Interpolates table at requested value for previously set
year and day of year (doy).
Args:
h_cm (float): height in cm
Returns:
float: density :math:`\\rho(h_{cm})` in g/cm**3
"""
ret = np.exp(np.interp(h_cm, self.h, np.log(self.dens)))
try:
ret[h_cm > self.h[-1]] = np.nan
except TypeError:
if h_cm > self.h[-1]:
return np.nan
return ret
def get_temperature(self, h_cm):
"""Returns the temperature in K.
Interpolates table at requested value for previously set
year and day of year (doy).
Args:
h_cm (float): height in cm
Returns:
float: temperature :math:`T(h_{cm})` in K
"""
ret = np.exp(np.interp(h_cm, self.h, np.log(self.temp)))
try:
ret[h_cm > self.h[-1]] = np.nan
except TypeError:
if h_cm > self.h[-1]:
return np.nan
return ret
class MSIS00IceCubeCentered(MSIS00Atmosphere):
"""Extension of :class:`MSIS00Atmosphere` which couples the latitude
setting with the zenith angle of the detector.
Args:
location (str): see :func:`init_parameters`
season (str,optional): see :func:`init_parameters`
"""
def __init__(self, location, season):
if location != "SouthPole":
info(2, "location forced to the South Pole")
location = "SouthPole"
MSIS00Atmosphere.__init__(self, location, season)
# Allow for upgoing zenith angles
self.max_theta = 180.0
def latitude(self, det_zenith_deg):
"""Returns the geographic latitude of the shower impact point.
Assumes a spherical earth. The detector is 1948m under the
surface.
Credits: geometry fomulae by Jakob van Santen, DESY Zeuthen.
Args:
det_zenith_deg (float): zenith angle at detector in degrees
Returns:
float: latitude of the impact point in degrees
"""
r = self.geom.r_E
d = 1948 # m
theta_rad = det_zenith_deg / 180.0 * np.pi
x = np.sqrt(2.0 * r * d + ((r - d) * np.cos(theta_rad)) ** 2 - d**2) - (
r - d
) * np.cos(theta_rad)
return (
-90.0
+ np.arctan2(x * np.sin(theta_rad), r - d + x * np.cos(theta_rad))
/ np.pi
* 180.0
)
def set_theta(self, theta_deg):
self._msis.set_location_coord(longitude=0.0, latitude=self.latitude(theta_deg))
info(
1,
"latitude = {0:5.2f} for zenith angle = {1:5.2f}".format(
self.latitude(theta_deg), theta_deg
),
)
downgoing_theta_deg = theta_deg
if theta_deg > 90.0:
downgoing_theta_deg = 180.0 - theta_deg
info(
1,
"theta = {0:5.2f} below horizon. using theta = {1:5.2f}".format(
theta_deg, downgoing_theta_deg
),
)
MSIS00Atmosphere.set_theta(self, downgoing_theta_deg)
self.theta_deg = theta_deg
class GeneralizedTarget(object):
"""This class provides a way to run MCEq on piece-wise constant
one-dimenional density profiles.
The default values for the average density are taken from
config file variables `len_target`, `env_density` and `env_name`.
The density profile has to be built by calling subsequently
:func:`add_material`. The current composition of the target
can be checked with :func:`draw_materials` or :func:`print_table`.
Note:
If the target is not air or hydrogen, the result is approximate,
since seconray particle yields are provided for nucleon-air or
proton-proton collisions. Depending on this choice one has to
adjust the nuclear mass in :mod:`mceq_config`.
Args:
len_target (float): total length of the target in meters
env_density (float): density of the default material in g/cm**3
env_name (str): title for this environment
"""
def __init__(
self,
len_target=config.len_target * 1e2, # cm
env_density=config.env_density, # g/cm3
env_name=config.env_name,
):
self.len_target = len_target
self.env_density = env_density
self.env_name = env_name
self.reset()
@property
def max_den(self):
return self._max_den
def reset(self):
"""Resets material list to defaults."""
self.mat_list = [[0.0, self.len_target, self.env_density, self.env_name]]
self._update_variables()
def _update_variables(self):
"""Updates internal variables. Not needed to call by user."""
self.start_bounds, self.end_bounds, self.densities = list(zip(*self.mat_list))[
:-1
]
self.densities = np.array(self.densities)
self.start_bounds = np.array(self.start_bounds)
self.end_bounds = np.array(self.end_bounds)
self._max_den = np.max(self.densities)
self._integrate()
def set_length(self, new_length_cm):
"""Updates the total length of the target.
Usually the length is set
"""
if new_length_cm < self.mat_list[-1][0]:
raise Exception(
"GeneralizedTarget::set_length(): "
+ "can not set length below lower boundary of last "
+ "material."
)
self.len_target = new_length_cm
self.mat_list[-1][1] = new_length_cm
self._update_variables()
def add_material(self, start_position_cm, density, name):
"""Adds one additional material to a composite target.
Args:
start_position_cm (float): position where the material starts
counted from target origin l|X = 0 in cm
density (float): density of material in g/cm**3
name (str): any user defined name
Raises:
Exception: If requested start_position_cm is not properly defined.
"""
if start_position_cm < 0.0 or start_position_cm > self.len_target:
raise Exception(
"GeneralizedTarget::add_material(): "
+ "distance exceeds target dimensions."
)
elif (
start_position_cm == self.mat_list[-1][0]
and self.mat_list[-1][-1] == self.env_name
):
self.mat_list[-1] = [start_position_cm, self.len_target, density, name]
elif start_position_cm <= self.mat_list[-1][0]:
raise Exception(
"GeneralizedTarget::add_material(): "
+ "start_position_cm is ahead of previous material."
)
else:
self.mat_list[-1][1] = start_position_cm
self.mat_list.append([start_position_cm, self.len_target, density, name])
info(
2,
(
"{0}::add_material(): Material '{1}' added. "
+ "location on path {2} to {3} m"
).format(
self.__class__.__name__,
name,
self.mat_list[-1][0],
self.mat_list[-1][1],
),
)
self._update_variables()
def set_theta(self, *args):
"""This method is not defined for the generalized target. The purpose
is to catch usage errors.
Raises:
NotImplementedError: always
"""
raise NotImplementedError(
"GeneralizedTarget::set_theta(): Method"
+ "not defined for this target class."
)
def _integrate(self):
"""Walks through material list and computes the depth along the
position (path). Computes the spline for the position-depth relation
and determines the maximum depth for the material selection.
Method does not need to be called by the user, instead the class
calls it when necessary.
"""
from scipy.interpolate import UnivariateSpline
self.density_depth = None
self.knots = [0.0]
self.X_int = [0.0]
for start, end, density, _ in self.mat_list:
self.knots.append(end)
self.X_int.append(density * (end - start) + self.X_int[-1])
self._s_X2h = UnivariateSpline(self.X_int, self.knots, k=1, s=0.0)
self._s_h2X = UnivariateSpline(self.knots, self.X_int, k=1, s=0.0)
self._max_X = self.X_int[-1]
@property
def s_X2h(self):
"""Spline for depth at distance."""
if not hasattr(self, "_s_X2h"):
self._integrate()
return self._s_X2h
@property
def s_h2X(self):
"""Spline for distance at depth."""
if not hasattr(self, "_s_h2X"):
self._integrate()
return self._s_h2X
@property
def max_X(self):
"""Maximal depth of target."""
if not hasattr(self, "_max_X"):
self._integrate()
return self._max_X
def get_density_X(self, X):
"""Returns the density in g/cm**3 as a function of depth X.
Args:
X (float): depth in g/cm**2
Returns:
float: density in g/cm**3
Raises:
Exception: If requested depth exceeds target.
"""
X = np.atleast_1d(X)
# allow for some small constant extrapolation for odepack solvers
if X[-1] > self.max_X and X[-1] < self.max_X * 1.003:
X[-1] = self.max_X
if np.min(X) < 0.0 or np.max(X) > self.max_X:
# return self.get_density(self.s_X2h(self.max_X))
info(
0,
"Depth {0:4.3f} exceeds target dimensions {1:4.3f}".format(
np.max(X), self.max_X
),
)
raise Exception("Invalid input")
return self.get_density(self.s_X2h(X))
def r_X2rho(self, X):
"""Returns the inverse density :math:`\\frac{1}{\\rho}(X)`.
Args:
X (float): slant depth in g/cm**2
Returns:
float: :math:`1/\\rho` in cm**3/g
"""
return 1.0 / self.get_density_X(X)
def get_density(self, l_cm):
"""Returns the density in g/cm**3 as a function of position l in cm.
Args:
l (float): position in target in cm
Returns:
float: density in g/cm**3
Raises:
Exception: If requested position exceeds target length.
"""
l_cm = np.atleast_1d(l_cm)
res = np.zeros_like(l_cm)
if np.min(l_cm) < 0 or np.max(l_cm) > self.len_target:
raise Exception(
"GeneralizedTarget::get_density(): "
+ "requested position exceeds target legth."
)
for i, li in enumerate(l_cm):
bi = 0
while not (li >= self.start_bounds[bi] and li <= self.end_bounds[bi]):
bi += 1
res[i] = self.densities[bi]
return res
def draw_materials(self, axes=None, logx=False):
"""Makes a plot of depth and density profile as a function
of the target length. The list of materials is printed out, too.
Args:
axes (plt.axes, optional): handle for matplotlib axes
"""
import matplotlib.pyplot as plt
if not axes:
plt.figure(figsize=(5, 2.5))
axes = plt.gca()
ymax = np.max(self.X_int) * 1.01
for _, mat in enumerate(self.mat_list):
xstart = mat[0]
xend = mat[1]
alpha = 0.188 * mat[2] / max(self.densities) + 0.248
if alpha > 1:
alpha = 1.0
elif alpha < 0.0:
alpha = 0.0
axes.fill_between(
(xstart, xend),
(ymax, ymax),
(0.0, 0.0),
label=mat[2],
facecolor="grey",
alpha=alpha,
)
# axes.text(0.5e-2 * (xstart + xend), 0.5 * ymax, str(nm))
axes.plot([xl for xl in self.knots], self.X_int, lw=1.7, color="r")
if logx:
axes.set_xscale("log", nonposx="clip")
axes.set_ylim(0.0, ymax)
axes.set_xlabel("distance in target (cm)")
axes.set_ylabel(r"depth X (g/cm$^2)$")
self.print_table(min_dbg_lev=2)
def print_table(self, min_dbg_lev=0):
"""Prints table of materials to standard output."""
templ = "{0:^3} | {1:15} | {2:^9.3g} | {3:^9.3g} | {4:^8.5g}"
info(
min_dbg_lev,
"********************* List of materials ***********************",
no_caller=True,
)
head = "{0:3} | {1:15} | {2:9} | {3:9} | {4:9}".format(
"no", "name", "start [cm]", "end [cm]", "density [g/cm**3]"
)
info(min_dbg_lev, "-" * len(head), no_caller=True)
info(min_dbg_lev, head, no_caller=True)
info(min_dbg_lev, "-" * len(head), no_caller=True)
for nm, mat in enumerate(self.mat_list):
info(
min_dbg_lev,
templ.format(nm, mat[3], mat[0], mat[1], mat[2]),
no_caller=True,
)
if __name__ == "__main__":
import matplotlib.pyplot as plt
plt.figure(figsize=(5, 4))
plt.title("CORSIKA atmospheres")
cka_atmospheres = [
("USStd", None),
("BK_USStd", None),
("Karlsruhe", None),
("ANTARES/KM3NeT-ORCA", "Summer"),
("ANTARES/KM3NeT-ORCA", "Winter"),
("KM3NeT-ARCA", "Summer"),
("KM3NeT-ARCA", "Winter"),
("KM3NeT", None),
("SouthPole", "December"),
("PL_SouthPole", "January"),
("PL_SouthPole", "August"),
]
cka_surf_100 = []
for loc, season in cka_atmospheres:
cka_obj = CorsikaAtmosphere(loc, season)
cka_obj.set_theta(0.0)
x_vec = np.linspace(0, cka_obj.max_X, 5000)
plt.plot(
x_vec,
1 / cka_obj.r_X2rho(x_vec),
lw=1.5,
label="{0}/{1}".format(loc, season)
if season is not None
else "{0}".format(loc),
)
cka_surf_100.append((cka_obj.max_X, 1.0 / cka_obj.r_X2rho(100.0)))
print(cka_surf_100)
plt.ylabel(r"Density $\rho$ (g/cm$^3$)")
plt.xlabel(r"Depth (g/cm$^2$)")
plt.legend(loc="upper left")
plt.tight_layout()
plt.figure(figsize=(5, 4))
plt.title("NRLMSISE-00 atmospheres")
msis_atmospheres = [
("SouthPole", "January"),
("Karlsruhe", "January"),
("Geneva", "January"),
("Tokyo", "January"),
("SanGrasso", "January"),
("TelAviv", "January"),
("KSC", "January"),
("SoudanMine", "January"),
("Tsukuba", "January"),
("LynnLake", "January"),
("PeaceRiver", "January"),
("FtSumner", "January"),
]
msis_surf_100 = []
for loc, season in msis_atmospheres:
msis_obj = MSIS00Atmosphere(loc, season)
msis_obj.set_theta(0.0)
x_vec = np.linspace(0, msis_obj.max_X, 5000)
plt.plot(x_vec, 1 / msis_obj.r_X2rho(x_vec), lw=1.5, label="{0}".format(loc))
msis_surf_100.append((msis_obj.max_X, 1.0 / msis_obj.r_X2rho(100.0)))
print(msis_surf_100)
plt.ylabel(r"Density $\rho$ (g/cm$^3$)")
plt.xlabel(r"Depth (g/cm$^2$)")
plt.legend(loc="upper left")
plt.tight_layout()
plt.show()
| 50,633 | 33.562457 | 115 | py |
MCEq | MCEq-master/MCEq/geometry/nrlmsise00_mceq.py | from MCEq.misc import info
import six
import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis
class NRLMSISE00Base(object):
def __init__(self):
# Cache altitude value of last call
self.last_alt = None
self.inp = cmsis.nrlmsise_input()
self.output = cmsis.nrlmsise_output()
self.flags = cmsis.nrlmsise_flags()
self.month2doy = {
'January': 1,
'February': 32,
'March': 60,
'April': 91,
'May': 121,
'June': 152,
'July': 182,
'August': 213,
'September': 244,
'October': 274,
'November': 305,
'December': 335
}
# Longitude, latitude, height
self.locations = {
'SouthPole': (0., -90., 2834. * 100.),
'Karlsruhe': (8.4, 49., 110. * 100.),
'Geneva': (6.1, 46.2, 370. * 100.),
'Tokyo': (139., 35., 5. * 100.),
'SanGrasso': (13.5, 42.4, 5. * 100.),
'TelAviv': (34.8, 32.1, 5. * 100.),
'KSC': (-80.7, 32.1, 5. * 100.),
'SoudanMine': (-92.2, 47.8, 5. * 100.),
'Tsukuba': (140.1, 36.2, 30. * 100.),
'LynnLake': (-101.1, 56.9, 360. * 100.),
'PeaceRiver': (-117.2, 56.15, 36000. * 100.),
'FtSumner': (-104.2, 34.5, 31000. * 100.)
}
self.daytimes = {'day': 43200., 'night': 0.}
self.current_location = 'SouthPole'
self.init_default_values()
def surface_vert_depth(self, loc='SouthPole', month='June'):
self.set_location('SouthPole')
self.set_season('June')
def height2depth(self, altitude_cm):
from scipy.integrate import quad
return quad(self.get_density, altitude_cm, 112.8 * 1e5,
epsrel=0.001)[0]
def _retrieve_result(self, *args, **kwargs):
"""Calls NRLMSISE library's main function"""
raise Exception('Not implemented for the base class')
def get_temperature(self, altitude_cm):
"""Returns temperature in K"""
self._retrieve_result(altitude_cm)
return self.output.t[1]
def get_density(self, altitude_cm):
"""Returns density in g/cm^3"""
self._retrieve_result(altitude_cm)
return self.output.d[5]
class cNRLMSISE00(NRLMSISE00Base):
def init_default_values(self):
"""Sets default to June at South Pole"""
self.inp.doy = cmsis.c_int(self.month2doy['June']) # Day of year
self.inp.year = cmsis.c_int(0) # No effect
self.inp.sec = cmsis.c_double(self.daytimes['day']) # 12:00
self.inp.alt = cmsis.c_double(self.locations[self.current_location][2])
self.inp.g_lat = cmsis.c_double(
self.locations[self.current_location][1])
self.inp.g_long = cmsis.c_double(
self.locations[self.current_location][0])
self.inp.lst = cmsis.c_double(self.inp.sec.value / 3600. +
self.inp.g_long.value / 15.)
# Do not touch this except you know what you are doing
self.inp.f107A = cmsis.c_double(150.)
self.inp.f107 = cmsis.c_double(150.)
self.inp.ap = cmsis.c_double(4.)
self.inp.ap_a = cmsis.pointer(cmsis.ap_array())
self.alt_surface = self.locations[self.current_location][2]
self.flags.switches[0] = cmsis.c_int(0)
for i in range(1, 24):
self.flags.switches[i] = cmsis.c_int(1)
def set_location(self, tag):
if tag not in list(self.locations):
raise Exception(
"NRLMSISE00::set_location(): Unknown location tag '{0}'.".
format(tag))
self.inp.alt = cmsis.c_double(self.locations[tag][2])
self.set_location_coord(*self.locations[tag][:2])
self.current_location = tag
self.alt_surface = self.locations[self.current_location][2]
def set_location_coord(self, longitude, latitude):
info(5, 'long={0:5.2f}, lat={1:5.2f}'.format(longitude, latitude))
if abs(latitude) > 90 or abs(longitude) > 180:
raise Exception("NRLMSISE00::set_location_coord(): Invalid inp.")
self.inp.g_lat = cmsis.c_double(latitude)
self.inp.g_long = cmsis.c_double(longitude)
def set_season(self, tag):
if tag not in self.month2doy:
raise Exception("NRLMSISE00::set_location(): Unknown season tag.")
info(5, 'Season', tag, 'doy=', self.month2doy[tag])
self.inp.doy = self.month2doy[tag]
def set_doy(self, doy):
if doy < 0 or doy > 365:
raise Exception("NRLMSISE00::set_doy(): Day of year out of range.")
info(5, 'day of year', doy)
self.inp.doy = cmsis.c_int(doy)
def _retrieve_result(self, altitude_cm):
if self.last_alt == altitude_cm:
return
inp = self.inp
inp.alt = cmsis.c_double(altitude_cm / 1e5)
cmsis.msis.gtd7_py(inp.year, inp.doy, inp.sec, inp.alt, inp.g_lat,
inp.g_long, inp.lst, inp.f107A, inp.f107, inp.ap,
inp.ap_a, cmsis.byref(self.flags),
cmsis.byref(self.output))
self.last_alt = altitude_cm
def test():
import numpy as np
import matplotlib.pyplot as plt
msis = cNRLMSISE00()
den = np.vectorize(msis.get_density)
plt.figure(figsize=(16, 5))
plt.suptitle('NRLMSISE-00')
h_vec = np.linspace(0, 112.8 * 1e5, 500)
msis.set_season('January')
msis.set_location('SouthPole')
den_sp_jan = den(h_vec)
msis.set_season('January')
msis.set_location('Karlsruhe')
den_ka_jan = den(h_vec)
plt.subplot(131)
plt.semilogy(h_vec / 1e5, den_sp_jan, label='MSIS00: SP Jan.')
plt.semilogy(h_vec / 1e5, den_ka_jan, label='MSIS00: KA Jan.')
plt.legend()
plt.xlabel('vertical height in km')
plt.ylabel(r'density $\rho(h)$ in g/cm$^3$')
plt.subplot(132)
plt.plot(h_vec / 1e5, den_ka_jan / den_sp_jan, label='MSIS00: KA/SP')
plt.xlabel('vertical height in km')
plt.ylabel(r'density ratio')
plt.legend(loc='upper left')
plt.subplot(133)
msis.set_location('SouthPole')
for i in range(360 / 30):
msis.inp.doy = i * 30
plt.plot(h_vec / 1e5, den(h_vec) / den_sp_jan, label=str(i + 1))
plt.legend(ncol=2, loc=3)
plt.title('MSIS00: SouthPole')
plt.xlabel('vertical height in km')
plt.ylabel(r'$\rho$(Month) / $\rho$(January)')
plt.ylim(ymin=0.6)
plt.tight_layout()
plt.figure(figsize=(6, 5))
h2d = np.vectorize(msis.height2depth)
plt.semilogy(h_vec / 1e5, h2d(h_vec))
plt.ylabel(r'Slant depth X [g/cm$^2$]')
plt.xlabel(r'Atmospheric height $h$ [km]')
plt.subplots_adjust(left=0.15, bottom=0.11)
plt.show()
if __name__ == '__main__':
test()
| 6,859 | 34 | 79 | py |
MCEq | MCEq-master/MCEq/geometry/geometry.py |
import sys
import numpy as np
from MCEq.misc import theta_rad
import mceq_config as config
class EarthGeometry(object):
r"""A model of the Earth's geometry, approximating it
by a sphere. The figure below illustrates the meaning of the parameters.
.. figure:: graphics/geometry.*
:scale: 30 %
:alt: picture of geometry
Curved geometry as it is used in the code (not to scale!).
Example:
The plots below will be produced by executing the module::
$ python geometry.py
.. plot::
import matplotlib.pyplot as plt
import numpy as np
from MCEq.geometry.geometry import *
from MCEq.misc import theta_rad
g = EarthGeometry()
theta_list = np.linspace(0, 90, 500)
h_vec = np.linspace(0, g.h_atm, 500)
th_list_rad = theta_rad(theta_list)
fig = plt.figure(figsize=(5, 4))
fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
plt.plot(theta_list, g.l(th_list_rad) / 1e5,
lw=2)
plt.xlabel(r'zenith $\theta$ at detector')
plt.ylabel(r'path length $l(\theta)$ in km')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
fig = plt.figure(figsize=(5, 4))
fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
plt.plot(theta_list,
np.arccos(g.cos_th_star(th_list_rad)) / np.pi * 180.,
lw=2)
plt.xlabel(r'zenith $\theta$ at detector')
plt.ylabel(r'$\theta^*$ at top of the atm.')
plt.ylim([0, 90])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
fig = plt.figure(figsize=(5, 4))
fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
plt.plot(h_vec / 1e5, g.delta_l(h_vec, theta_rad(85.)) / 1e5,
lw=2)
plt.ylabel(r'Path length $\Delta l(h)$ in km')
plt.xlabel(r'atm. height $h_{atm}$ in km')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
fig = plt.figure(figsize=(5, 4))
fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
for theta in [30., 60., 70., 80., 85., 90.]:
theta_path = theta_rad(theta)
delta_l_vec = np.linspace(0, g.l(theta_path), 1000)
plt.plot(delta_l_vec / 1e5, g.h(delta_l_vec, theta_path) / 1e5,
label=r'${0}^o$'.format(theta), lw=2)
plt.legend()
plt.xlabel(r'path length $\Delta l$ [km]')
plt.ylabel(r'atm. height $h_{atm}(\Delta l, \theta)$ [km]')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.show()
Attributes:
h_obs (float): observation level height [cm]
h_atm (float): top of the atmosphere [cm]
r_E (float): radius Earth [cm]
r_top (float): radius at top of the atmosphere [cm]
r_obs (float): radius at observation level [cm]
"""
def __init__(self):
self.h_obs = config.h_obs * 1e2 # cm
self.h_atm = config.h_atm * 1e2 # cm
self.r_E = config.r_E * 1e2 # cm
self.r_top = self.r_E + self.h_atm
self.r_obs = self.r_E + self.h_obs
def _A_1(self, theta):
r"""Segment length :math:`A1(\theta)` in cm.
"""
return self.r_obs * np.cos(theta)
def _A_2(self, theta):
r"""Segment length :math:`A2(\theta)` in cm.
"""
return self.r_obs * np.sin(theta)
def l(self, theta):
r"""Returns path length in [cm] for given zenith
angle :math:`\theta` [rad].
"""
return (np.sqrt(self.r_top**2 - self._A_2(theta)**2) -
self._A_1(theta))
def cos_th_star(self, theta):
r"""Returns the zenith angle at atmospheric boarder
:math:`\cos(\theta^*)` in [rad] as a function of zenith at detector.
"""
return (self._A_1(theta) + self.l(theta)) / self.r_top
def h(self, dl, theta):
r"""Height above surface at distance :math:`dl` counted from the beginning
of path :math:`l(\theta)` in cm.
"""
return np.sqrt(
self._A_2(theta)**2 +
(self._A_1(theta) + self.l(theta) - dl)**2) - self.r_E
def delta_l(self, h, theta):
r"""Distance :math:`dl` covered along path :math:`l(\theta)`
as a function of current height. Inverse to :func:`h`.
"""
return (self._A_1(theta) + self.l(theta) -
np.sqrt((h + self.r_E)**2 - self._A_2(theta)**2))
def chirkin_cos_theta_star(costheta):
r""":math:`\cos(\theta^*)` parameterization.
This function returns the equivalent zenith angle for
for very inclined showers. It is based on a CORSIKA study by
`D. Chirkin, hep-ph/0407078v1, 2004
<http://arxiv.org/abs/hep-ph/0407078v1>`_.
Args:
costheta (float): :math:`\cos(\theta)` in [rad]
Returns:
float: :math:`\cos(\theta*)` in [rad]
"""
p1 = 0.102573
p2 = -0.068287
p3 = 0.958633
p4 = 0.0407253
p5 = 0.817285
x = costheta
return np.sqrt(
(x**2 + p1**2 + p2 * x**p3 + p4 * x**p5) / (1 + p1**2 + p2 + p4))
if __name__ == "__main__":
import matplotlib.pyplot as plt
earth = EarthGeometry()
theta_list = np.linspace(0, 90, 500)
h_vec = np.linspace(0, earth.h_atm, 500)
th_list_rad = theta_rad(theta_list)
fig = plt.figure(figsize=(5, 4))
fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
plt.plot(theta_list, earth.l(th_list_rad) / 1e5, lw=2)
plt.xlabel(r'zenith $\theta$ at detector')
plt.ylabel(r'path length $l(\theta)$ in km')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
fig = plt.figure(figsize=(5, 4))
fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
plt.plot(theta_list,
np.arccos(earth.cos_th_star(th_list_rad)) / np.pi * 180.,
lw=2)
plt.xlabel(r'zenith $\theta$ at detector')
plt.ylabel(r'$\theta^*$ at top of the atm.')
plt.ylim([0, 90])
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
fig = plt.figure(figsize=(5, 4))
fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
plt.plot(h_vec / 1e5, earth.delta_l(h_vec, theta_rad(85.)) / 1e5, lw=2)
plt.ylabel(r'Path length $\Delta l(h)$ in km')
plt.xlabel(r'atm. height $h_{atm}$ in km')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
fig = plt.figure(figsize=(5, 4))
fig.set_tight_layout(dict(rect=[0.00, 0.00, 1, 1]))
for theta in [30., 60., 70., 80., 85., 90.]:
theta_path = theta_rad(theta)
delta_l_vec = np.linspace(0, earth.l(theta_path), 1000)
plt.plot(delta_l_vec / 1e5,
earth.h(delta_l_vec, theta_path) / 1e5,
label=r'${0}^o$'.format(theta),
lw=2)
plt.legend()
plt.xlabel(r'path length $\Delta l$ [km]')
plt.ylabel(r'atm. height $h_{atm}(\Delta l, \theta)$ [km]')
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.show()
| 7,994 | 33.61039 | 82 | py |
MCEq | MCEq-master/MCEq/geometry/__init__.py | 0 | 0 | 0 | py |
|
MCEq | MCEq-master/MCEq/geometry/corsikaatm/corsikaatm.py | from ctypes import (cdll, Structure, c_int, c_double, POINTER)
import os
import sysconfig
base = os.path.dirname(os.path.abspath(__file__))
suffix = sysconfig.get_config_var('EXT_SUFFIX')
# Some Python 2.7 versions don't define EXT_SUFFIX
if suffix is None and 'SO' in sysconfig.get_config_vars():
suffix = sysconfig.get_config_var('SO')
assert suffix is not None, 'Shared lib suffix was not identified.'
for fn in os.listdir(base):
if 'libcorsikaatm' in fn and fn.endswith(suffix):
corsika_acc = cdll.LoadLibrary(os.path.join(base, fn))
break
for func in [
corsika_acc.corsika_get_density, corsika_acc.planar_rho_inv,
corsika_acc.corsika_get_m_overburden
]:
func.restype = c_double
def corsika_get_density(h_cm, a, b, c, t, hl):
"""Wrap arguments for ctypes function"""
return corsika_acc.corsika_get_density(
c_double(h_cm), a.ctypes, b.ctypes, c.ctypes, t.ctypes, hl.ctypes)
def planar_rho_inv(X, cos_theta, a, b, c, t, hl):
"""Wrap arguments for ctypes function"""
return corsika_acc.planar_rho_inv(
c_double(X), c_double(cos_theta), a.ctypes, b.ctypes, c.ctypes,
t.ctypes, hl.ctypes)
def corsika_get_m_overburden(h_cm, a, b, c, t, hl):
"""Wrap arguments for ctypes function"""
return corsika_acc.corsika_get_m_overburden(
c_double(h_cm), a.ctypes, b.ctypes, c.ctypes, t.ctypes, hl.ctypes)
| 1,406 | 32.5 | 74 | py |
MCEq | MCEq-master/MCEq/geometry/corsikaatm/__init__.py | 0 | 0 | 0 | py |
|
MCEq | MCEq-master/MCEq/geometry/nrlmsise00/nrlmsise00.py | '''
Ctypes interface for struct-based interface to the C-version of NRLMSISE-00.
This C version of NRLMSISE-00 is written by Dominik Brodowski
'''
from ctypes import (cdll, Structure, c_int, c_double, pointer, byref, POINTER)
import os
import sysconfig
base = os.path.dirname(os.path.abspath(__file__))
suffix = sysconfig.get_config_var('EXT_SUFFIX')
# Some Python 2.7 versions don't define EXT_SUFFIX
if suffix is None and 'SO' in sysconfig.get_config_vars():
suffix = sysconfig.get_config_var('SO')
assert suffix is not None, 'Shared lib suffix was not identified.'
for fn in os.listdir(base):
if 'libnrlmsis' in fn and fn.endswith(suffix):
msis = cdll.LoadLibrary(os.path.join(base, fn))
break
class nrlmsise_flags(Structure):
"""C-struct containing NRLMSISE related switches"""
_fields_ = [("switches", c_int * 24), ("sw", c_double * 24),
("swc", c_double * 24)]
class ap_array(Structure):
"""C-struct containing NRLMSISE related switches"""
_fields_ = [("a", c_double * 7)]
class nrlmsise_input(Structure):
"""The C-struct contains input variables for NRLMSISE."""
_field_ = [("year", c_int), ("doy", c_int), ("sec", c_double),
("alt", c_double), ("g_lat", c_double), ("g_long", c_double),
("lst", c_double), ("f107A", c_double), ("f107", c_double),
("ap", c_double), ("ap_a", POINTER(ap_array))]
class nrlmsise_output(Structure):
"""The C-struct contains output variables for NRLMSISE."""
_fields_ = [("d", c_double * 9), ("t", c_double * 2)]
| 1,581 | 33.391304 | 78 | py |
MCEq | MCEq-master/MCEq/geometry/nrlmsise00/__init__.py | 0 | 0 | 0 | py |
|
qemu | qemu-master/python/setup.py | #!/usr/bin/env python3
"""
QEMU tooling installer script
Copyright (c) 2020-2021 John Snow for Red Hat, Inc.
"""
import setuptools
from setuptools.command import bdist_egg
import sys
import pkg_resources
class bdist_egg_guard(bdist_egg.bdist_egg):
"""
Protect against bdist_egg from being executed
This prevents calling 'setup.py install' directly, as the 'install'
CLI option will invoke the deprecated bdist_egg hook. "pip install"
calls the more modern bdist_wheel hook, which is what we want.
"""
def run(self):
sys.exit(
'Installation directly via setup.py is not supported.\n'
'Please use `pip install .` instead.'
)
def main():
"""
QEMU tooling installer
"""
# https://medium.com/@daveshawley/safely-using-setup-cfg-for-metadata-1babbe54c108
pkg_resources.require('setuptools>=39.2')
setuptools.setup(cmdclass={'bdist_egg': bdist_egg_guard})
if __name__ == '__main__':
main()
| 989 | 23.146341 | 86 | py |
qemu | qemu-master/python/qemu/qmp/error.py | """
QMP Error Classes
This package seeks to provide semantic error classes that are intended
to be used directly by clients when they would like to handle particular
semantic failures (e.g. "failed to connect") without needing to know the
enumeration of possible reasons for that failure.
QMPError serves as the ancestor for all exceptions raised by this
package, and is suitable for use in handling semantic errors from this
library. In most cases, individual public methods will attempt to catch
and re-encapsulate various exceptions to provide a semantic
error-handling interface.
.. admonition:: QMP Exception Hierarchy Reference
| `Exception`
| +-- `QMPError`
| +-- `ConnectError`
| +-- `StateError`
| +-- `ExecInterruptedError`
| +-- `ExecuteError`
| +-- `ListenerError`
| +-- `ProtocolError`
| +-- `DeserializationError`
| +-- `UnexpectedTypeError`
| +-- `ServerParseError`
| +-- `BadReplyError`
| +-- `GreetingError`
| +-- `NegotiationError`
"""
class QMPError(Exception):
"""Abstract error class for all errors originating from this package."""
class ProtocolError(QMPError):
"""
Abstract error class for protocol failures.
Semantically, these errors are generally the fault of either the
protocol server or as a result of a bug in this library.
:param error_message: Human-readable string describing the error.
"""
def __init__(self, error_message: str):
super().__init__(error_message)
#: Human-readable error message, without any prefix.
self.error_message: str = error_message
| 1,701 | 32.372549 | 76 | py |
qemu | qemu-master/python/qemu/qmp/legacy.py | """
(Legacy) Sync QMP Wrapper
This module provides the `QEMUMonitorProtocol` class, which is a
synchronous wrapper around `QMPClient`.
Its design closely resembles that of the original QEMUMonitorProtocol
class, originally written by Luiz Capitulino. It is provided here for
compatibility with scripts inside the QEMU source tree that expect the
old interface.
"""
#
# Copyright (C) 2009-2022 Red Hat Inc.
#
# Authors:
# Luiz Capitulino <[email protected]>
# John Snow <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
import asyncio
import socket
from types import TracebackType
from typing import (
Any,
Awaitable,
Dict,
List,
Optional,
Type,
TypeVar,
Union,
)
from .error import QMPError
from .protocol import Runstate, SocketAddrT
from .qmp_client import QMPClient
#: QMPMessage is an entire QMP message of any kind.
QMPMessage = Dict[str, Any]
#: QMPReturnValue is the 'return' value of a command.
QMPReturnValue = object
#: QMPObject is any object in a QMP message.
QMPObject = Dict[str, object]
# QMPMessage can be outgoing commands or incoming events/returns.
# QMPReturnValue is usually a dict/json object, but due to QAPI's
# 'command-returns-exceptions', it can actually be anything.
#
# {'return': {}} is a QMPMessage,
# {} is the QMPReturnValue.
class QMPBadPortError(QMPError):
"""
Unable to parse socket address: Port was non-numerical.
"""
class QEMUMonitorProtocol:
"""
Provide an API to connect to QEMU via QEMU Monitor Protocol (QMP)
and then allow to handle commands and events.
:param address: QEMU address, can be either a unix socket path (string)
or a tuple in the form ( address, port ) for a TCP
connection or None
:param sock: a socket or None
:param server: Act as the socket server. (See 'accept')
:param nickname: Optional nickname used for logging.
"""
def __init__(self,
address: Optional[SocketAddrT] = None,
sock: Optional[socket.socket] = None,
server: bool = False,
nickname: Optional[str] = None):
assert address or sock
self._qmp = QMPClient(nickname)
self._aloop = asyncio.get_event_loop()
self._address = address
self._sock = sock
self._timeout: Optional[float] = None
if server:
if sock:
assert self._sock is not None
self._sync(self._qmp.open_with_socket(self._sock))
else:
assert self._address is not None
self._sync(self._qmp.start_server(self._address))
_T = TypeVar('_T')
def _sync(
self, future: Awaitable[_T], timeout: Optional[float] = None
) -> _T:
return self._aloop.run_until_complete(
asyncio.wait_for(future, timeout=timeout)
)
def _get_greeting(self) -> Optional[QMPMessage]:
if self._qmp.greeting is not None:
# pylint: disable=protected-access
return self._qmp.greeting._asdict()
return None
def __enter__(self: _T) -> _T:
# Implement context manager enter function.
return self
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
# Implement context manager exit function.
self.close()
@classmethod
def parse_address(cls, address: str) -> SocketAddrT:
"""
Parse a string into a QMP address.
Figure out if the argument is in the port:host form.
If it's not, it's probably a file path.
"""
components = address.split(':')
if len(components) == 2:
try:
port = int(components[1])
except ValueError:
msg = f"Bad port: '{components[1]}' in '{address}'."
raise QMPBadPortError(msg) from None
return (components[0], port)
# Treat as filepath.
return address
def connect(self, negotiate: bool = True) -> Optional[QMPMessage]:
"""
Connect to the QMP Monitor and perform capabilities negotiation.
:return: QMP greeting dict, or None if negotiate is false
:raise ConnectError: on connection errors
"""
assert self._address is not None
self._qmp.await_greeting = negotiate
self._qmp.negotiate = negotiate
self._sync(
self._qmp.connect(self._address)
)
return self._get_greeting()
def accept(self, timeout: Optional[float] = 15.0) -> QMPMessage:
"""
Await connection from QMP Monitor and perform capabilities negotiation.
:param timeout:
timeout in seconds (nonnegative float number, or None).
If None, there is no timeout, and this may block forever.
:return: QMP greeting dict
:raise ConnectError: on connection errors
"""
self._qmp.await_greeting = True
self._qmp.negotiate = True
self._sync(self._qmp.accept(), timeout)
ret = self._get_greeting()
assert ret is not None
return ret
def cmd_obj(self, qmp_cmd: QMPMessage) -> QMPMessage:
"""
Send a QMP command to the QMP Monitor.
:param qmp_cmd: QMP command to be sent as a Python dict
:return: QMP response as a Python dict
"""
return dict(
self._sync(
# pylint: disable=protected-access
# _raw() isn't a public API, because turning off
# automatic ID assignment is discouraged. For
# compatibility with iotests *only*, do it anyway.
self._qmp._raw(qmp_cmd, assign_id=False),
self._timeout
)
)
def cmd(self, name: str,
args: Optional[Dict[str, object]] = None,
cmd_id: Optional[object] = None) -> QMPMessage:
"""
Build a QMP command and send it to the QMP Monitor.
:param name: command name (string)
:param args: command arguments (dict)
:param cmd_id: command id (dict, list, string or int)
"""
qmp_cmd: QMPMessage = {'execute': name}
if args:
qmp_cmd['arguments'] = args
if cmd_id:
qmp_cmd['id'] = cmd_id
return self.cmd_obj(qmp_cmd)
def command(self, cmd: str, **kwds: object) -> QMPReturnValue:
"""
Build and send a QMP command to the monitor, report errors if any
"""
return self._sync(
self._qmp.execute(cmd, kwds),
self._timeout
)
def pull_event(self,
wait: Union[bool, float] = False) -> Optional[QMPMessage]:
"""
Pulls a single event.
:param wait:
If False or 0, do not wait. Return None if no events ready.
If True, wait forever until the next event.
Otherwise, wait for the specified number of seconds.
:raise asyncio.TimeoutError:
When a timeout is requested and the timeout period elapses.
:return: The first available QMP event, or None.
"""
if not wait:
# wait is False/0: "do not wait, do not except."
if self._qmp.events.empty():
return None
# If wait is 'True', wait forever. If wait is False/0, the events
# queue must not be empty; but it still needs some real amount
# of time to complete.
timeout = None
if wait and isinstance(wait, float):
timeout = wait
return dict(
self._sync(
self._qmp.events.get(),
timeout
)
)
def get_events(self, wait: Union[bool, float] = False) -> List[QMPMessage]:
"""
Get a list of QMP events and clear all pending events.
:param wait:
If False or 0, do not wait. Return None if no events ready.
If True, wait until we have at least one event.
Otherwise, wait for up to the specified number of seconds for at
least one event.
:raise asyncio.TimeoutError:
When a timeout is requested and the timeout period elapses.
:return: A list of QMP events.
"""
events = [dict(x) for x in self._qmp.events.clear()]
if events:
return events
event = self.pull_event(wait)
return [event] if event is not None else []
def clear_events(self) -> None:
"""Clear current list of pending events."""
self._qmp.events.clear()
def close(self) -> None:
"""Close the connection."""
self._sync(
self._qmp.disconnect()
)
def settimeout(self, timeout: Optional[float]) -> None:
"""
Set the timeout for QMP RPC execution.
This timeout affects the `cmd`, `cmd_obj`, and `command` methods.
The `accept`, `pull_event` and `get_event` methods have their
own configurable timeouts.
:param timeout:
timeout in seconds, or None.
None will wait indefinitely.
"""
self._timeout = timeout
def send_fd_scm(self, fd: int) -> None:
"""
Send a file descriptor to the remote via SCM_RIGHTS.
"""
self._qmp.send_fd_scm(fd)
def __del__(self) -> None:
if self._qmp.runstate == Runstate.IDLE:
return
if not self._aloop.is_running():
self.close()
else:
# Garbage collection ran while the event loop was running.
# Nothing we can do about it now, but if we don't raise our
# own error, the user will be treated to a lot of traceback
# they might not understand.
raise QMPError(
"QEMUMonitorProtocol.close()"
" was not called before object was garbage collected"
)
| 10,193 | 30.079268 | 79 | py |
qemu | qemu-master/python/qemu/qmp/events.py | """
QMP Events and EventListeners
Asynchronous QMP uses `EventListener` objects to listen for events. An
`EventListener` is a FIFO event queue that can be pre-filtered to listen
for only specific events. Each `EventListener` instance receives its own
copy of events that it hears, so events may be consumed without fear or
worry for depriving other listeners of events they need to hear.
EventListener Tutorial
----------------------
In all of the following examples, we assume that we have a `QMPClient`
instantiated named ``qmp`` that is already connected.
`listener()` context blocks with one name
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The most basic usage is by using the `listener()` context manager to
construct them:
.. code:: python
with qmp.listener('STOP') as listener:
await qmp.execute('stop')
await listener.get()
The listener is active only for the duration of the ‘with’ block. This
instance listens only for ‘STOP’ events.
`listener()` context blocks with two or more names
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Multiple events can be selected for by providing any ``Iterable[str]``:
.. code:: python
with qmp.listener(('STOP', 'RESUME')) as listener:
await qmp.execute('stop')
event = await listener.get()
assert event['event'] == 'STOP'
await qmp.execute('cont')
event = await listener.get()
assert event['event'] == 'RESUME'
`listener()` context blocks with no names
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
By omitting names entirely, you can listen to ALL events.
.. code:: python
with qmp.listener() as listener:
await qmp.execute('stop')
event = await listener.get()
assert event['event'] == 'STOP'
This isn’t a very good use case for this feature: In a non-trivial
running system, we may not know what event will arrive next. Grabbing
the top of a FIFO queue returning multiple kinds of events may be prone
to error.
Using async iterators to retrieve events
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you’d like to simply watch what events happen to arrive, you can use
the listener as an async iterator:
.. code:: python
with qmp.listener() as listener:
async for event in listener:
print(f"Event arrived: {event['event']}")
This is analogous to the following code:
.. code:: python
with qmp.listener() as listener:
while True:
event = listener.get()
print(f"Event arrived: {event['event']}")
This event stream will never end, so these blocks will never terminate.
Using asyncio.Task to concurrently retrieve events
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since a listener’s event stream will never terminate, it is not likely
useful to use that form in a script. For longer-running clients, we can
create event handlers by using `asyncio.Task` to create concurrent
coroutines:
.. code:: python
async def print_events(listener):
try:
async for event in listener:
print(f"Event arrived: {event['event']}")
except asyncio.CancelledError:
return
with qmp.listener() as listener:
task = asyncio.Task(print_events(listener))
await qmp.execute('stop')
await qmp.execute('cont')
task.cancel()
await task
However, there is no guarantee that these events will be received by the
time we leave this context block. Once the context block is exited, the
listener will cease to hear any new events, and becomes inert.
Be mindful of the timing: the above example will *probably*– but does
not *guarantee*– that both STOP/RESUMED events will be printed. The
example below outlines how to use listeners outside of a context block.
Using `register_listener()` and `remove_listener()`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To create a listener with a longer lifetime, beyond the scope of a
single block, create a listener and then call `register_listener()`:
.. code:: python
class MyClient:
def __init__(self, qmp):
self.qmp = qmp
self.listener = EventListener()
async def print_events(self):
try:
async for event in self.listener:
print(f"Event arrived: {event['event']}")
except asyncio.CancelledError:
return
async def run(self):
self.task = asyncio.Task(self.print_events)
self.qmp.register_listener(self.listener)
await qmp.execute('stop')
await qmp.execute('cont')
async def stop(self):
self.task.cancel()
await self.task
self.qmp.remove_listener(self.listener)
The listener can be deactivated by using `remove_listener()`. When it is
removed, any possible pending events are cleared and it can be
re-registered at a later time.
Using the built-in all events listener
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The `QMPClient` object creates its own default listener named
:py:obj:`~Events.events` that can be used for the same purpose without
having to create your own:
.. code:: python
async def print_events(listener):
try:
async for event in listener:
print(f"Event arrived: {event['event']}")
except asyncio.CancelledError:
return
task = asyncio.Task(print_events(qmp.events))
await qmp.execute('stop')
await qmp.execute('cont')
task.cancel()
await task
Using both .get() and async iterators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The async iterator and `get()` methods pull events from the same FIFO
queue. If you mix the usage of both, be aware: Events are emitted
precisely once per listener.
If multiple contexts try to pull events from the same listener instance,
events are still emitted only precisely once.
This restriction can be lifted by creating additional listeners.
Creating multiple listeners
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Additional `EventListener` objects can be created at-will. Each one
receives its own copy of events, with separate FIFO event queues.
.. code:: python
my_listener = EventListener()
qmp.register_listener(my_listener)
await qmp.execute('stop')
copy1 = await my_listener.get()
copy2 = await qmp.events.get()
assert copy1 == copy2
In this example, we await an event from both a user-created
`EventListener` and the built-in events listener. Both receive the same
event.
Clearing listeners
~~~~~~~~~~~~~~~~~~
`EventListener` objects can be cleared, clearing all events seen thus far:
.. code:: python
await qmp.execute('stop')
qmp.events.clear()
await qmp.execute('cont')
event = await qmp.events.get()
assert event['event'] == 'RESUME'
`EventListener` objects are FIFO queues. If events are not consumed,
they will remain in the queue until they are witnessed or discarded via
`clear()`. FIFO queues will be drained automatically upon leaving a
context block, or when calling `remove_listener()`.
Accessing listener history
~~~~~~~~~~~~~~~~~~~~~~~~~~
`EventListener` objects record their history. Even after being cleared,
you can obtain a record of all events seen so far:
.. code:: python
await qmp.execute('stop')
await qmp.execute('cont')
qmp.events.clear()
assert len(qmp.events.history) == 2
assert qmp.events.history[0]['event'] == 'STOP'
assert qmp.events.history[1]['event'] == 'RESUME'
The history is updated immediately and does not require the event to be
witnessed first.
Using event filters
~~~~~~~~~~~~~~~~~~~
`EventListener` objects can be given complex filtering criteria if names
are not sufficient:
.. code:: python
def job1_filter(event) -> bool:
event_data = event.get('data', {})
event_job_id = event_data.get('id')
return event_job_id == "job1"
with qmp.listener('JOB_STATUS_CHANGE', job1_filter) as listener:
await qmp.execute('blockdev-backup', arguments={'job-id': 'job1', ...})
async for event in listener:
if event['data']['status'] == 'concluded':
break
These filters might be most useful when parameterized. `EventListener`
objects expect a function that takes only a single argument (the raw
event, as a `Message`) and returns a bool; True if the event should be
accepted into the stream. You can create a function that adapts this
signature to accept configuration parameters:
.. code:: python
def job_filter(job_id: str) -> EventFilter:
def filter(event: Message) -> bool:
return event['data']['id'] == job_id
return filter
with qmp.listener('JOB_STATUS_CHANGE', job_filter('job2')) as listener:
await qmp.execute('blockdev-backup', arguments={'job-id': 'job2', ...})
async for event in listener:
if event['data']['status'] == 'concluded':
break
Activating an existing listener with `listen()`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Listeners with complex, long configurations can also be created manually
and activated temporarily by using `listen()` instead of `listener()`:
.. code:: python
listener = EventListener(('BLOCK_JOB_COMPLETED', 'BLOCK_JOB_CANCELLED',
'BLOCK_JOB_ERROR', 'BLOCK_JOB_READY',
'BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE'))
with qmp.listen(listener):
await qmp.execute('blockdev-backup', arguments={'job-id': 'job3', ...})
async for event in listener:
print(event)
if event['event'] == 'BLOCK_JOB_COMPLETED':
break
Any events that are not witnessed by the time the block is left will be
cleared from the queue; entering the block is an implicit
`register_listener()` and leaving the block is an implicit
`remove_listener()`.
Activating multiple existing listeners with `listen()`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
While `listener()` is only capable of creating a single listener,
`listen()` is capable of activating multiple listeners simultaneously:
.. code:: python
def job_filter(job_id: str) -> EventFilter:
def filter(event: Message) -> bool:
return event['data']['id'] == job_id
return filter
jobA = EventListener('JOB_STATUS_CHANGE', job_filter('jobA'))
jobB = EventListener('JOB_STATUS_CHANGE', job_filter('jobB'))
with qmp.listen(jobA, jobB):
qmp.execute('blockdev-create', arguments={'job-id': 'jobA', ...})
qmp.execute('blockdev-create', arguments={'job-id': 'jobB', ...})
async for event in jobA.get():
if event['data']['status'] == 'concluded':
break
async for event in jobB.get():
if event['data']['status'] == 'concluded':
break
Extending the `EventListener` class
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the case that a more specialized `EventListener` is desired to
provide either more functionality or more compact syntax for specialized
cases, it can be extended.
One of the key methods to extend or override is
:py:meth:`~EventListener.accept()`. The default implementation checks an
incoming message for:
1. A qualifying name, if any :py:obj:`~EventListener.names` were
specified at initialization time
2. That :py:obj:`~EventListener.event_filter()` returns True.
This can be modified however you see fit to change the criteria for
inclusion in the stream.
For convenience, a ``JobListener`` class could be created that simply
bakes in configuration so it does not need to be repeated:
.. code:: python
class JobListener(EventListener):
def __init__(self, job_id: str):
super().__init__(('BLOCK_JOB_COMPLETED', 'BLOCK_JOB_CANCELLED',
'BLOCK_JOB_ERROR', 'BLOCK_JOB_READY',
'BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE'))
self.job_id = job_id
def accept(self, event) -> bool:
if not super().accept(event):
return False
if event['event'] in ('BLOCK_JOB_PENDING', 'JOB_STATUS_CHANGE'):
return event['data']['id'] == job_id
return event['data']['device'] == job_id
From here on out, you can conjure up a custom-purpose listener that
listens only for job-related events for a specific job-id easily:
.. code:: python
listener = JobListener('job4')
with qmp.listener(listener):
await qmp.execute('blockdev-backup', arguments={'job-id': 'job4', ...})
async for event in listener:
print(event)
if event['event'] == 'BLOCK_JOB_COMPLETED':
break
Experimental Interfaces & Design Issues
---------------------------------------
These interfaces are not ones I am sure I will keep or otherwise modify
heavily.
qmp.listener()’s type signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`listener()` does not return anything, because it was assumed the caller
already had a handle to the listener. However, for
``qmp.listener(EventListener())`` forms, the caller will not have saved
a handle to the listener.
Because this function can accept *many* listeners, I found it hard to
accurately type in a way where it could be used in both “one” or “many”
forms conveniently and in a statically type-safe manner.
Ultimately, I removed the return altogether, but perhaps with more time
I can work out a way to re-add it.
API Reference
-------------
"""
import asyncio
from contextlib import contextmanager
import logging
from typing import (
AsyncIterator,
Callable,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
Union,
)
from .error import QMPError
from .message import Message
EventNames = Union[str, Iterable[str], None]
EventFilter = Callable[[Message], bool]
class ListenerError(QMPError):
"""
Generic error class for `EventListener`-related problems.
"""
class EventListener:
"""
Selectively listens for events with runtime configurable filtering.
This class is designed to be directly usable for the most common cases,
but it can be extended to provide more rigorous control.
:param names:
One or more names of events to listen for.
When not provided, listen for ALL events.
:param event_filter:
An optional event filtering function.
When names are also provided, this acts as a secondary filter.
When ``names`` and ``event_filter`` are both provided, the names
will be filtered first, and then the filter function will be called
second. The event filter function can assume that the format of the
event is a known format.
"""
def __init__(
self,
names: EventNames = None,
event_filter: Optional[EventFilter] = None,
):
# Queue of 'heard' events yet to be witnessed by a caller.
self._queue: 'asyncio.Queue[Message]' = asyncio.Queue()
# Intended as a historical record, NOT a processing queue or backlog.
self._history: List[Message] = []
#: Primary event filter, based on one or more event names.
self.names: Set[str] = set()
if isinstance(names, str):
self.names.add(names)
elif names is not None:
self.names.update(names)
#: Optional, secondary event filter.
self.event_filter: Optional[EventFilter] = event_filter
@property
def history(self) -> Tuple[Message, ...]:
"""
A read-only history of all events seen so far.
This represents *every* event, including those not yet witnessed
via `get()` or ``async for``. It persists between `clear()`
calls and is immutable.
"""
return tuple(self._history)
def accept(self, event: Message) -> bool:
"""
Determine if this listener accepts this event.
This method determines which events will appear in the stream.
The default implementation simply checks the event against the
list of names and the event_filter to decide if this
`EventListener` accepts a given event. It can be
overridden/extended to provide custom listener behavior.
User code is not expected to need to invoke this method.
:param event: The event under consideration.
:return: `True`, if this listener accepts this event.
"""
name_ok = (not self.names) or (event['event'] in self.names)
return name_ok and (
(not self.event_filter) or self.event_filter(event)
)
async def put(self, event: Message) -> None:
"""
Conditionally put a new event into the FIFO queue.
This method is not designed to be invoked from user code, and it
should not need to be overridden. It is a public interface so
that `QMPClient` has an interface by which it can inform
registered listeners of new events.
The event will be put into the queue if
:py:meth:`~EventListener.accept()` returns `True`.
:param event: The new event to put into the FIFO queue.
"""
if not self.accept(event):
return
self._history.append(event)
await self._queue.put(event)
async def get(self) -> Message:
"""
Wait for the very next event in this stream.
If one is already available, return that one.
"""
return await self._queue.get()
def empty(self) -> bool:
"""
Return `True` if there are no pending events.
"""
return self._queue.empty()
def clear(self) -> List[Message]:
"""
Clear this listener of all pending events.
Called when an `EventListener` is being unregistered, this clears the
pending FIFO queue synchronously. It can be also be used to
manually clear any pending events, if desired.
:return: The cleared events, if any.
.. warning::
Take care when discarding events. Cleared events will be
silently tossed on the floor. All events that were ever
accepted by this listener are visible in `history()`.
"""
events = []
while True:
try:
events.append(self._queue.get_nowait())
except asyncio.QueueEmpty:
break
return events
def __aiter__(self) -> AsyncIterator[Message]:
return self
async def __anext__(self) -> Message:
"""
Enables the `EventListener` to function as an async iterator.
It may be used like this:
.. code:: python
async for event in listener:
print(event)
These iterators will never terminate of their own accord; you
must provide break conditions or otherwise prepare to run them
in an `asyncio.Task` that can be cancelled.
"""
return await self.get()
class Events:
"""
Events is a mix-in class that adds event functionality to the QMP class.
It's designed specifically as a mix-in for `QMPClient`, and it
relies upon the class it is being mixed into having a 'logger'
property.
"""
def __init__(self) -> None:
self._listeners: List[EventListener] = []
#: Default, all-events `EventListener`.
self.events: EventListener = EventListener()
self.register_listener(self.events)
# Parent class needs to have a logger
self.logger: logging.Logger
async def _event_dispatch(self, msg: Message) -> None:
"""
Given a new event, propagate it to all of the active listeners.
:param msg: The event to propagate.
"""
for listener in self._listeners:
await listener.put(msg)
def register_listener(self, listener: EventListener) -> None:
"""
Register and activate an `EventListener`.
:param listener: The listener to activate.
:raise ListenerError: If the given listener is already registered.
"""
if listener in self._listeners:
raise ListenerError("Attempted to re-register existing listener")
self.logger.debug("Registering %s.", str(listener))
self._listeners.append(listener)
def remove_listener(self, listener: EventListener) -> None:
"""
Unregister and deactivate an `EventListener`.
The removed listener will have its pending events cleared via
`clear()`. The listener can be re-registered later when
desired.
:param listener: The listener to deactivate.
:raise ListenerError: If the given listener is not registered.
"""
if listener == self.events:
raise ListenerError("Cannot remove the default listener.")
self.logger.debug("Removing %s.", str(listener))
listener.clear()
self._listeners.remove(listener)
@contextmanager
def listen(self, *listeners: EventListener) -> Iterator[None]:
r"""
Context manager: Temporarily listen with an `EventListener`.
Accepts one or more `EventListener` objects and registers them,
activating them for the duration of the context block.
`EventListener` objects will have any pending events in their
FIFO queue cleared upon exiting the context block, when they are
deactivated.
:param \*listeners: One or more EventListeners to activate.
:raise ListenerError: If the given listener(s) are already active.
"""
_added = []
try:
for listener in listeners:
self.register_listener(listener)
_added.append(listener)
yield
finally:
for listener in _added:
self.remove_listener(listener)
@contextmanager
def listener(
self,
names: EventNames = (),
event_filter: Optional[EventFilter] = None
) -> Iterator[EventListener]:
"""
Context manager: Temporarily listen with a new `EventListener`.
Creates an `EventListener` object and registers it, activating
it for the duration of the context block.
:param names:
One or more names of events to listen for.
When not provided, listen for ALL events.
:param event_filter:
An optional event filtering function.
When names are also provided, this acts as a secondary filter.
:return: The newly created and active `EventListener`.
"""
listener = EventListener(names, event_filter)
with self.listen(listener):
yield listener
| 22,625 | 30.512535 | 78 | py |
qemu | qemu-master/python/qemu/qmp/message.py | """
QMP Message Format
This module provides the `Message` class, which represents a single QMP
message sent to or from the server.
"""
import json
from json import JSONDecodeError
from typing import (
Dict,
Iterator,
Mapping,
MutableMapping,
Optional,
Union,
)
from .error import ProtocolError
class Message(MutableMapping[str, object]):
"""
Represents a single QMP protocol message.
QMP uses JSON objects as its basic communicative unit; so this
Python object is a :py:obj:`~collections.abc.MutableMapping`. It may
be instantiated from either another mapping (like a `dict`), or from
raw `bytes` that still need to be deserialized.
Once instantiated, it may be treated like any other MutableMapping::
>>> msg = Message(b'{"hello": "world"}')
>>> assert msg['hello'] == 'world'
>>> msg['id'] = 'foobar'
>>> print(msg)
{
"hello": "world",
"id": "foobar"
}
It can be converted to `bytes`::
>>> msg = Message({"hello": "world"})
>>> print(bytes(msg))
b'{"hello":"world","id":"foobar"}'
Or back into a garden-variety `dict`::
>>> dict(msg)
{'hello': 'world'}
:param value: Initial value, if any.
:param eager:
When `True`, attempt to serialize or deserialize the initial value
immediately, so that conversion exceptions are raised during
the call to ``__init__()``.
"""
# pylint: disable=too-many-ancestors
def __init__(self,
value: Union[bytes, Mapping[str, object]] = b'{}', *,
eager: bool = True):
self._data: Optional[bytes] = None
self._obj: Optional[Dict[str, object]] = None
if isinstance(value, bytes):
self._data = value
if eager:
self._obj = self._deserialize(self._data)
else:
self._obj = dict(value)
if eager:
self._data = self._serialize(self._obj)
# Methods necessary to implement the MutableMapping interface, see:
# https://docs.python.org/3/library/collections.abc.html#collections.abc.MutableMapping
# We get pop, popitem, clear, update, setdefault, __contains__,
# keys, items, values, get, __eq__ and __ne__ for free.
def __getitem__(self, key: str) -> object:
return self._object[key]
def __setitem__(self, key: str, value: object) -> None:
self._object[key] = value
self._data = None
def __delitem__(self, key: str) -> None:
del self._object[key]
self._data = None
def __iter__(self) -> Iterator[str]:
return iter(self._object)
def __len__(self) -> int:
return len(self._object)
# Dunder methods not related to MutableMapping:
def __repr__(self) -> str:
if self._obj is not None:
return f"Message({self._object!r})"
return f"Message({bytes(self)!r})"
def __str__(self) -> str:
"""Pretty-printed representation of this QMP message."""
return json.dumps(self._object, indent=2)
def __bytes__(self) -> bytes:
"""bytes representing this QMP message."""
if self._data is None:
self._data = self._serialize(self._obj or {})
return self._data
# Conversion Methods
@property
def _object(self) -> Dict[str, object]:
"""
A `dict` representing this QMP message.
Generated on-demand, if required. This property is private
because it returns an object that could be used to invalidate
the internal state of the `Message` object.
"""
if self._obj is None:
self._obj = self._deserialize(self._data or b'{}')
return self._obj
@classmethod
def _serialize(cls, value: object) -> bytes:
"""
Serialize a JSON object as `bytes`.
:raise ValueError: When the object cannot be serialized.
:raise TypeError: When the object cannot be serialized.
:return: `bytes` ready to be sent over the wire.
"""
return json.dumps(value, separators=(',', ':')).encode('utf-8')
@classmethod
def _deserialize(cls, data: bytes) -> Dict[str, object]:
"""
Deserialize JSON `bytes` into a native Python `dict`.
:raise DeserializationError:
If JSON deserialization fails for any reason.
:raise UnexpectedTypeError:
If the data does not represent a JSON object.
:return: A `dict` representing this QMP message.
"""
try:
obj = json.loads(data)
except JSONDecodeError as err:
emsg = "Failed to deserialize QMP message."
raise DeserializationError(emsg, data) from err
if not isinstance(obj, dict):
raise UnexpectedTypeError(
"QMP message is not a JSON object.",
obj
)
return obj
class DeserializationError(ProtocolError):
"""
A QMP message was not understood as JSON.
When this Exception is raised, ``__cause__`` will be set to the
`json.JSONDecodeError` Exception, which can be interrogated for
further details.
:param error_message: Human-readable string describing the error.
:param raw: The raw `bytes` that prompted the failure.
"""
def __init__(self, error_message: str, raw: bytes):
super().__init__(error_message)
#: The raw `bytes` that were not understood as JSON.
self.raw: bytes = raw
def __str__(self) -> str:
return "\n".join([
super().__str__(),
f" raw bytes were: {str(self.raw)}",
])
class UnexpectedTypeError(ProtocolError):
"""
A QMP message was JSON, but not a JSON object.
:param error_message: Human-readable string describing the error.
:param value: The deserialized JSON value that wasn't an object.
"""
def __init__(self, error_message: str, value: object):
super().__init__(error_message)
#: The JSON value that was expected to be an object.
self.value: object = value
def __str__(self) -> str:
strval = json.dumps(self.value, indent=2)
return "\n".join([
super().__str__(),
f" json value was: {strval}",
])
| 6,355 | 29.266667 | 91 | py |
qemu | qemu-master/python/qemu/qmp/qmp_tui.py | # Copyright (c) 2021
#
# Authors:
# Niteesh Babu G S <[email protected]>
#
# This work is licensed under the terms of the GNU LGPL, version 2 or
# later. See the COPYING file in the top-level directory.
"""
QMP TUI
QMP TUI is an asynchronous interface built on top the of the QMP library.
It is the successor of QMP-shell and is bought-in as a replacement for it.
Example Usage: qmp-tui <SOCKET | TCP IP:PORT>
Full Usage: qmp-tui --help
"""
import argparse
import asyncio
import json
import logging
from logging import Handler, LogRecord
import signal
from typing import (
List,
Optional,
Tuple,
Type,
Union,
cast,
)
from pygments import lexers
from pygments import token as Token
import urwid
import urwid_readline
from .error import ProtocolError
from .legacy import QEMUMonitorProtocol, QMPBadPortError
from .message import DeserializationError, Message, UnexpectedTypeError
from .protocol import ConnectError, Runstate
from .qmp_client import ExecInterruptedError, QMPClient
from .util import create_task, pretty_traceback
# The name of the signal that is used to update the history list
UPDATE_MSG: str = 'UPDATE_MSG'
palette = [
(Token.Punctuation, '', '', '', 'h15,bold', 'g7'),
(Token.Text, '', '', '', '', 'g7'),
(Token.Name.Tag, '', '', '', 'bold,#f88', 'g7'),
(Token.Literal.Number.Integer, '', '', '', '#fa0', 'g7'),
(Token.Literal.String.Double, '', '', '', '#6f6', 'g7'),
(Token.Keyword.Constant, '', '', '', '#6af', 'g7'),
('DEBUG', '', '', '', '#ddf', 'g7'),
('INFO', '', '', '', 'g100', 'g7'),
('WARNING', '', '', '', '#ff6', 'g7'),
('ERROR', '', '', '', '#a00', 'g7'),
('CRITICAL', '', '', '', '#a00', 'g7'),
('background', '', 'black', '', '', 'g7'),
]
def format_json(msg: str) -> str:
"""
Formats valid/invalid multi-line JSON message into a single-line message.
Formatting is first tried using the standard json module. If that fails
due to an decoding error then a simple string manipulation is done to
achieve a single line JSON string.
Converting into single line is more aesthetically pleasing when looking
along with error messages.
Eg:
Input:
[ 1,
true,
3 ]
The above input is not a valid QMP message and produces the following error
"QMP message is not a JSON object."
When displaying this in TUI in multiline mode we get
[ 1,
true,
3 ]: QMP message is not a JSON object.
whereas in singleline mode we get the following
[1, true, 3]: QMP message is not a JSON object.
The single line mode is more aesthetically pleasing.
:param msg:
The message to formatted into single line.
:return: Formatted singleline message.
"""
try:
msg = json.loads(msg)
return str(json.dumps(msg))
except json.decoder.JSONDecodeError:
msg = msg.replace('\n', '')
words = msg.split(' ')
words = list(filter(None, words))
return ' '.join(words)
def has_handler_type(logger: logging.Logger,
handler_type: Type[Handler]) -> bool:
"""
The Logger class has no interface to check if a certain type of handler is
installed or not. So we provide an interface to do so.
:param logger:
Logger object
:param handler_type:
The type of the handler to be checked.
:return: returns True if handler of type `handler_type`.
"""
for handler in logger.handlers:
if isinstance(handler, handler_type):
return True
return False
class App(QMPClient):
"""
Implements the QMP TUI.
Initializes the widgets and starts the urwid event loop.
:param address:
Address of the server to connect to.
:param num_retries:
The number of times to retry before stopping to reconnect.
:param retry_delay:
The delay(sec) before each retry
"""
def __init__(self, address: Union[str, Tuple[str, int]], num_retries: int,
retry_delay: Optional[int]) -> None:
urwid.register_signal(type(self), UPDATE_MSG)
self.window = Window(self)
self.address = address
self.aloop: Optional[asyncio.AbstractEventLoop] = None
self.num_retries = num_retries
self.retry_delay = retry_delay if retry_delay else 2
self.retry: bool = False
self.exiting: bool = False
super().__init__()
def add_to_history(self, msg: str, level: Optional[str] = None) -> None:
"""
Appends the msg to the history list.
:param msg:
The raw message to be appended in string type.
"""
urwid.emit_signal(self, UPDATE_MSG, msg, level)
def _cb_outbound(self, msg: Message) -> Message:
"""
Callback: outbound message hook.
Appends the outgoing messages to the history box.
:param msg: raw outbound message.
:return: final outbound message.
"""
str_msg = str(msg)
if not has_handler_type(logging.getLogger(), TUILogHandler):
logging.debug('Request: %s', str_msg)
self.add_to_history('<-- ' + str_msg)
return msg
def _cb_inbound(self, msg: Message) -> Message:
"""
Callback: outbound message hook.
Appends the incoming messages to the history box.
:param msg: raw inbound message.
:return: final inbound message.
"""
str_msg = str(msg)
if not has_handler_type(logging.getLogger(), TUILogHandler):
logging.debug('Request: %s', str_msg)
self.add_to_history('--> ' + str_msg)
return msg
async def _send_to_server(self, msg: Message) -> None:
"""
This coroutine sends the message to the server.
The message has to be pre-validated.
:param msg:
Pre-validated message to be to sent to the server.
:raise Exception: When an unhandled exception is caught.
"""
try:
await self._raw(msg, assign_id='id' not in msg)
except ExecInterruptedError as err:
logging.info('Error server disconnected before reply %s', str(err))
self.add_to_history('Server disconnected before reply', 'ERROR')
except Exception as err:
logging.error('Exception from _send_to_server: %s', str(err))
raise err
def cb_send_to_server(self, raw_msg: str) -> None:
"""
Validates and sends the message to the server.
The raw string message is first converted into a Message object
and is then sent to the server.
:param raw_msg:
The raw string message to be sent to the server.
:raise Exception: When an unhandled exception is caught.
"""
try:
msg = Message(bytes(raw_msg, encoding='utf-8'))
create_task(self._send_to_server(msg))
except (DeserializationError, UnexpectedTypeError) as err:
raw_msg = format_json(raw_msg)
logging.info('Invalid message: %s', err.error_message)
self.add_to_history(f'{raw_msg}: {err.error_message}', 'ERROR')
def unhandled_input(self, key: str) -> None:
"""
Handle's keys which haven't been handled by the child widgets.
:param key:
Unhandled key
"""
if key == 'esc':
self.kill_app()
def kill_app(self) -> None:
"""
Initiates killing of app. A bridge between asynchronous and synchronous
code.
"""
create_task(self._kill_app())
async def _kill_app(self) -> None:
"""
This coroutine initiates the actual disconnect process and calls
urwid.ExitMainLoop() to kill the TUI.
:raise Exception: When an unhandled exception is caught.
"""
self.exiting = True
await self.disconnect()
logging.debug('Disconnect finished. Exiting app')
raise urwid.ExitMainLoop()
async def disconnect(self) -> None:
"""
Overrides the disconnect method to handle the errors locally.
"""
try:
await super().disconnect()
except (OSError, EOFError) as err:
logging.info('disconnect: %s', str(err))
self.retry = True
except ProtocolError as err:
logging.info('disconnect: %s', str(err))
except Exception as err:
logging.error('disconnect: Unhandled exception %s', str(err))
raise err
def _set_status(self, msg: str) -> None:
"""
Sets the message as the status.
:param msg:
The message to be displayed in the status bar.
"""
self.window.footer.set_text(msg)
def _get_formatted_address(self) -> str:
"""
Returns a formatted version of the server's address.
:return: formatted address
"""
if isinstance(self.address, tuple):
host, port = self.address
addr = f'{host}:{port}'
else:
addr = f'{self.address}'
return addr
async def _initiate_connection(self) -> Optional[ConnectError]:
"""
Tries connecting to a server a number of times with a delay between
each try. If all retries failed then return the error faced during
the last retry.
:return: Error faced during last retry.
"""
current_retries = 0
err = None
# initial try
await self.connect_server()
while self.retry and current_retries < self.num_retries:
logging.info('Connection Failed, retrying in %d', self.retry_delay)
status = f'[Retry #{current_retries} ({self.retry_delay}s)]'
self._set_status(status)
await asyncio.sleep(self.retry_delay)
err = await self.connect_server()
current_retries += 1
# If all retries failed report the last error
if err:
logging.info('All retries failed: %s', err)
return err
return None
async def manage_connection(self) -> None:
"""
Manage the connection based on the current run state.
A reconnect is issued when the current state is IDLE and the number
of retries is not exhausted.
A disconnect is issued when the current state is DISCONNECTING.
"""
while not self.exiting:
if self.runstate == Runstate.IDLE:
err = await self._initiate_connection()
# If retry is still true then, we have exhausted all our tries.
if err:
self._set_status(f'[Error: {err.error_message}]')
else:
addr = self._get_formatted_address()
self._set_status(f'[Connected {addr}]')
elif self.runstate == Runstate.DISCONNECTING:
self._set_status('[Disconnected]')
await self.disconnect()
# check if a retry is needed
if self.runstate == Runstate.IDLE:
continue
await self.runstate_changed()
async def connect_server(self) -> Optional[ConnectError]:
"""
Initiates a connection to the server at address `self.address`
and in case of a failure, sets the status to the respective error.
"""
try:
await self.connect(self.address)
self.retry = False
except ConnectError as err:
logging.info('connect_server: ConnectError %s', str(err))
self.retry = True
return err
return None
def run(self, debug: bool = False) -> None:
"""
Starts the long running co-routines and the urwid event loop.
:param debug:
Enables/Disables asyncio event loop debugging
"""
screen = urwid.raw_display.Screen()
screen.set_terminal_properties(256)
self.aloop = asyncio.get_event_loop()
self.aloop.set_debug(debug)
# Gracefully handle SIGTERM and SIGINT signals
cancel_signals = [signal.SIGTERM, signal.SIGINT]
for sig in cancel_signals:
self.aloop.add_signal_handler(sig, self.kill_app)
event_loop = urwid.AsyncioEventLoop(loop=self.aloop)
main_loop = urwid.MainLoop(urwid.AttrMap(self.window, 'background'),
unhandled_input=self.unhandled_input,
screen=screen,
palette=palette,
handle_mouse=True,
event_loop=event_loop)
create_task(self.manage_connection(), self.aloop)
try:
main_loop.run()
except Exception as err:
logging.error('%s\n%s\n', str(err), pretty_traceback())
raise err
class StatusBar(urwid.Text):
"""
A simple statusbar modelled using the Text widget. The status can be
set using the set_text function. All text set is aligned to right.
:param text: Initial text to be displayed. Default is empty str.
"""
def __init__(self, text: str = ''):
super().__init__(text, align='right')
class Editor(urwid_readline.ReadlineEdit):
"""
A simple editor modelled using the urwid_readline.ReadlineEdit widget.
Mimcs GNU readline shortcuts and provides history support.
The readline shortcuts can be found below:
https://github.com/rr-/urwid_readline#features
Along with the readline features, this editor also has support for
history. Pressing the 'up'/'down' switches between the prev/next messages
available in the history.
Currently there is no support to save the history to a file. The history of
previous commands is lost on exit.
:param parent: Reference to the TUI object.
"""
def __init__(self, parent: App) -> None:
super().__init__(caption='> ', multiline=True)
self.parent = parent
self.history: List[str] = []
self.last_index: int = -1
self.show_history: bool = False
def keypress(self, size: Tuple[int, int], key: str) -> Optional[str]:
"""
Handles the keypress on this widget.
:param size:
The current size of the widget.
:param key:
The key to be handled.
:return: Unhandled key if any.
"""
msg = self.get_edit_text()
if key == 'up' and not msg:
# Show the history when 'up arrow' is pressed with no input text.
# NOTE: The show_history logic is necessary because in 'multiline'
# mode (which we use) 'up arrow' is used to move between lines.
if not self.history:
return None
self.show_history = True
last_msg = self.history[self.last_index]
self.set_edit_text(last_msg)
self.edit_pos = len(last_msg)
elif key == 'up' and self.show_history:
self.last_index = max(self.last_index - 1, -len(self.history))
self.set_edit_text(self.history[self.last_index])
self.edit_pos = len(self.history[self.last_index])
elif key == 'down' and self.show_history:
if self.last_index == -1:
self.set_edit_text('')
self.show_history = False
else:
self.last_index += 1
self.set_edit_text(self.history[self.last_index])
self.edit_pos = len(self.history[self.last_index])
elif key == 'meta enter':
# When using multiline, enter inserts a new line into the editor
# send the input to the server on alt + enter
self.parent.cb_send_to_server(msg)
self.history.append(msg)
self.set_edit_text('')
self.last_index = -1
self.show_history = False
else:
self.show_history = False
self.last_index = -1
return cast(Optional[str], super().keypress(size, key))
return None
class EditorWidget(urwid.Filler):
"""
Wrapper around the editor widget.
The Editor is a flow widget and has to wrapped inside a box widget.
This class wraps the Editor inside filler widget.
:param parent: Reference to the TUI object.
"""
def __init__(self, parent: App) -> None:
super().__init__(Editor(parent), valign='top')
class HistoryBox(urwid.ListBox):
"""
This widget is modelled using the ListBox widget, contains the list of
all messages both QMP messages and log messages to be shown in the TUI.
The messages are urwid.Text widgets. On every append of a message, the
focus is shifted to the last appended message.
:param parent: Reference to the TUI object.
"""
def __init__(self, parent: App) -> None:
self.parent = parent
self.history = urwid.SimpleFocusListWalker([])
super().__init__(self.history)
def add_to_history(self,
history: Union[str, List[Tuple[str, str]]]) -> None:
"""
Appends a message to the list and set the focus to the last appended
message.
:param history:
The history item(message/event) to be appended to the list.
"""
self.history.append(urwid.Text(history))
self.history.set_focus(len(self.history) - 1)
def mouse_event(self, size: Tuple[int, int], _event: str, button: float,
_x: int, _y: int, focus: bool) -> None:
# Unfortunately there are no urwid constants that represent the mouse
# events.
if button == 4: # Scroll up event
super().keypress(size, 'up')
elif button == 5: # Scroll down event
super().keypress(size, 'down')
class HistoryWindow(urwid.Frame):
"""
This window composes the HistoryBox and EditorWidget in a horizontal split.
By default the first focus is given to the history box.
:param parent: Reference to the TUI object.
"""
def __init__(self, parent: App) -> None:
self.parent = parent
self.editor_widget = EditorWidget(parent)
self.editor = urwid.LineBox(self.editor_widget)
self.history = HistoryBox(parent)
self.body = urwid.Pile([('weight', 80, self.history),
('weight', 20, self.editor)])
super().__init__(self.body)
urwid.connect_signal(self.parent, UPDATE_MSG, self.cb_add_to_history)
def cb_add_to_history(self, msg: str, level: Optional[str] = None) -> None:
"""
Appends a message to the history box
:param msg:
The message to be appended to the history box.
:param level:
The log level of the message, if it is a log message.
"""
formatted = []
if level:
msg = f'[{level}]: {msg}'
formatted.append((level, msg))
else:
lexer = lexers.JsonLexer() # pylint: disable=no-member
for token in lexer.get_tokens(msg):
formatted.append(token)
self.history.add_to_history(formatted)
class Window(urwid.Frame):
"""
This window is the top most widget of the TUI and will contain other
windows. Each child of this widget is responsible for displaying a specific
functionality.
:param parent: Reference to the TUI object.
"""
def __init__(self, parent: App) -> None:
self.parent = parent
footer = StatusBar()
body = HistoryWindow(parent)
super().__init__(body, footer=footer)
class TUILogHandler(Handler):
"""
This handler routes all the log messages to the TUI screen.
It is installed to the root logger to so that the log message from all
libraries begin used is routed to the screen.
:param tui: Reference to the TUI object.
"""
def __init__(self, tui: App) -> None:
super().__init__()
self.tui = tui
def emit(self, record: LogRecord) -> None:
"""
Emits a record to the TUI screen.
Appends the log message to the TUI screen
"""
level = record.levelname
msg = record.getMessage()
self.tui.add_to_history(msg, level)
def main() -> None:
"""
Driver of the whole script, parses arguments, initialize the TUI and
the logger.
"""
parser = argparse.ArgumentParser(description='QMP TUI')
parser.add_argument('qmp_server', help='Address of the QMP server. '
'Format <UNIX socket path | TCP addr:port>')
parser.add_argument('--num-retries', type=int, default=10,
help='Number of times to reconnect before giving up.')
parser.add_argument('--retry-delay', type=int,
help='Time(s) to wait before next retry. '
'Default action is to wait 2s between each retry.')
parser.add_argument('--log-file', help='The Log file name')
parser.add_argument('--log-level', default='WARNING',
help='Log level <CRITICAL|ERROR|WARNING|INFO|DEBUG|>')
parser.add_argument('--asyncio-debug', action='store_true',
help='Enable debug mode for asyncio loop. '
'Generates lot of output, makes TUI unusable when '
'logs are logged in the TUI. '
'Use only when logging to a file.')
args = parser.parse_args()
try:
address = QEMUMonitorProtocol.parse_address(args.qmp_server)
except QMPBadPortError as err:
parser.error(str(err))
app = App(address, args.num_retries, args.retry_delay)
root_logger = logging.getLogger()
root_logger.setLevel(logging.getLevelName(args.log_level))
if args.log_file:
root_logger.addHandler(logging.FileHandler(args.log_file))
else:
root_logger.addHandler(TUILogHandler(app))
app.run(args.asyncio_debug)
if __name__ == '__main__':
main()
| 22,153 | 32.926493 | 79 | py |
qemu | qemu-master/python/qemu/qmp/qmp_client.py | """
QMP Protocol Implementation
This module provides the `QMPClient` class, which can be used to connect
and send commands to a QMP server such as QEMU. The QMP class can be
used to either connect to a listening server, or used to listen and
accept an incoming connection from that server.
"""
import asyncio
import logging
import socket
import struct
from typing import (
Dict,
List,
Mapping,
Optional,
Union,
cast,
)
from .error import ProtocolError, QMPError
from .events import Events
from .message import Message
from .models import ErrorResponse, Greeting
from .protocol import AsyncProtocol, Runstate, require
from .util import (
bottom_half,
exception_summary,
pretty_traceback,
upper_half,
)
class _WrappedProtocolError(ProtocolError):
"""
Abstract exception class for Protocol errors that wrap an Exception.
:param error_message: Human-readable string describing the error.
:param exc: The root-cause exception.
"""
def __init__(self, error_message: str, exc: Exception):
super().__init__(error_message)
self.exc = exc
def __str__(self) -> str:
return f"{self.error_message}: {self.exc!s}"
class GreetingError(_WrappedProtocolError):
"""
An exception occurred during the Greeting phase.
:param error_message: Human-readable string describing the error.
:param exc: The root-cause exception.
"""
class NegotiationError(_WrappedProtocolError):
"""
An exception occurred during the Negotiation phase.
:param error_message: Human-readable string describing the error.
:param exc: The root-cause exception.
"""
class ExecuteError(QMPError):
"""
Exception raised by `QMPClient.execute()` on RPC failure.
:param error_response: The RPC error response object.
:param sent: The sent RPC message that caused the failure.
:param received: The raw RPC error reply received.
"""
def __init__(self, error_response: ErrorResponse,
sent: Message, received: Message):
super().__init__(error_response.error.desc)
#: The sent `Message` that caused the failure
self.sent: Message = sent
#: The received `Message` that indicated failure
self.received: Message = received
#: The parsed error response
self.error: ErrorResponse = error_response
#: The QMP error class
self.error_class: str = error_response.error.class_
class ExecInterruptedError(QMPError):
"""
Exception raised by `execute()` (et al) when an RPC is interrupted.
This error is raised when an `execute()` statement could not be
completed. This can occur because the connection itself was
terminated before a reply was received.
The true cause of the interruption will be available via `disconnect()`.
"""
class _MsgProtocolError(ProtocolError):
"""
Abstract error class for protocol errors that have a `Message` object.
This Exception class is used for protocol errors where the `Message`
was mechanically understood, but was found to be inappropriate or
malformed.
:param error_message: Human-readable string describing the error.
:param msg: The QMP `Message` that caused the error.
"""
def __init__(self, error_message: str, msg: Message):
super().__init__(error_message)
#: The received `Message` that caused the error.
self.msg: Message = msg
def __str__(self) -> str:
return "\n".join([
super().__str__(),
f" Message was: {str(self.msg)}\n",
])
class ServerParseError(_MsgProtocolError):
"""
The Server sent a `Message` indicating parsing failure.
i.e. A reply has arrived from the server, but it is missing the "ID"
field, indicating a parsing error.
:param error_message: Human-readable string describing the error.
:param msg: The QMP `Message` that caused the error.
"""
class BadReplyError(_MsgProtocolError):
"""
An execution reply was successfully routed, but not understood.
If a QMP message is received with an 'id' field to allow it to be
routed, but is otherwise malformed, this exception will be raised.
A reply message is malformed if it is missing either the 'return' or
'error' keys, or if the 'error' value has missing keys or members of
the wrong type.
:param error_message: Human-readable string describing the error.
:param msg: The malformed reply that was received.
:param sent: The message that was sent that prompted the error.
"""
def __init__(self, error_message: str, msg: Message, sent: Message):
super().__init__(error_message, msg)
#: The sent `Message` that caused the failure
self.sent = sent
class QMPClient(AsyncProtocol[Message], Events):
"""
Implements a QMP client connection.
QMP can be used to establish a connection as either the transport
client or server, though this class always acts as the QMP client.
:param name: Optional nickname for the connection, used for logging.
Basic script-style usage looks like this::
qmp = QMPClient('my_virtual_machine_name')
await qmp.connect(('127.0.0.1', 1234))
...
res = await qmp.execute('block-query')
...
await qmp.disconnect()
Basic async client-style usage looks like this::
class Client:
def __init__(self, name: str):
self.qmp = QMPClient(name)
async def watch_events(self):
try:
async for event in self.qmp.events:
print(f"Event: {event['event']}")
except asyncio.CancelledError:
return
async def run(self, address='/tmp/qemu.socket'):
await self.qmp.connect(address)
asyncio.create_task(self.watch_events())
await self.qmp.runstate_changed.wait()
await self.disconnect()
See `qmp.events` for more detail on event handling patterns.
"""
#: Logger object used for debugging messages.
logger = logging.getLogger(__name__)
# Read buffer limit; 10MB like libvirt default
_limit = 10 * 1024 * 1024
# Type alias for pending execute() result items
_PendingT = Union[Message, ExecInterruptedError]
def __init__(self, name: Optional[str] = None) -> None:
super().__init__(name)
Events.__init__(self)
#: Whether or not to await a greeting after establishing a connection.
self.await_greeting: bool = True
#: Whether or not to perform capabilities negotiation upon connection.
#: Implies `await_greeting`.
self.negotiate: bool = True
# Cached Greeting, if one was awaited.
self._greeting: Optional[Greeting] = None
# Command ID counter
self._execute_id = 0
# Incoming RPC reply messages.
self._pending: Dict[
Union[str, None],
'asyncio.Queue[QMPClient._PendingT]'
] = {}
@property
def greeting(self) -> Optional[Greeting]:
"""The `Greeting` from the QMP server, if any."""
return self._greeting
@upper_half
async def _establish_session(self) -> None:
"""
Initiate the QMP session.
Wait for the QMP greeting and perform capabilities negotiation.
:raise GreetingError: When the greeting is not understood.
:raise NegotiationError: If the negotiation fails.
:raise EOFError: When the server unexpectedly hangs up.
:raise OSError: For underlying stream errors.
"""
self._greeting = None
self._pending = {}
if self.await_greeting or self.negotiate:
self._greeting = await self._get_greeting()
if self.negotiate:
await self._negotiate()
# This will start the reader/writers:
await super()._establish_session()
@upper_half
async def _get_greeting(self) -> Greeting:
"""
:raise GreetingError: When the greeting is not understood.
:raise EOFError: When the server unexpectedly hangs up.
:raise OSError: For underlying stream errors.
:return: the Greeting object given by the server.
"""
self.logger.debug("Awaiting greeting ...")
try:
msg = await self._recv()
return Greeting(msg)
except (ProtocolError, KeyError, TypeError) as err:
emsg = "Did not understand Greeting"
self.logger.error("%s: %s", emsg, exception_summary(err))
self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
raise GreetingError(emsg, err) from err
except BaseException as err:
# EOFError, OSError, or something unexpected.
emsg = "Failed to receive Greeting"
self.logger.error("%s: %s", emsg, exception_summary(err))
self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
raise
@upper_half
async def _negotiate(self) -> None:
"""
Perform QMP capabilities negotiation.
:raise NegotiationError: When negotiation fails.
:raise EOFError: When the server unexpectedly hangs up.
:raise OSError: For underlying stream errors.
"""
self.logger.debug("Negotiating capabilities ...")
arguments: Dict[str, List[str]] = {}
if self._greeting and 'oob' in self._greeting.QMP.capabilities:
arguments.setdefault('enable', []).append('oob')
msg = self.make_execute_msg('qmp_capabilities', arguments=arguments)
# It's not safe to use execute() here, because the reader/writers
# aren't running. AsyncProtocol *requires* that a new session
# does not fail after the reader/writers are running!
try:
await self._send(msg)
reply = await self._recv()
assert 'return' in reply
assert 'error' not in reply
except (ProtocolError, AssertionError) as err:
emsg = "Negotiation failed"
self.logger.error("%s: %s", emsg, exception_summary(err))
self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
raise NegotiationError(emsg, err) from err
except BaseException as err:
# EOFError, OSError, or something unexpected.
emsg = "Negotiation failed"
self.logger.error("%s: %s", emsg, exception_summary(err))
self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
raise
@bottom_half
async def _bh_disconnect(self) -> None:
try:
await super()._bh_disconnect()
finally:
if self._pending:
self.logger.debug("Cancelling pending executions")
keys = self._pending.keys()
for key in keys:
self.logger.debug("Cancelling execution '%s'", key)
self._pending[key].put_nowait(
ExecInterruptedError("Disconnected")
)
self.logger.debug("QMP Disconnected.")
@upper_half
def _cleanup(self) -> None:
super()._cleanup()
assert not self._pending
@bottom_half
async def _on_message(self, msg: Message) -> None:
"""
Add an incoming message to the appropriate queue/handler.
:raise ServerParseError: When Message indicates server parse failure.
"""
# Incoming messages are not fully parsed/validated here;
# do only light peeking to know how to route the messages.
if 'event' in msg:
await self._event_dispatch(msg)
return
# Below, we assume everything left is an execute/exec-oob response.
exec_id = cast(Optional[str], msg.get('id'))
if exec_id in self._pending:
await self._pending[exec_id].put(msg)
return
# We have a message we can't route back to a caller.
is_error = 'error' in msg
has_id = 'id' in msg
if is_error and not has_id:
# This is very likely a server parsing error.
# It doesn't inherently belong to any pending execution.
# Instead of performing clever recovery, just terminate.
# See "NOTE" in qmp-spec.txt, section 2.4.2
raise ServerParseError(
("Server sent an error response without an ID, "
"but there are no ID-less executions pending. "
"Assuming this is a server parser failure."),
msg
)
# qmp-spec.txt, section 2.4:
# 'Clients should drop all the responses
# that have an unknown "id" field.'
self.logger.log(
logging.ERROR if is_error else logging.WARNING,
"Unknown ID '%s', message dropped.",
exec_id,
)
self.logger.debug("Unroutable message: %s", str(msg))
@upper_half
@bottom_half
async def _do_recv(self) -> Message:
"""
:raise OSError: When a stream error is encountered.
:raise EOFError: When the stream is at EOF.
:raise ProtocolError:
When the Message is not understood.
See also `Message._deserialize`.
:return: A single QMP `Message`.
"""
msg_bytes = await self._readline()
msg = Message(msg_bytes, eager=True)
return msg
@upper_half
@bottom_half
def _do_send(self, msg: Message) -> None:
"""
:raise ValueError: JSON serialization failure
:raise TypeError: JSON serialization failure
:raise OSError: When a stream error is encountered.
"""
assert self._writer is not None
self._writer.write(bytes(msg))
@upper_half
def _get_exec_id(self) -> str:
exec_id = f"__qmp#{self._execute_id:05d}"
self._execute_id += 1
return exec_id
@upper_half
async def _issue(self, msg: Message) -> Union[None, str]:
"""
Issue a QMP `Message` and do not wait for a reply.
:param msg: The QMP `Message` to send to the server.
:return: The ID of the `Message` sent.
"""
msg_id: Optional[str] = None
if 'id' in msg:
assert isinstance(msg['id'], str)
msg_id = msg['id']
self._pending[msg_id] = asyncio.Queue(maxsize=1)
try:
await self._outgoing.put(msg)
except:
del self._pending[msg_id]
raise
return msg_id
@upper_half
async def _reply(self, msg_id: Union[str, None]) -> Message:
"""
Await a reply to a previously issued QMP message.
:param msg_id: The ID of the previously issued message.
:return: The reply from the server.
:raise ExecInterruptedError:
When the reply could not be retrieved because the connection
was lost, or some other problem.
"""
queue = self._pending[msg_id]
try:
result = await queue.get()
if isinstance(result, ExecInterruptedError):
raise result
return result
finally:
del self._pending[msg_id]
@upper_half
async def _execute(self, msg: Message, assign_id: bool = True) -> Message:
"""
Send a QMP `Message` to the server and await a reply.
This method *assumes* you are sending some kind of an execute
statement that *will* receive a reply.
An execution ID will be assigned if assign_id is `True`. It can be
disabled, but this requires that an ID is manually assigned
instead. For manually assigned IDs, you must not use the string
'__qmp#' anywhere in the ID.
:param msg: The QMP `Message` to execute.
:param assign_id: If True, assign a new execution ID.
:return: Execution reply from the server.
:raise ExecInterruptedError:
When the reply could not be retrieved because the connection
was lost, or some other problem.
"""
if assign_id:
msg['id'] = self._get_exec_id()
elif 'id' in msg:
assert isinstance(msg['id'], str)
assert '__qmp#' not in msg['id']
exec_id = await self._issue(msg)
return await self._reply(exec_id)
@upper_half
@require(Runstate.RUNNING)
async def _raw(
self,
msg: Union[Message, Mapping[str, object], bytes],
assign_id: bool = True,
) -> Message:
"""
Issue a raw `Message` to the QMP server and await a reply.
:param msg:
A Message to send to the server. It may be a `Message`, any
Mapping (including Dict), or raw bytes.
:param assign_id:
Assign an arbitrary execution ID to this message. If
`False`, the existing id must either be absent (and no other
such pending execution may omit an ID) or a string. If it is
a string, it must not start with '__qmp#' and no other such
pending execution may currently be using that ID.
:return: Execution reply from the server.
:raise ExecInterruptedError:
When the reply could not be retrieved because the connection
was lost, or some other problem.
:raise TypeError:
When assign_id is `False`, an ID is given, and it is not a string.
:raise ValueError:
When assign_id is `False`, but the ID is not usable;
Either because it starts with '__qmp#' or it is already in-use.
"""
# 1. convert generic Mapping or bytes to a QMP Message
# 2. copy Message objects so that we assign an ID only to the copy.
msg = Message(msg)
exec_id = msg.get('id')
if not assign_id and 'id' in msg:
if not isinstance(exec_id, str):
raise TypeError(f"ID ('{exec_id}') must be a string.")
if exec_id.startswith('__qmp#'):
raise ValueError(
f"ID ('{exec_id}') must not start with '__qmp#'."
)
if not assign_id and exec_id in self._pending:
raise ValueError(
f"ID '{exec_id}' is in-use and cannot be used."
)
return await self._execute(msg, assign_id=assign_id)
@upper_half
@require(Runstate.RUNNING)
async def execute_msg(self, msg: Message) -> object:
"""
Execute a QMP command and return its value.
:param msg: The QMP `Message` to execute.
:return:
The command execution return value from the server. The type of
object returned depends on the command that was issued,
though most in QEMU return a `dict`.
:raise ValueError:
If the QMP `Message` does not have either the 'execute' or
'exec-oob' fields set.
:raise ExecuteError: When the server returns an error response.
:raise ExecInterruptedError: if the connection was terminated early.
"""
if not ('execute' in msg or 'exec-oob' in msg):
raise ValueError("Requires 'execute' or 'exec-oob' message")
# Copy the Message so that the ID assigned by _execute() is
# local to this method; allowing the ID to be seen in raised
# Exceptions but without modifying the caller's held copy.
msg = Message(msg)
reply = await self._execute(msg)
if 'error' in reply:
try:
error_response = ErrorResponse(reply)
except (KeyError, TypeError) as err:
# Error response was malformed.
raise BadReplyError(
"QMP error reply is malformed", reply, msg,
) from err
raise ExecuteError(error_response, msg, reply)
if 'return' not in reply:
raise BadReplyError(
"QMP reply is missing a 'error' or 'return' member",
reply, msg,
)
return reply['return']
@classmethod
def make_execute_msg(cls, cmd: str,
arguments: Optional[Mapping[str, object]] = None,
oob: bool = False) -> Message:
"""
Create an executable message to be sent by `execute_msg` later.
:param cmd: QMP command name.
:param arguments: Arguments (if any). Must be JSON-serializable.
:param oob: If `True`, execute "out of band".
:return: An executable QMP `Message`.
"""
msg = Message({'exec-oob' if oob else 'execute': cmd})
if arguments is not None:
msg['arguments'] = arguments
return msg
@upper_half
async def execute(self, cmd: str,
arguments: Optional[Mapping[str, object]] = None,
oob: bool = False) -> object:
"""
Execute a QMP command and return its value.
:param cmd: QMP command name.
:param arguments: Arguments (if any). Must be JSON-serializable.
:param oob: If `True`, execute "out of band".
:return:
The command execution return value from the server. The type of
object returned depends on the command that was issued,
though most in QEMU return a `dict`.
:raise ExecuteError: When the server returns an error response.
:raise ExecInterruptedError: if the connection was terminated early.
"""
msg = self.make_execute_msg(cmd, arguments, oob=oob)
return await self.execute_msg(msg)
@upper_half
@require(Runstate.RUNNING)
def send_fd_scm(self, fd: int) -> None:
"""
Send a file descriptor to the remote via SCM_RIGHTS.
"""
assert self._writer is not None
sock = self._writer.transport.get_extra_info('socket')
if sock.family != socket.AF_UNIX:
raise QMPError("Sending file descriptors requires a UNIX socket.")
if not hasattr(sock, 'sendmsg'):
# We need to void the warranty sticker.
# Access to sendmsg is scheduled for removal in Python 3.11.
# Find the real backing socket to use it anyway.
sock = sock._sock # pylint: disable=protected-access
sock.sendmsg(
[b' '],
[(socket.SOL_SOCKET, socket.SCM_RIGHTS, struct.pack('@i', fd))]
)
| 22,564 | 33.397866 | 78 | py |
qemu | qemu-master/python/qemu/qmp/util.py | """
Miscellaneous Utilities
This module provides asyncio utilities and compatibility wrappers for
Python 3.6 to provide some features that otherwise become available in
Python 3.7+.
Various logging and debugging utilities are also provided, such as
`exception_summary()` and `pretty_traceback()`, used primarily for
adding information into the logging stream.
"""
import asyncio
import sys
import traceback
from typing import (
Any,
Coroutine,
Optional,
TypeVar,
cast,
)
T = TypeVar('T')
# --------------------------
# Section: Utility Functions
# --------------------------
async def flush(writer: asyncio.StreamWriter) -> None:
"""
Utility function to ensure a StreamWriter is *fully* drained.
`asyncio.StreamWriter.drain` only promises we will return to below
the "high-water mark". This function ensures we flush the entire
buffer -- by setting the high water mark to 0 and then calling
drain. The flow control limits are restored after the call is
completed.
"""
transport = cast( # type: ignore[redundant-cast]
asyncio.WriteTransport, writer.transport
)
# https://github.com/python/typeshed/issues/5779
low, high = transport.get_write_buffer_limits() # type: ignore
transport.set_write_buffer_limits(0, 0)
try:
await writer.drain()
finally:
transport.set_write_buffer_limits(high, low)
def upper_half(func: T) -> T:
"""
Do-nothing decorator that annotates a method as an "upper-half" method.
These methods must not call bottom-half functions directly, but can
schedule them to run.
"""
return func
def bottom_half(func: T) -> T:
"""
Do-nothing decorator that annotates a method as a "bottom-half" method.
These methods must take great care to handle their own exceptions whenever
possible. If they go unhandled, they will cause termination of the loop.
These methods do not, in general, have the ability to directly
report information to a caller’s context and will usually be
collected as a Task result instead.
They must not call upper-half functions directly.
"""
return func
# -------------------------------
# Section: Compatibility Wrappers
# -------------------------------
def create_task(coro: Coroutine[Any, Any, T],
loop: Optional[asyncio.AbstractEventLoop] = None
) -> 'asyncio.Future[T]':
"""
Python 3.6-compatible `asyncio.create_task` wrapper.
:param coro: The coroutine to execute in a task.
:param loop: Optionally, the loop to create the task in.
:return: An `asyncio.Future` object.
"""
if sys.version_info >= (3, 7):
if loop is not None:
return loop.create_task(coro)
return asyncio.create_task(coro) # pylint: disable=no-member
# Python 3.6:
return asyncio.ensure_future(coro, loop=loop)
def is_closing(writer: asyncio.StreamWriter) -> bool:
"""
Python 3.6-compatible `asyncio.StreamWriter.is_closing` wrapper.
:param writer: The `asyncio.StreamWriter` object.
:return: `True` if the writer is closing, or closed.
"""
if sys.version_info >= (3, 7):
return writer.is_closing()
# Python 3.6:
transport = writer.transport
assert isinstance(transport, asyncio.WriteTransport)
return transport.is_closing()
async def wait_closed(writer: asyncio.StreamWriter) -> None:
"""
Python 3.6-compatible `asyncio.StreamWriter.wait_closed` wrapper.
:param writer: The `asyncio.StreamWriter` to wait on.
"""
if sys.version_info >= (3, 7):
await writer.wait_closed()
return
# Python 3.6
transport = writer.transport
assert isinstance(transport, asyncio.WriteTransport)
while not transport.is_closing():
await asyncio.sleep(0)
# This is an ugly workaround, but it's the best I can come up with.
sock = transport.get_extra_info('socket')
if sock is None:
# Our transport doesn't have a socket? ...
# Nothing we can reasonably do.
return
while sock.fileno() != -1:
await asyncio.sleep(0)
def asyncio_run(coro: Coroutine[Any, Any, T], *, debug: bool = False) -> T:
"""
Python 3.6-compatible `asyncio.run` wrapper.
:param coro: A coroutine to execute now.
:return: The return value from the coroutine.
"""
if sys.version_info >= (3, 7):
return asyncio.run(coro, debug=debug)
# Python 3.6
loop = asyncio.get_event_loop()
loop.set_debug(debug)
ret = loop.run_until_complete(coro)
loop.close()
return ret
# ----------------------------
# Section: Logging & Debugging
# ----------------------------
def exception_summary(exc: BaseException) -> str:
"""
Return a summary string of an arbitrary exception.
It will be of the form "ExceptionType: Error Message", if the error
string is non-empty, and just "ExceptionType" otherwise.
"""
name = type(exc).__qualname__
smod = type(exc).__module__
if smod not in ("__main__", "builtins"):
name = smod + '.' + name
error = str(exc)
if error:
return f"{name}: {error}"
return name
def pretty_traceback(prefix: str = " | ") -> str:
"""
Formats the current traceback, indented to provide visual distinction.
This is useful for printing a traceback within a traceback for
debugging purposes when encapsulating errors to deliver them up the
stack; when those errors are printed, this helps provide a nice
visual grouping to quickly identify the parts of the error that
belong to the inner exception.
:param prefix: The prefix to append to each line of the traceback.
:return: A string, formatted something like the following::
| Traceback (most recent call last):
| File "foobar.py", line 42, in arbitrary_example
| foo.baz()
| ArbitraryError: [Errno 42] Something bad happened!
"""
output = "".join(traceback.format_exception(*sys.exc_info()))
exc_lines = []
for line in output.split('\n'):
exc_lines.append(prefix + line)
# The last line is always empty, omit it
return "\n".join(exc_lines[:-1])
| 6,229 | 27.318182 | 78 | py |
qemu | qemu-master/python/qemu/qmp/protocol.py | """
Generic Asynchronous Message-based Protocol Support
This module provides a generic framework for sending and receiving
messages over an asyncio stream. `AsyncProtocol` is an abstract class
that implements the core mechanisms of a simple send/receive protocol,
and is designed to be extended.
In this package, it is used as the implementation for the `QMPClient`
class.
"""
# It's all the docstrings ... ! It's long for a good reason ^_^;
# pylint: disable=too-many-lines
import asyncio
from asyncio import StreamReader, StreamWriter
from enum import Enum
from functools import wraps
import logging
import socket
from ssl import SSLContext
from typing import (
Any,
Awaitable,
Callable,
Generic,
List,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
from .error import QMPError
from .util import (
bottom_half,
create_task,
exception_summary,
flush,
is_closing,
pretty_traceback,
upper_half,
wait_closed,
)
T = TypeVar('T')
_U = TypeVar('_U')
_TaskFN = Callable[[], Awaitable[None]] # aka ``async def func() -> None``
InternetAddrT = Tuple[str, int]
UnixAddrT = str
SocketAddrT = Union[UnixAddrT, InternetAddrT]
class Runstate(Enum):
"""Protocol session runstate."""
#: Fully quiesced and disconnected.
IDLE = 0
#: In the process of connecting or establishing a session.
CONNECTING = 1
#: Fully connected and active session.
RUNNING = 2
#: In the process of disconnecting.
#: Runstate may be returned to `IDLE` by calling `disconnect()`.
DISCONNECTING = 3
class ConnectError(QMPError):
"""
Raised when the initial connection process has failed.
This Exception always wraps a "root cause" exception that can be
interrogated for additional information.
:param error_message: Human-readable string describing the error.
:param exc: The root-cause exception.
"""
def __init__(self, error_message: str, exc: Exception):
super().__init__(error_message)
#: Human-readable error string
self.error_message: str = error_message
#: Wrapped root cause exception
self.exc: Exception = exc
def __str__(self) -> str:
cause = str(self.exc)
if not cause:
# If there's no error string, use the exception name.
cause = exception_summary(self.exc)
return f"{self.error_message}: {cause}"
class StateError(QMPError):
"""
An API command (connect, execute, etc) was issued at an inappropriate time.
This error is raised when a command like
:py:meth:`~AsyncProtocol.connect()` is issued at an inappropriate
time.
:param error_message: Human-readable string describing the state violation.
:param state: The actual `Runstate` seen at the time of the violation.
:param required: The `Runstate` required to process this command.
"""
def __init__(self, error_message: str,
state: Runstate, required: Runstate):
super().__init__(error_message)
self.error_message = error_message
self.state = state
self.required = required
F = TypeVar('F', bound=Callable[..., Any]) # pylint: disable=invalid-name
# Don't Panic.
def require(required_state: Runstate) -> Callable[[F], F]:
"""
Decorator: protect a method so it can only be run in a certain `Runstate`.
:param required_state: The `Runstate` required to invoke this method.
:raise StateError: When the required `Runstate` is not met.
"""
def _decorator(func: F) -> F:
# _decorator is the decorator that is built by calling the
# require() decorator factory; e.g.:
#
# @require(Runstate.IDLE) def foo(): ...
# will replace 'foo' with the result of '_decorator(foo)'.
@wraps(func)
def _wrapper(proto: 'AsyncProtocol[Any]',
*args: Any, **kwargs: Any) -> Any:
# _wrapper is the function that gets executed prior to the
# decorated method.
name = type(proto).__name__
if proto.runstate != required_state:
if proto.runstate == Runstate.CONNECTING:
emsg = f"{name} is currently connecting."
elif proto.runstate == Runstate.DISCONNECTING:
emsg = (f"{name} is disconnecting."
" Call disconnect() to return to IDLE state.")
elif proto.runstate == Runstate.RUNNING:
emsg = f"{name} is already connected and running."
elif proto.runstate == Runstate.IDLE:
emsg = f"{name} is disconnected and idle."
else:
assert False
raise StateError(emsg, proto.runstate, required_state)
# No StateError, so call the wrapped method.
return func(proto, *args, **kwargs)
# Return the decorated method;
# Transforming Func to Decorated[Func].
return cast(F, _wrapper)
# Return the decorator instance from the decorator factory. Phew!
return _decorator
class AsyncProtocol(Generic[T]):
"""
AsyncProtocol implements a generic async message-based protocol.
This protocol assumes the basic unit of information transfer between
client and server is a "message", the details of which are left up
to the implementation. It assumes the sending and receiving of these
messages is full-duplex and not necessarily correlated; i.e. it
supports asynchronous inbound messages.
It is designed to be extended by a specific protocol which provides
the implementations for how to read and send messages. These must be
defined in `_do_recv()` and `_do_send()`, respectively.
Other callbacks have a default implementation, but are intended to be
either extended or overridden:
- `_establish_session`:
The base implementation starts the reader/writer tasks.
A protocol implementation can override this call, inserting
actions to be taken prior to starting the reader/writer tasks
before the super() call; actions needing to occur afterwards
can be written after the super() call.
- `_on_message`:
Actions to be performed when a message is received.
- `_cb_outbound`:
Logging/Filtering hook for all outbound messages.
- `_cb_inbound`:
Logging/Filtering hook for all inbound messages.
This hook runs *before* `_on_message()`.
:param name:
Name used for logging messages, if any. By default, messages
will log to 'qemu.qmp.protocol', but each individual connection
can be given its own logger by giving it a name; messages will
then log to 'qemu.qmp.protocol.${name}'.
"""
# pylint: disable=too-many-instance-attributes
#: Logger object for debugging messages from this connection.
logger = logging.getLogger(__name__)
# Maximum allowable size of read buffer
_limit = 64 * 1024
# -------------------------
# Section: Public interface
# -------------------------
def __init__(self, name: Optional[str] = None) -> None:
#: The nickname for this connection, if any.
self.name: Optional[str] = name
if self.name is not None:
self.logger = self.logger.getChild(self.name)
# stream I/O
self._reader: Optional[StreamReader] = None
self._writer: Optional[StreamWriter] = None
# Outbound Message queue
self._outgoing: asyncio.Queue[T]
# Special, long-running tasks:
self._reader_task: Optional[asyncio.Future[None]] = None
self._writer_task: Optional[asyncio.Future[None]] = None
# Aggregate of the above two tasks, used for Exception management.
self._bh_tasks: Optional[asyncio.Future[Tuple[None, None]]] = None
#: Disconnect task. The disconnect implementation runs in a task
#: so that asynchronous disconnects (initiated by the
#: reader/writer) are allowed to wait for the reader/writers to
#: exit.
self._dc_task: Optional[asyncio.Future[None]] = None
self._runstate = Runstate.IDLE
self._runstate_changed: Optional[asyncio.Event] = None
# Server state for start_server() and _incoming()
self._server: Optional[asyncio.AbstractServer] = None
self._accepted: Optional[asyncio.Event] = None
def __repr__(self) -> str:
cls_name = type(self).__name__
tokens = []
if self.name is not None:
tokens.append(f"name={self.name!r}")
tokens.append(f"runstate={self.runstate.name}")
return f"<{cls_name} {' '.join(tokens)}>"
@property # @upper_half
def runstate(self) -> Runstate:
"""The current `Runstate` of the connection."""
return self._runstate
@upper_half
async def runstate_changed(self) -> Runstate:
"""
Wait for the `runstate` to change, then return that runstate.
"""
await self._runstate_event.wait()
return self.runstate
@upper_half
@require(Runstate.IDLE)
async def start_server_and_accept(
self, address: SocketAddrT,
ssl: Optional[SSLContext] = None
) -> None:
"""
Accept a connection and begin processing message queues.
If this call fails, `runstate` is guaranteed to be set back to `IDLE`.
This method is precisely equivalent to calling `start_server()`
followed by `accept()`.
:param address:
Address to listen on; UNIX socket path or TCP address/port.
:param ssl: SSL context to use, if any.
:raise StateError: When the `Runstate` is not `IDLE`.
:raise ConnectError:
When a connection or session cannot be established.
This exception will wrap a more concrete one. In most cases,
the wrapped exception will be `OSError` or `EOFError`. If a
protocol-level failure occurs while establishing a new
session, the wrapped error may also be an `QMPError`.
"""
await self.start_server(address, ssl)
await self.accept()
assert self.runstate == Runstate.RUNNING
@upper_half
@require(Runstate.IDLE)
async def open_with_socket(self, sock: socket.socket) -> None:
"""
Start connection with given socket.
:param sock: A socket.
:raise StateError: When the `Runstate` is not `IDLE`.
"""
self._reader, self._writer = await asyncio.open_connection(sock=sock)
self._set_state(Runstate.CONNECTING)
@upper_half
@require(Runstate.IDLE)
async def start_server(self, address: SocketAddrT,
ssl: Optional[SSLContext] = None) -> None:
"""
Start listening for an incoming connection, but do not wait for a peer.
This method starts listening for an incoming connection, but
does not block waiting for a peer. This call will return
immediately after binding and listening on a socket. A later
call to `accept()` must be made in order to finalize the
incoming connection.
:param address:
Address to listen on; UNIX socket path or TCP address/port.
:param ssl: SSL context to use, if any.
:raise StateError: When the `Runstate` is not `IDLE`.
:raise ConnectError:
When the server could not start listening on this address.
This exception will wrap a more concrete one. In most cases,
the wrapped exception will be `OSError`.
"""
await self._session_guard(
self._do_start_server(address, ssl),
'Failed to establish connection')
assert self.runstate == Runstate.CONNECTING
@upper_half
@require(Runstate.CONNECTING)
async def accept(self) -> None:
"""
Accept an incoming connection and begin processing message queues.
If this call fails, `runstate` is guaranteed to be set back to `IDLE`.
:raise StateError: When the `Runstate` is not `CONNECTING`.
:raise QMPError: When `start_server()` was not called yet.
:raise ConnectError:
When a connection or session cannot be established.
This exception will wrap a more concrete one. In most cases,
the wrapped exception will be `OSError` or `EOFError`. If a
protocol-level failure occurs while establishing a new
session, the wrapped error may also be an `QMPError`.
"""
if not self._reader:
if self._accepted is None:
raise QMPError("Cannot call accept() before start_server().")
await self._session_guard(
self._do_accept(),
'Failed to establish connection')
await self._session_guard(
self._establish_session(),
'Failed to establish session')
assert self.runstate == Runstate.RUNNING
@upper_half
@require(Runstate.IDLE)
async def connect(self, address: SocketAddrT,
ssl: Optional[SSLContext] = None) -> None:
"""
Connect to the server and begin processing message queues.
If this call fails, `runstate` is guaranteed to be set back to `IDLE`.
:param address:
Address to connect to; UNIX socket path or TCP address/port.
:param ssl: SSL context to use, if any.
:raise StateError: When the `Runstate` is not `IDLE`.
:raise ConnectError:
When a connection or session cannot be established.
This exception will wrap a more concrete one. In most cases,
the wrapped exception will be `OSError` or `EOFError`. If a
protocol-level failure occurs while establishing a new
session, the wrapped error may also be an `QMPError`.
"""
await self._session_guard(
self._do_connect(address, ssl),
'Failed to establish connection')
await self._session_guard(
self._establish_session(),
'Failed to establish session')
assert self.runstate == Runstate.RUNNING
@upper_half
async def disconnect(self) -> None:
"""
Disconnect and wait for all tasks to fully stop.
If there was an exception that caused the reader/writers to
terminate prematurely, it will be raised here.
:raise Exception: When the reader or writer terminate unexpectedly.
"""
self.logger.debug("disconnect() called.")
self._schedule_disconnect()
await self._wait_disconnect()
# --------------------------
# Section: Session machinery
# --------------------------
async def _session_guard(self, coro: Awaitable[None], emsg: str) -> None:
"""
Async guard function used to roll back to `IDLE` on any error.
On any Exception, the state machine will be reset back to
`IDLE`. Most Exceptions will be wrapped with `ConnectError`, but
`BaseException` events will be left alone (This includes
asyncio.CancelledError, even prior to Python 3.8).
:param error_message:
Human-readable string describing what connection phase failed.
:raise BaseException:
When `BaseException` occurs in the guarded block.
:raise ConnectError:
When any other error is encountered in the guarded block.
"""
# Note: After Python 3.6 support is removed, this should be an
# @asynccontextmanager instead of accepting a callback.
try:
await coro
except BaseException as err:
self.logger.error("%s: %s", emsg, exception_summary(err))
self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
try:
# Reset the runstate back to IDLE.
await self.disconnect()
except:
# We don't expect any Exceptions from the disconnect function
# here, because we failed to connect in the first place.
# The disconnect() function is intended to perform
# only cannot-fail cleanup here, but you never know.
emsg = (
"Unexpected bottom half exception. "
"This is a bug in the QMP library. "
"Please report it to <[email protected]> and "
"CC: John Snow <[email protected]>."
)
self.logger.critical("%s:\n%s\n", emsg, pretty_traceback())
raise
# CancelledError is an Exception with special semantic meaning;
# We do NOT want to wrap it up under ConnectError.
# NB: CancelledError is not a BaseException before Python 3.8
if isinstance(err, asyncio.CancelledError):
raise
# Any other kind of error can be treated as some kind of connection
# failure broadly. Inspect the 'exc' field to explore the root
# cause in greater detail.
if isinstance(err, Exception):
raise ConnectError(emsg, err) from err
# Raise BaseExceptions un-wrapped, they're more important.
raise
@property
def _runstate_event(self) -> asyncio.Event:
# asyncio.Event() objects should not be created prior to entrance into
# an event loop, so we can ensure we create it in the correct context.
# Create it on-demand *only* at the behest of an 'async def' method.
if not self._runstate_changed:
self._runstate_changed = asyncio.Event()
return self._runstate_changed
@upper_half
@bottom_half
def _set_state(self, state: Runstate) -> None:
"""
Change the `Runstate` of the protocol connection.
Signals the `runstate_changed` event.
"""
if state == self._runstate:
return
self.logger.debug("Transitioning from '%s' to '%s'.",
str(self._runstate), str(state))
self._runstate = state
self._runstate_event.set()
self._runstate_event.clear()
@bottom_half
async def _stop_server(self) -> None:
"""
Stop listening for / accepting new incoming connections.
"""
if self._server is None:
return
try:
self.logger.debug("Stopping server.")
self._server.close()
await self._server.wait_closed()
self.logger.debug("Server stopped.")
finally:
self._server = None
@bottom_half # However, it does not run from the R/W tasks.
async def _incoming(self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter) -> None:
"""
Accept an incoming connection and signal the upper_half.
This method does the minimum necessary to accept a single
incoming connection. It signals back to the upper_half ASAP so
that any errors during session initialization can occur
naturally in the caller's stack.
:param reader: Incoming `asyncio.StreamReader`
:param writer: Incoming `asyncio.StreamWriter`
"""
peer = writer.get_extra_info('peername', 'Unknown peer')
self.logger.debug("Incoming connection from %s", peer)
if self._reader or self._writer:
# Sadly, we can have more than one pending connection
# because of https://bugs.python.org/issue46715
# Close any extra connections we don't actually want.
self.logger.warning("Extraneous connection inadvertently accepted")
writer.close()
return
# A connection has been accepted; stop listening for new ones.
assert self._accepted is not None
await self._stop_server()
self._reader, self._writer = (reader, writer)
self._accepted.set()
@upper_half
async def _do_start_server(self, address: SocketAddrT,
ssl: Optional[SSLContext] = None) -> None:
"""
Start listening for an incoming connection, but do not wait for a peer.
This method starts listening for an incoming connection, but does not
block waiting for a peer. This call will return immediately after
binding and listening to a socket. A later call to accept() must be
made in order to finalize the incoming connection.
:param address:
Address to listen on; UNIX socket path or TCP address/port.
:param ssl: SSL context to use, if any.
:raise OSError: For stream-related errors.
"""
assert self.runstate == Runstate.IDLE
self._set_state(Runstate.CONNECTING)
self.logger.debug("Awaiting connection on %s ...", address)
self._accepted = asyncio.Event()
if isinstance(address, tuple):
coro = asyncio.start_server(
self._incoming,
host=address[0],
port=address[1],
ssl=ssl,
backlog=1,
limit=self._limit,
)
else:
coro = asyncio.start_unix_server(
self._incoming,
path=address,
ssl=ssl,
backlog=1,
limit=self._limit,
)
# Allow runstate watchers to witness 'CONNECTING' state; some
# failures in the streaming layer are synchronous and will not
# otherwise yield.
await asyncio.sleep(0)
# This will start the server (bind(2), listen(2)). It will also
# call accept(2) if we yield, but we don't block on that here.
self._server = await coro
self.logger.debug("Server listening on %s", address)
@upper_half
async def _do_accept(self) -> None:
"""
Wait for and accept an incoming connection.
Requires that we have not yet accepted an incoming connection
from the upper_half, but it's OK if the server is no longer
running because the bottom_half has already accepted the
connection.
"""
assert self._accepted is not None
await self._accepted.wait()
assert self._server is None
self._accepted = None
self.logger.debug("Connection accepted.")
@upper_half
async def _do_connect(self, address: SocketAddrT,
ssl: Optional[SSLContext] = None) -> None:
"""
Acting as the transport client, initiate a connection to a server.
:param address:
Address to connect to; UNIX socket path or TCP address/port.
:param ssl: SSL context to use, if any.
:raise OSError: For stream-related errors.
"""
assert self.runstate == Runstate.IDLE
self._set_state(Runstate.CONNECTING)
# Allow runstate watchers to witness 'CONNECTING' state; some
# failures in the streaming layer are synchronous and will not
# otherwise yield.
await asyncio.sleep(0)
self.logger.debug("Connecting to %s ...", address)
if isinstance(address, tuple):
connect = asyncio.open_connection(
address[0],
address[1],
ssl=ssl,
limit=self._limit,
)
else:
connect = asyncio.open_unix_connection(
path=address,
ssl=ssl,
limit=self._limit,
)
self._reader, self._writer = await connect
self.logger.debug("Connected.")
@upper_half
async def _establish_session(self) -> None:
"""
Establish a new session.
Starts the readers/writer tasks; subclasses may perform their
own negotiations here. The Runstate will be RUNNING upon
successful conclusion.
"""
assert self.runstate == Runstate.CONNECTING
self._outgoing = asyncio.Queue()
reader_coro = self._bh_loop_forever(self._bh_recv_message, 'Reader')
writer_coro = self._bh_loop_forever(self._bh_send_message, 'Writer')
self._reader_task = create_task(reader_coro)
self._writer_task = create_task(writer_coro)
self._bh_tasks = asyncio.gather(
self._reader_task,
self._writer_task,
)
self._set_state(Runstate.RUNNING)
await asyncio.sleep(0) # Allow runstate_event to process
@upper_half
@bottom_half
def _schedule_disconnect(self) -> None:
"""
Initiate a disconnect; idempotent.
This method is used both in the upper-half as a direct
consequence of `disconnect()`, and in the bottom-half in the
case of unhandled exceptions in the reader/writer tasks.
It can be invoked no matter what the `runstate` is.
"""
if not self._dc_task:
self._set_state(Runstate.DISCONNECTING)
self.logger.debug("Scheduling disconnect.")
self._dc_task = create_task(self._bh_disconnect())
@upper_half
async def _wait_disconnect(self) -> None:
"""
Waits for a previously scheduled disconnect to finish.
This method will gather any bottom half exceptions and re-raise
the one that occurred first; presuming it to be the root cause
of any subsequent Exceptions. It is intended to be used in the
upper half of the call chain.
:raise Exception:
Arbitrary exception re-raised on behalf of the reader/writer.
"""
assert self.runstate == Runstate.DISCONNECTING
assert self._dc_task
aws: List[Awaitable[object]] = [self._dc_task]
if self._bh_tasks:
aws.insert(0, self._bh_tasks)
all_defined_tasks = asyncio.gather(*aws)
# Ensure disconnect is done; Exception (if any) is not raised here:
await asyncio.wait((self._dc_task,))
try:
await all_defined_tasks # Raise Exceptions from the bottom half.
finally:
self._cleanup()
self._set_state(Runstate.IDLE)
@upper_half
def _cleanup(self) -> None:
"""
Fully reset this object to a clean state and return to `IDLE`.
"""
def _paranoid_task_erase(task: Optional['asyncio.Future[_U]']
) -> Optional['asyncio.Future[_U]']:
# Help to erase a task, ENSURING it is fully quiesced first.
assert (task is None) or task.done()
return None if (task and task.done()) else task
assert self.runstate == Runstate.DISCONNECTING
self._dc_task = _paranoid_task_erase(self._dc_task)
self._reader_task = _paranoid_task_erase(self._reader_task)
self._writer_task = _paranoid_task_erase(self._writer_task)
self._bh_tasks = _paranoid_task_erase(self._bh_tasks)
self._reader = None
self._writer = None
self._accepted = None
# NB: _runstate_changed cannot be cleared because we still need it to
# send the final runstate changed event ...!
# ----------------------------
# Section: Bottom Half methods
# ----------------------------
@bottom_half
async def _bh_disconnect(self) -> None:
"""
Disconnect and cancel all outstanding tasks.
It is designed to be called from its task context,
:py:obj:`~AsyncProtocol._dc_task`. By running in its own task,
it is free to wait on any pending actions that may still need to
occur in either the reader or writer tasks.
"""
assert self.runstate == Runstate.DISCONNECTING
def _done(task: Optional['asyncio.Future[Any]']) -> bool:
return task is not None and task.done()
# If the server is running, stop it.
await self._stop_server()
# Are we already in an error pathway? If either of the tasks are
# already done, or if we have no tasks but a reader/writer; we
# must be.
#
# NB: We can't use _bh_tasks to check for premature task
# completion, because it may not yet have had a chance to run
# and gather itself.
tasks = tuple(filter(None, (self._writer_task, self._reader_task)))
error_pathway = _done(self._reader_task) or _done(self._writer_task)
if not tasks:
error_pathway |= bool(self._reader) or bool(self._writer)
try:
# Try to flush the writer, if possible.
# This *may* cause an error and force us over into the error path.
if not error_pathway:
await self._bh_flush_writer()
except BaseException as err:
error_pathway = True
emsg = "Failed to flush the writer"
self.logger.error("%s: %s", emsg, exception_summary(err))
self.logger.debug("%s:\n%s\n", emsg, pretty_traceback())
raise
finally:
# Cancel any still-running tasks (Won't raise):
if self._writer_task is not None and not self._writer_task.done():
self.logger.debug("Cancelling writer task.")
self._writer_task.cancel()
if self._reader_task is not None and not self._reader_task.done():
self.logger.debug("Cancelling reader task.")
self._reader_task.cancel()
# Close out the tasks entirely (Won't raise):
if tasks:
self.logger.debug("Waiting for tasks to complete ...")
await asyncio.wait(tasks)
# Lastly, close the stream itself. (*May raise*!):
await self._bh_close_stream(error_pathway)
self.logger.debug("Disconnected.")
@bottom_half
async def _bh_flush_writer(self) -> None:
if not self._writer_task:
return
self.logger.debug("Draining the outbound queue ...")
await self._outgoing.join()
if self._writer is not None:
self.logger.debug("Flushing the StreamWriter ...")
await flush(self._writer)
@bottom_half
async def _bh_close_stream(self, error_pathway: bool = False) -> None:
# NB: Closing the writer also implicitly closes the reader.
if not self._writer:
return
if not is_closing(self._writer):
self.logger.debug("Closing StreamWriter.")
self._writer.close()
self.logger.debug("Waiting for StreamWriter to close ...")
try:
await wait_closed(self._writer)
except Exception: # pylint: disable=broad-except
# It's hard to tell if the Stream is already closed or
# not. Even if one of the tasks has failed, it may have
# failed for a higher-layered protocol reason. The
# stream could still be open and perfectly fine.
# I don't know how to discern its health here.
if error_pathway:
# We already know that *something* went wrong. Let's
# just trust that the Exception we already have is the
# better one to present to the user, even if we don't
# genuinely *know* the relationship between the two.
self.logger.debug(
"Discarding Exception from wait_closed:\n%s\n",
pretty_traceback(),
)
else:
# Oops, this is a brand-new error!
raise
finally:
self.logger.debug("StreamWriter closed.")
@bottom_half
async def _bh_loop_forever(self, async_fn: _TaskFN, name: str) -> None:
"""
Run one of the bottom-half methods in a loop forever.
If the bottom half ever raises any exception, schedule a
disconnect that will terminate the entire loop.
:param async_fn: The bottom-half method to run in a loop.
:param name: The name of this task, used for logging.
"""
try:
while True:
await async_fn()
except asyncio.CancelledError:
# We have been cancelled by _bh_disconnect, exit gracefully.
self.logger.debug("Task.%s: cancelled.", name)
return
except BaseException as err:
self.logger.log(
logging.INFO if isinstance(err, EOFError) else logging.ERROR,
"Task.%s: %s",
name, exception_summary(err)
)
self.logger.debug("Task.%s: failure:\n%s\n",
name, pretty_traceback())
self._schedule_disconnect()
raise
finally:
self.logger.debug("Task.%s: exiting.", name)
@bottom_half
async def _bh_send_message(self) -> None:
"""
Wait for an outgoing message, then send it.
Designed to be run in `_bh_loop_forever()`.
"""
msg = await self._outgoing.get()
try:
await self._send(msg)
finally:
self._outgoing.task_done()
@bottom_half
async def _bh_recv_message(self) -> None:
"""
Wait for an incoming message and call `_on_message` to route it.
Designed to be run in `_bh_loop_forever()`.
"""
msg = await self._recv()
await self._on_message(msg)
# --------------------
# Section: Message I/O
# --------------------
@upper_half
@bottom_half
def _cb_outbound(self, msg: T) -> T:
"""
Callback: outbound message hook.
This is intended for subclasses to be able to add arbitrary
hooks to filter or manipulate outgoing messages. The base
implementation does nothing but log the message without any
manipulation of the message.
:param msg: raw outbound message
:return: final outbound message
"""
self.logger.debug("--> %s", str(msg))
return msg
@upper_half
@bottom_half
def _cb_inbound(self, msg: T) -> T:
"""
Callback: inbound message hook.
This is intended for subclasses to be able to add arbitrary
hooks to filter or manipulate incoming messages. The base
implementation does nothing but log the message without any
manipulation of the message.
This method does not "handle" incoming messages; it is a filter.
The actual "endpoint" for incoming messages is `_on_message()`.
:param msg: raw inbound message
:return: processed inbound message
"""
self.logger.debug("<-- %s", str(msg))
return msg
@upper_half
@bottom_half
async def _readline(self) -> bytes:
"""
Wait for a newline from the incoming reader.
This method is provided as a convenience for upper-layer
protocols, as many are line-based.
This method *may* return a sequence of bytes without a trailing
newline if EOF occurs, but *some* bytes were received. In this
case, the next call will raise `EOFError`. It is assumed that
the layer 5 protocol will decide if there is anything meaningful
to be done with a partial message.
:raise OSError: For stream-related errors.
:raise EOFError:
If the reader stream is at EOF and there are no bytes to return.
:return: bytes, including the newline.
"""
assert self._reader is not None
msg_bytes = await self._reader.readline()
if not msg_bytes:
if self._reader.at_eof():
raise EOFError
return msg_bytes
@upper_half
@bottom_half
async def _do_recv(self) -> T:
"""
Abstract: Read from the stream and return a message.
Very low-level; intended to only be called by `_recv()`.
"""
raise NotImplementedError
@upper_half
@bottom_half
async def _recv(self) -> T:
"""
Read an arbitrary protocol message.
.. warning::
This method is intended primarily for `_bh_recv_message()`
to use in an asynchronous task loop. Using it outside of
this loop will "steal" messages from the normal routing
mechanism. It is safe to use prior to `_establish_session()`,
but should not be used otherwise.
This method uses `_do_recv()` to retrieve the raw message, and
then transforms it using `_cb_inbound()`.
:return: A single (filtered, processed) protocol message.
"""
message = await self._do_recv()
return self._cb_inbound(message)
@upper_half
@bottom_half
def _do_send(self, msg: T) -> None:
"""
Abstract: Write a message to the stream.
Very low-level; intended to only be called by `_send()`.
"""
raise NotImplementedError
@upper_half
@bottom_half
async def _send(self, msg: T) -> None:
"""
Send an arbitrary protocol message.
This method will transform any outgoing messages according to
`_cb_outbound()`.
.. warning::
Like `_recv()`, this method is intended to be called by
the writer task loop that processes outgoing
messages. Calling it directly may circumvent logic
implemented by the caller meant to correlate outgoing and
incoming messages.
:raise OSError: For problems with the underlying stream.
"""
msg = self._cb_outbound(msg)
self._do_send(msg)
@bottom_half
async def _on_message(self, msg: T) -> None:
"""
Called to handle the receipt of a new message.
.. caution::
This is executed from within the reader loop, so be advised
that waiting on either the reader or writer task will lead
to deadlock. Additionally, any unhandled exceptions will
directly cause the loop to halt, so logic may be best-kept
to a minimum if at all possible.
:param msg: The incoming message, already logged/filtered.
"""
# Nothing to do in the abstract case.
| 38,478 | 35.164474 | 79 | py |
qemu | qemu-master/python/qemu/qmp/models.py | """
QMP Data Models
This module provides simplistic data classes that represent the few
structures that the QMP spec mandates; they are used to verify incoming
data to make sure it conforms to spec.
"""
# pylint: disable=too-few-public-methods
from collections import abc
import copy
from typing import (
Any,
Dict,
Mapping,
Optional,
Sequence,
)
class Model:
"""
Abstract data model, representing some QMP object of some kind.
:param raw: The raw object to be validated.
:raise KeyError: If any required fields are absent.
:raise TypeError: If any required fields have the wrong type.
"""
def __init__(self, raw: Mapping[str, Any]):
self._raw = raw
def _check_key(self, key: str) -> None:
if key not in self._raw:
raise KeyError(f"'{self._name}' object requires '{key}' member")
def _check_value(self, key: str, type_: type, typestr: str) -> None:
assert key in self._raw
if not isinstance(self._raw[key], type_):
raise TypeError(
f"'{self._name}' member '{key}' must be a {typestr}"
)
def _check_member(self, key: str, type_: type, typestr: str) -> None:
self._check_key(key)
self._check_value(key, type_, typestr)
@property
def _name(self) -> str:
return type(self).__name__
def __repr__(self) -> str:
return f"{self._name}({self._raw!r})"
class Greeting(Model):
"""
Defined in qmp-spec.txt, section 2.2, "Server Greeting".
:param raw: The raw Greeting object.
:raise KeyError: If any required fields are absent.
:raise TypeError: If any required fields have the wrong type.
"""
def __init__(self, raw: Mapping[str, Any]):
super().__init__(raw)
#: 'QMP' member
self.QMP: QMPGreeting # pylint: disable=invalid-name
self._check_member('QMP', abc.Mapping, "JSON object")
self.QMP = QMPGreeting(self._raw['QMP'])
def _asdict(self) -> Dict[str, object]:
"""
For compatibility with the iotests sync QMP wrapper.
The legacy QMP interface needs Greetings as a garden-variety Dict.
This interface is private in the hopes that it will be able to
be dropped again in the near-future. Caller beware!
"""
return dict(copy.deepcopy(self._raw))
class QMPGreeting(Model):
"""
Defined in qmp-spec.txt, section 2.2, "Server Greeting".
:param raw: The raw QMPGreeting object.
:raise KeyError: If any required fields are absent.
:raise TypeError: If any required fields have the wrong type.
"""
def __init__(self, raw: Mapping[str, Any]):
super().__init__(raw)
#: 'version' member
self.version: Mapping[str, object]
#: 'capabilities' member
self.capabilities: Sequence[object]
self._check_member('version', abc.Mapping, "JSON object")
self.version = self._raw['version']
self._check_member('capabilities', abc.Sequence, "JSON array")
self.capabilities = self._raw['capabilities']
class ErrorResponse(Model):
"""
Defined in qmp-spec.txt, section 2.4.2, "error".
:param raw: The raw ErrorResponse object.
:raise KeyError: If any required fields are absent.
:raise TypeError: If any required fields have the wrong type.
"""
def __init__(self, raw: Mapping[str, Any]):
super().__init__(raw)
#: 'error' member
self.error: ErrorInfo
#: 'id' member
self.id: Optional[object] = None # pylint: disable=invalid-name
self._check_member('error', abc.Mapping, "JSON object")
self.error = ErrorInfo(self._raw['error'])
if 'id' in raw:
self.id = raw['id']
class ErrorInfo(Model):
"""
Defined in qmp-spec.txt, section 2.4.2, "error".
:param raw: The raw ErrorInfo object.
:raise KeyError: If any required fields are absent.
:raise TypeError: If any required fields have the wrong type.
"""
def __init__(self, raw: Mapping[str, Any]):
super().__init__(raw)
#: 'class' member, with an underscore to avoid conflicts in Python.
self.class_: str
#: 'desc' member
self.desc: str
self._check_member('class', str, "string")
self.class_ = self._raw['class']
self._check_member('desc', str, "string")
self.desc = self._raw['desc']
| 4,442 | 29.22449 | 76 | py |
qemu | qemu-master/python/qemu/qmp/__init__.py | """
QEMU Monitor Protocol (QMP) development library & tooling.
This package provides a fairly low-level class for communicating
asynchronously with QMP protocol servers, as implemented by QEMU, the
QEMU Guest Agent, and the QEMU Storage Daemon.
`QMPClient` provides the main functionality of this package. All errors
raised by this library derive from `QMPError`, see `qmp.error` for
additional detail. See `qmp.events` for an in-depth tutorial on
managing QMP events.
"""
# Copyright (C) 2020-2022 John Snow for Red Hat, Inc.
#
# Authors:
# John Snow <[email protected]>
#
# Based on earlier work by Luiz Capitulino <[email protected]>.
#
# This work is licensed under the terms of the GNU LGPL, version 2 or
# later. See the COPYING file in the top-level directory.
import logging
from .error import QMPError
from .events import EventListener
from .message import Message
from .protocol import (
ConnectError,
Runstate,
SocketAddrT,
StateError,
)
from .qmp_client import ExecInterruptedError, ExecuteError, QMPClient
# Suppress logging unless an application engages it.
logging.getLogger('qemu.qmp').addHandler(logging.NullHandler())
# The order of these fields impact the Sphinx documentation order.
__all__ = (
# Classes, most to least important
'QMPClient',
'Message',
'EventListener',
'Runstate',
# Exceptions, most generic to most explicit
'QMPError',
'StateError',
'ConnectError',
'ExecuteError',
'ExecInterruptedError',
# Type aliases
'SocketAddrT',
)
| 1,545 | 24.766667 | 71 | py |
qemu | qemu-master/python/qemu/qmp/qmp_shell.py | #
# Copyright (C) 2009-2022 Red Hat Inc.
#
# Authors:
# Luiz Capitulino <[email protected]>
# John Snow <[email protected]>
#
# This work is licensed under the terms of the GNU LGPL, version 2 or
# later. See the COPYING file in the top-level directory.
#
"""
Low-level QEMU shell on top of QMP.
usage: qmp-shell [-h] [-H] [-N] [-v] [-p] qmp_server
positional arguments:
qmp_server < UNIX socket path | TCP address:port >
optional arguments:
-h, --help show this help message and exit
-H, --hmp Use HMP interface
-N, --skip-negotiation
Skip negotiate (for qemu-ga)
-v, --verbose Verbose (echo commands sent and received)
-p, --pretty Pretty-print JSON
Start QEMU with:
# qemu [...] -qmp unix:./qmp-sock,server
Run the shell:
$ qmp-shell ./qmp-sock
Commands have the following format:
< command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ]
For example:
(QEMU) device_add driver=e1000 id=net1
{'return': {}}
(QEMU)
key=value pairs also support Python or JSON object literal subset notations,
without spaces. Dictionaries/objects {} are supported as are arrays [].
example-command arg-name1={'key':'value','obj'={'prop':"value"}}
Both JSON and Python formatting should work, including both styles of
string literal quotes. Both paradigms of literal values should work,
including null/true/false for JSON and None/True/False for Python.
Transactions have the following multi-line format:
transaction(
action-name1 [ arg-name1=arg1 ] ... [arg-nameN=argN ]
...
action-nameN [ arg-name1=arg1 ] ... [arg-nameN=argN ]
)
One line transactions are also supported:
transaction( action-name1 ... )
For example:
(QEMU) transaction(
TRANS> block-dirty-bitmap-add node=drive0 name=bitmap1
TRANS> block-dirty-bitmap-clear node=drive0 name=bitmap0
TRANS> )
{"return": {}}
(QEMU)
Use the -v and -p options to activate the verbose and pretty-print options,
which will echo back the properly formatted JSON-compliant QMP that is being
sent to QEMU, which is useful for debugging and documentation generation.
"""
import argparse
import ast
import json
import logging
import os
import re
import readline
from subprocess import Popen
import sys
from typing import (
IO,
Iterator,
List,
NoReturn,
Optional,
Sequence,
)
from qemu.qmp import ConnectError, QMPError, SocketAddrT
from qemu.qmp.legacy import (
QEMUMonitorProtocol,
QMPBadPortError,
QMPMessage,
QMPObject,
)
LOG = logging.getLogger(__name__)
class QMPCompleter:
"""
QMPCompleter provides a readline library tab-complete behavior.
"""
# NB: Python 3.9+ will probably allow us to subclass list[str] directly,
# but pylint as of today does not know that List[str] is simply 'list'.
def __init__(self) -> None:
self._matches: List[str] = []
def append(self, value: str) -> None:
"""Append a new valid completion to the list of possibilities."""
return self._matches.append(value)
def complete(self, text: str, state: int) -> Optional[str]:
"""readline.set_completer() callback implementation."""
for cmd in self._matches:
if cmd.startswith(text):
if state == 0:
return cmd
state -= 1
return None
class QMPShellError(QMPError):
"""
QMP Shell Base error class.
"""
class FuzzyJSON(ast.NodeTransformer):
"""
This extension of ast.NodeTransformer filters literal "true/false/null"
values in a Python AST and replaces them by proper "True/False/None" values
that Python can properly evaluate.
"""
@classmethod
def visit_Name(cls, # pylint: disable=invalid-name
node: ast.Name) -> ast.AST:
"""
Transform Name nodes with certain values into Constant (keyword) nodes.
"""
if node.id == 'true':
return ast.Constant(value=True)
if node.id == 'false':
return ast.Constant(value=False)
if node.id == 'null':
return ast.Constant(value=None)
return node
class QMPShell(QEMUMonitorProtocol):
"""
QMPShell provides a basic readline-based QMP shell.
:param address: Address of the QMP server.
:param pretty: Pretty-print QMP messages.
:param verbose: Echo outgoing QMP messages to console.
"""
def __init__(self, address: SocketAddrT,
pretty: bool = False,
verbose: bool = False,
server: bool = False,
logfile: Optional[str] = None):
super().__init__(address, server=server)
self._greeting: Optional[QMPMessage] = None
self._completer = QMPCompleter()
self._transmode = False
self._actions: List[QMPMessage] = []
self._histfile = os.path.join(os.path.expanduser('~'),
'.qmp-shell_history')
self.pretty = pretty
self.verbose = verbose
self.logfile = None
if logfile is not None:
self.logfile = open(logfile, "w", encoding='utf-8')
def close(self) -> None:
# Hook into context manager of parent to save shell history.
self._save_history()
super().close()
def _fill_completion(self) -> None:
cmds = self.cmd('query-commands')
if 'error' in cmds:
return
for cmd in cmds['return']:
self._completer.append(cmd['name'])
def _completer_setup(self) -> None:
self._completer = QMPCompleter()
self._fill_completion()
readline.set_history_length(1024)
readline.set_completer(self._completer.complete)
readline.parse_and_bind("tab: complete")
# NB: default delimiters conflict with some command names
# (eg. query-), clearing everything as it doesn't seem to matter
readline.set_completer_delims('')
try:
readline.read_history_file(self._histfile)
except FileNotFoundError:
pass
except IOError as err:
msg = f"Failed to read history '{self._histfile}': {err!s}"
LOG.warning(msg)
def _save_history(self) -> None:
try:
readline.write_history_file(self._histfile)
except IOError as err:
msg = f"Failed to save history file '{self._histfile}': {err!s}"
LOG.warning(msg)
@classmethod
def _parse_value(cls, val: str) -> object:
try:
return int(val)
except ValueError:
pass
if val.lower() == 'true':
return True
if val.lower() == 'false':
return False
if val.startswith(('{', '[')):
# Try first as pure JSON:
try:
return json.loads(val)
except ValueError:
pass
# Try once again as FuzzyJSON:
try:
tree = ast.parse(val, mode='eval')
transformed = FuzzyJSON().visit(tree)
return ast.literal_eval(transformed)
except (SyntaxError, ValueError):
pass
return val
def _cli_expr(self,
tokens: Sequence[str],
parent: QMPObject) -> None:
for arg in tokens:
(key, sep, val) = arg.partition('=')
if sep != '=':
raise QMPShellError(
f"Expected a key=value pair, got '{arg!s}'"
)
value = self._parse_value(val)
optpath = key.split('.')
curpath = []
for path in optpath[:-1]:
curpath.append(path)
obj = parent.get(path, {})
if not isinstance(obj, dict):
msg = 'Cannot use "{:s}" as both leaf and non-leaf key'
raise QMPShellError(msg.format('.'.join(curpath)))
parent[path] = obj
parent = obj
if optpath[-1] in parent:
if isinstance(parent[optpath[-1]], dict):
msg = 'Cannot use "{:s}" as both leaf and non-leaf key'
raise QMPShellError(msg.format('.'.join(curpath)))
raise QMPShellError(f'Cannot set "{key}" multiple times')
parent[optpath[-1]] = value
def _build_cmd(self, cmdline: str) -> Optional[QMPMessage]:
"""
Build a QMP input object from a user provided command-line in the
following format:
< command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ]
"""
argument_regex = r'''(?:[^\s"']|"(?:\\.|[^"])*"|'(?:\\.|[^'])*')+'''
cmdargs = re.findall(argument_regex, cmdline)
qmpcmd: QMPMessage
# Transactional CLI entry:
if cmdargs and cmdargs[0] == 'transaction(':
self._transmode = True
self._actions = []
cmdargs.pop(0)
# Transactional CLI exit:
if cmdargs and cmdargs[0] == ')' and self._transmode:
self._transmode = False
if len(cmdargs) > 1:
msg = 'Unexpected input after close of Transaction sub-shell'
raise QMPShellError(msg)
qmpcmd = {
'execute': 'transaction',
'arguments': {'actions': self._actions}
}
return qmpcmd
# No args, or no args remaining
if not cmdargs:
return None
if self._transmode:
# Parse and cache this Transactional Action
finalize = False
action = {'type': cmdargs[0], 'data': {}}
if cmdargs[-1] == ')':
cmdargs.pop(-1)
finalize = True
self._cli_expr(cmdargs[1:], action['data'])
self._actions.append(action)
return self._build_cmd(')') if finalize else None
# Standard command: parse and return it to be executed.
qmpcmd = {'execute': cmdargs[0], 'arguments': {}}
self._cli_expr(cmdargs[1:], qmpcmd['arguments'])
return qmpcmd
def _print(self, qmp_message: object, fh: IO[str] = sys.stdout) -> None:
jsobj = json.dumps(qmp_message,
indent=4 if self.pretty else None,
sort_keys=self.pretty)
print(str(jsobj), file=fh)
def _execute_cmd(self, cmdline: str) -> bool:
try:
qmpcmd = self._build_cmd(cmdline)
except QMPShellError as err:
print(
f"Error while parsing command line: {err!s}\n"
"command format: <command-name> "
"[arg-name1=arg1] ... [arg-nameN=argN",
file=sys.stderr
)
return True
# For transaction mode, we may have just cached the action:
if qmpcmd is None:
return True
if self.verbose:
self._print(qmpcmd)
resp = self.cmd_obj(qmpcmd)
if resp is None:
print('Disconnected')
return False
self._print(resp)
if self.logfile is not None:
cmd = {**qmpcmd, **resp}
self._print(cmd, fh=self.logfile)
return True
def connect(self, negotiate: bool = True) -> None:
self._greeting = super().connect(negotiate)
self._completer_setup()
def show_banner(self,
msg: str = 'Welcome to the QMP low-level shell!') -> None:
"""
Print to stdio a greeting, and the QEMU version if available.
"""
print(msg)
if not self._greeting:
print('Connected')
return
version = self._greeting['QMP']['version']['qemu']
print("Connected to QEMU {major}.{minor}.{micro}\n".format(**version))
@property
def prompt(self) -> str:
"""
Return the current shell prompt, including a trailing space.
"""
if self._transmode:
return 'TRANS> '
return '(QEMU) '
def read_exec_command(self) -> bool:
"""
Read and execute a command.
@return True if execution was ok, return False if disconnected.
"""
try:
cmdline = input(self.prompt)
except EOFError:
print()
return False
if cmdline == '':
for event in self.get_events():
print(event)
return True
return self._execute_cmd(cmdline)
def repl(self) -> Iterator[None]:
"""
Return an iterator that implements the REPL.
"""
self.show_banner()
while self.read_exec_command():
yield
self.close()
class HMPShell(QMPShell):
"""
HMPShell provides a basic readline-based HMP shell, tunnelled via QMP.
:param address: Address of the QMP server.
:param pretty: Pretty-print QMP messages.
:param verbose: Echo outgoing QMP messages to console.
"""
def __init__(self, address: SocketAddrT,
pretty: bool = False,
verbose: bool = False,
server: bool = False,
logfile: Optional[str] = None):
super().__init__(address, pretty, verbose, server, logfile)
self._cpu_index = 0
def _cmd_completion(self) -> None:
for cmd in self._cmd_passthrough('help')['return'].split('\r\n'):
if cmd and cmd[0] != '[' and cmd[0] != '\t':
name = cmd.split()[0] # drop help text
if name == 'info':
continue
if name.find('|') != -1:
# Command in the form 'foobar|f' or 'f|foobar', take the
# full name
opt = name.split('|')
if len(opt[0]) == 1:
name = opt[1]
else:
name = opt[0]
self._completer.append(name)
self._completer.append('help ' + name) # help completion
def _info_completion(self) -> None:
for cmd in self._cmd_passthrough('info')['return'].split('\r\n'):
if cmd:
self._completer.append('info ' + cmd.split()[1])
def _other_completion(self) -> None:
# special cases
self._completer.append('help info')
def _fill_completion(self) -> None:
self._cmd_completion()
self._info_completion()
self._other_completion()
def _cmd_passthrough(self, cmdline: str,
cpu_index: int = 0) -> QMPMessage:
return self.cmd_obj({
'execute': 'human-monitor-command',
'arguments': {
'command-line': cmdline,
'cpu-index': cpu_index
}
})
def _execute_cmd(self, cmdline: str) -> bool:
if cmdline.split()[0] == "cpu":
# trap the cpu command, it requires special setting
try:
idx = int(cmdline.split()[1])
if 'return' not in self._cmd_passthrough('info version', idx):
print('bad CPU index')
return True
self._cpu_index = idx
except ValueError:
print('cpu command takes an integer argument')
return True
resp = self._cmd_passthrough(cmdline, self._cpu_index)
if resp is None:
print('Disconnected')
return False
assert 'return' in resp or 'error' in resp
if 'return' in resp:
# Success
if len(resp['return']) > 0:
print(resp['return'], end=' ')
else:
# Error
print('%s: %s' % (resp['error']['class'], resp['error']['desc']))
return True
def show_banner(self, msg: str = 'Welcome to the HMP shell!') -> None:
QMPShell.show_banner(self, msg)
def die(msg: str) -> NoReturn:
"""Write an error to stderr, then exit with a return code of 1."""
sys.stderr.write('ERROR: %s\n' % msg)
sys.exit(1)
def main() -> None:
"""
qmp-shell entry point: parse command line arguments and start the REPL.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-H', '--hmp', action='store_true',
help='Use HMP interface')
parser.add_argument('-N', '--skip-negotiation', action='store_true',
help='Skip negotiate (for qemu-ga)')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose (echo commands sent and received)')
parser.add_argument('-p', '--pretty', action='store_true',
help='Pretty-print JSON')
parser.add_argument('-l', '--logfile',
help='Save log of all QMP messages to PATH')
default_server = os.environ.get('QMP_SOCKET')
parser.add_argument('qmp_server', action='store',
default=default_server,
help='< UNIX socket path | TCP address:port >')
args = parser.parse_args()
if args.qmp_server is None:
parser.error("QMP socket or TCP address must be specified")
shell_class = HMPShell if args.hmp else QMPShell
try:
address = shell_class.parse_address(args.qmp_server)
except QMPBadPortError:
parser.error(f"Bad port number: {args.qmp_server}")
return # pycharm doesn't know error() is noreturn
with shell_class(address, args.pretty, args.verbose, args.logfile) as qemu:
try:
qemu.connect(negotiate=not args.skip_negotiation)
except ConnectError as err:
if isinstance(err.exc, OSError):
die(f"Couldn't connect to {args.qmp_server}: {err!s}")
die(str(err))
for _ in qemu.repl():
pass
def main_wrap() -> None:
"""
qmp-shell-wrap entry point: parse command line arguments and
start the REPL.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-H', '--hmp', action='store_true',
help='Use HMP interface')
parser.add_argument('-v', '--verbose', action='store_true',
help='Verbose (echo commands sent and received)')
parser.add_argument('-p', '--pretty', action='store_true',
help='Pretty-print JSON')
parser.add_argument('-l', '--logfile',
help='Save log of all QMP messages to PATH')
parser.add_argument('command', nargs=argparse.REMAINDER,
help='QEMU command line to invoke')
args = parser.parse_args()
cmd = args.command
if len(cmd) != 0 and cmd[0] == '--':
cmd = cmd[1:]
if len(cmd) == 0:
cmd = ["qemu-system-x86_64"]
sockpath = "qmp-shell-wrap-%d" % os.getpid()
cmd += ["-qmp", "unix:%s" % sockpath]
shell_class = HMPShell if args.hmp else QMPShell
try:
address = shell_class.parse_address(sockpath)
except QMPBadPortError:
parser.error(f"Bad port number: {sockpath}")
return # pycharm doesn't know error() is noreturn
try:
with shell_class(address, args.pretty, args.verbose,
True, args.logfile) as qemu:
with Popen(cmd):
try:
qemu.accept()
except ConnectError as err:
if isinstance(err.exc, OSError):
die(f"Couldn't connect to {args.qmp_server}: {err!s}")
die(str(err))
for _ in qemu.repl():
pass
finally:
os.unlink(sockpath)
if __name__ == '__main__':
main()
| 19,800 | 31.407529 | 79 | py |
qemu | qemu-master/python/qemu/utils/qom_common.py | """
QOM Command abstractions.
"""
##
# Copyright John Snow 2020, for Red Hat, Inc.
# Copyright IBM, Corp. 2011
#
# Authors:
# John Snow <[email protected]>
# Anthony Liguori <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# Based on ./scripts/qmp/qom-[set|get|tree|list]
##
import argparse
import os
import sys
from typing import (
Any,
Dict,
List,
Optional,
Type,
TypeVar,
)
from qemu.qmp import QMPError
from qemu.qmp.legacy import QEMUMonitorProtocol
class ObjectPropertyInfo:
"""
Represents the return type from e.g. qom-list.
"""
def __init__(self, name: str, type_: str,
description: Optional[str] = None,
default_value: Optional[object] = None):
self.name = name
self.type = type_
self.description = description
self.default_value = default_value
@classmethod
def make(cls, value: Dict[str, Any]) -> 'ObjectPropertyInfo':
"""
Build an ObjectPropertyInfo from a Dict with an unknown shape.
"""
assert value.keys() >= {'name', 'type'}
assert value.keys() <= {'name', 'type', 'description', 'default-value'}
return cls(value['name'], value['type'],
value.get('description'),
value.get('default-value'))
@property
def child(self) -> bool:
"""Is this property a child property?"""
return self.type.startswith('child<')
@property
def link(self) -> bool:
"""Is this property a link property?"""
return self.type.startswith('link<')
CommandT = TypeVar('CommandT', bound='QOMCommand')
class QOMCommand:
"""
Represents a QOM sub-command.
:param args: Parsed arguments, as returned from parser.parse_args.
"""
name: str
help: str
def __init__(self, args: argparse.Namespace):
if args.socket is None:
raise QMPError("No QMP socket path or address given")
self.qmp = QEMUMonitorProtocol(
QEMUMonitorProtocol.parse_address(args.socket)
)
self.qmp.connect()
@classmethod
def register(cls, subparsers: Any) -> None:
"""
Register this command with the argument parser.
:param subparsers: argparse subparsers object, from "add_subparsers".
"""
subparser = subparsers.add_parser(cls.name, help=cls.help,
description=cls.help)
cls.configure_parser(subparser)
@classmethod
def configure_parser(cls, parser: argparse.ArgumentParser) -> None:
"""
Configure a parser with this command's arguments.
:param parser: argparse parser or subparser object.
"""
default_path = os.environ.get('QMP_SOCKET')
parser.add_argument(
'--socket', '-s',
dest='socket',
action='store',
help='QMP socket path or address (addr:port).'
' May also be set via QMP_SOCKET environment variable.',
default=default_path
)
parser.set_defaults(cmd_class=cls)
@classmethod
def add_path_prop_arg(cls, parser: argparse.ArgumentParser) -> None:
"""
Add the <path>.<proptery> positional argument to this command.
:param parser: The parser to add the argument to.
"""
parser.add_argument(
'path_prop',
metavar='<path>.<property>',
action='store',
help="QOM path and property, separated by a period '.'"
)
def run(self) -> int:
"""
Run this command.
:return: 0 on success, 1 otherwise.
"""
raise NotImplementedError
def qom_list(self, path: str) -> List[ObjectPropertyInfo]:
"""
:return: a strongly typed list from the 'qom-list' command.
"""
rsp = self.qmp.command('qom-list', path=path)
# qom-list returns List[ObjectPropertyInfo]
assert isinstance(rsp, list)
return [ObjectPropertyInfo.make(x) for x in rsp]
@classmethod
def command_runner(
cls: Type[CommandT],
args: argparse.Namespace
) -> int:
"""
Run a fully-parsed subcommand, with error-handling for the CLI.
:return: The return code from `run()`.
"""
try:
cmd = cls(args)
return cmd.run()
except QMPError as err:
print(f"{type(err).__name__}: {err!s}", file=sys.stderr)
return -1
@classmethod
def entry_point(cls) -> int:
"""
Build this command's parser, parse arguments, and run the command.
:return: `run`'s return code.
"""
parser = argparse.ArgumentParser(description=cls.help)
cls.configure_parser(parser)
args = parser.parse_args()
return cls.command_runner(args)
| 4,995 | 27.386364 | 79 | py |
qemu | qemu-master/python/qemu/utils/qemu_ga_client.py | """
QEMU Guest Agent Client
Usage:
Start QEMU with:
# qemu [...] -chardev socket,path=/tmp/qga.sock,server=on,wait=off,id=qga0 \
-device virtio-serial \
-device virtserialport,chardev=qga0,name=org.qemu.guest_agent.0
Run the script:
$ qemu-ga-client --address=/tmp/qga.sock <command> [args...]
or
$ export QGA_CLIENT_ADDRESS=/tmp/qga.sock
$ qemu-ga-client <command> [args...]
For example:
$ qemu-ga-client cat /etc/resolv.conf
# Generated by NetworkManager
nameserver 10.0.2.3
$ qemu-ga-client fsfreeze status
thawed
$ qemu-ga-client fsfreeze freeze
2 filesystems frozen
See also: https://wiki.qemu.org/Features/QAPI/GuestAgent
"""
# Copyright (C) 2012 Ryota Ozaki <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import argparse
import asyncio
import base64
import os
import random
import sys
from typing import (
Any,
Callable,
Dict,
Optional,
Sequence,
)
from qemu.qmp import ConnectError, SocketAddrT
from qemu.qmp.legacy import QEMUMonitorProtocol
# This script has not seen many patches or careful attention in quite
# some time. If you would like to improve it, please review the design
# carefully and add docstrings at that point in time. Until then:
# pylint: disable=missing-docstring
class QemuGuestAgent(QEMUMonitorProtocol):
def __getattr__(self, name: str) -> Callable[..., Any]:
def wrapper(**kwds: object) -> object:
return self.command('guest-' + name.replace('_', '-'), **kwds)
return wrapper
class QemuGuestAgentClient:
def __init__(self, address: SocketAddrT):
self.qga = QemuGuestAgent(address)
self.qga.connect(negotiate=False)
def sync(self, timeout: Optional[float] = 3) -> None:
# Avoid being blocked forever
if not self.ping(timeout):
raise EnvironmentError('Agent seems not alive')
uid = random.randint(0, (1 << 32) - 1)
while True:
ret = self.qga.sync(id=uid)
if isinstance(ret, int) and int(ret) == uid:
break
def __file_read_all(self, handle: int) -> bytes:
eof = False
data = b''
while not eof:
ret = self.qga.file_read(handle=handle, count=1024)
_data = base64.b64decode(ret['buf-b64'])
data += _data
eof = ret['eof']
return data
def read(self, path: str) -> bytes:
handle = self.qga.file_open(path=path)
try:
data = self.__file_read_all(handle)
finally:
self.qga.file_close(handle=handle)
return data
def info(self) -> str:
info = self.qga.info()
msgs = []
msgs.append('version: ' + info['version'])
msgs.append('supported_commands:')
enabled = [c['name'] for c in info['supported_commands']
if c['enabled']]
msgs.append('\tenabled: ' + ', '.join(enabled))
disabled = [c['name'] for c in info['supported_commands']
if not c['enabled']]
msgs.append('\tdisabled: ' + ', '.join(disabled))
return '\n'.join(msgs)
@classmethod
def __gen_ipv4_netmask(cls, prefixlen: int) -> str:
mask = int('1' * prefixlen + '0' * (32 - prefixlen), 2)
return '.'.join([str(mask >> 24),
str((mask >> 16) & 0xff),
str((mask >> 8) & 0xff),
str(mask & 0xff)])
def ifconfig(self) -> str:
nifs = self.qga.network_get_interfaces()
msgs = []
for nif in nifs:
msgs.append(nif['name'] + ':')
if 'ip-addresses' in nif:
for ipaddr in nif['ip-addresses']:
if ipaddr['ip-address-type'] == 'ipv4':
addr = ipaddr['ip-address']
mask = self.__gen_ipv4_netmask(int(ipaddr['prefix']))
msgs.append(f"\tinet {addr} netmask {mask}")
elif ipaddr['ip-address-type'] == 'ipv6':
addr = ipaddr['ip-address']
prefix = ipaddr['prefix']
msgs.append(f"\tinet6 {addr} prefixlen {prefix}")
if nif['hardware-address'] != '00:00:00:00:00:00':
msgs.append("\tether " + nif['hardware-address'])
return '\n'.join(msgs)
def ping(self, timeout: Optional[float]) -> bool:
self.qga.settimeout(timeout)
try:
self.qga.ping()
except asyncio.TimeoutError:
return False
return True
def fsfreeze(self, cmd: str) -> object:
if cmd not in ['status', 'freeze', 'thaw']:
raise ValueError('Invalid command: ' + cmd)
# Can be int (freeze, thaw) or GuestFsfreezeStatus (status)
return getattr(self.qga, 'fsfreeze' + '_' + cmd)()
def fstrim(self, minimum: int) -> Dict[str, object]:
# returns GuestFilesystemTrimResponse
ret = getattr(self.qga, 'fstrim')(minimum=minimum)
assert isinstance(ret, dict)
return ret
def suspend(self, mode: str) -> None:
if mode not in ['disk', 'ram', 'hybrid']:
raise ValueError('Invalid mode: ' + mode)
try:
getattr(self.qga, 'suspend' + '_' + mode)()
# On error exception will raise
except asyncio.TimeoutError:
# On success command will timed out
return
def shutdown(self, mode: str = 'powerdown') -> None:
if mode not in ['powerdown', 'halt', 'reboot']:
raise ValueError('Invalid mode: ' + mode)
try:
self.qga.shutdown(mode=mode)
except asyncio.TimeoutError:
pass
def _cmd_cat(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
if len(args) != 1:
print('Invalid argument')
print('Usage: cat <file>')
sys.exit(1)
print(client.read(args[0]))
def _cmd_fsfreeze(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
usage = 'Usage: fsfreeze status|freeze|thaw'
if len(args) != 1:
print('Invalid argument')
print(usage)
sys.exit(1)
if args[0] not in ['status', 'freeze', 'thaw']:
print('Invalid command: ' + args[0])
print(usage)
sys.exit(1)
cmd = args[0]
ret = client.fsfreeze(cmd)
if cmd == 'status':
print(ret)
return
assert isinstance(ret, int)
verb = 'frozen' if cmd == 'freeze' else 'thawed'
print(f"{ret:d} filesystems {verb}")
def _cmd_fstrim(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
if len(args) == 0:
minimum = 0
else:
minimum = int(args[0])
print(client.fstrim(minimum))
def _cmd_ifconfig(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
assert not args
print(client.ifconfig())
def _cmd_info(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
assert not args
print(client.info())
def _cmd_ping(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
timeout = 3.0 if len(args) == 0 else float(args[0])
alive = client.ping(timeout)
if not alive:
print("Not responded in %s sec" % args[0])
sys.exit(1)
def _cmd_suspend(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
usage = 'Usage: suspend disk|ram|hybrid'
if len(args) != 1:
print('Less argument')
print(usage)
sys.exit(1)
if args[0] not in ['disk', 'ram', 'hybrid']:
print('Invalid command: ' + args[0])
print(usage)
sys.exit(1)
client.suspend(args[0])
def _cmd_shutdown(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
assert not args
client.shutdown()
_cmd_powerdown = _cmd_shutdown
def _cmd_halt(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
assert not args
client.shutdown('halt')
def _cmd_reboot(client: QemuGuestAgentClient, args: Sequence[str]) -> None:
assert not args
client.shutdown('reboot')
commands = [m.replace('_cmd_', '') for m in dir() if '_cmd_' in m]
def send_command(address: str, cmd: str, args: Sequence[str]) -> None:
if not os.path.exists(address):
print(f"'{address}' not found. (Is QEMU running?)")
sys.exit(1)
if cmd not in commands:
print('Invalid command: ' + cmd)
print('Available commands: ' + ', '.join(commands))
sys.exit(1)
try:
client = QemuGuestAgentClient(address)
except ConnectError as err:
print(err)
if isinstance(err.exc, ConnectionError):
print('(Is QEMU running?)')
sys.exit(1)
if cmd == 'fsfreeze' and args[0] == 'freeze':
client.sync(60)
elif cmd != 'ping':
client.sync()
globals()['_cmd_' + cmd](client, args)
def main() -> None:
address = os.environ.get('QGA_CLIENT_ADDRESS')
parser = argparse.ArgumentParser()
parser.add_argument('--address', action='store',
default=address,
help='Specify a ip:port pair or a unix socket path')
parser.add_argument('command', choices=commands)
parser.add_argument('args', nargs='*')
args = parser.parse_args()
if args.address is None:
parser.error('address is not specified')
sys.exit(1)
send_command(args.address, args.command, args.args)
if __name__ == '__main__':
main()
| 9,490 | 28.29321 | 77 | py |
qemu | qemu-master/python/qemu/utils/qom.py | """
QEMU Object Model testing tools.
usage: qom [-h] {set,get,list,tree,fuse} ...
Query and manipulate QOM data
optional arguments:
-h, --help show this help message and exit
QOM commands:
{set,get,list,tree,fuse}
set Set a QOM property value
get Get a QOM property value
list List QOM properties at a given path
tree Show QOM tree from a given path
fuse Mount a QOM tree as a FUSE filesystem
"""
##
# Copyright John Snow 2020, for Red Hat, Inc.
# Copyright IBM, Corp. 2011
#
# Authors:
# John Snow <[email protected]>
# Anthony Liguori <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
# Based on ./scripts/qmp/qom-[set|get|tree|list]
##
import argparse
from qemu.qmp import ExecuteError
from .qom_common import QOMCommand
try:
from .qom_fuse import QOMFuse
except ModuleNotFoundError as _err:
if _err.name != 'fuse':
raise
else:
assert issubclass(QOMFuse, QOMCommand)
class QOMSet(QOMCommand):
"""
QOM Command - Set a property to a given value.
usage: qom-set [-h] [--socket SOCKET] <path>.<property> <value>
Set a QOM property value
positional arguments:
<path>.<property> QOM path and property, separated by a period '.'
<value> new QOM property value
optional arguments:
-h, --help show this help message and exit
--socket SOCKET, -s SOCKET
QMP socket path or address (addr:port). May also be
set via QMP_SOCKET environment variable.
"""
name = 'set'
help = 'Set a QOM property value'
@classmethod
def configure_parser(cls, parser: argparse.ArgumentParser) -> None:
super().configure_parser(parser)
cls.add_path_prop_arg(parser)
parser.add_argument(
'value',
metavar='<value>',
action='store',
help='new QOM property value'
)
def __init__(self, args: argparse.Namespace):
super().__init__(args)
self.path, self.prop = args.path_prop.rsplit('.', 1)
self.value = args.value
def run(self) -> int:
rsp = self.qmp.command(
'qom-set',
path=self.path,
property=self.prop,
value=self.value
)
print(rsp)
return 0
class QOMGet(QOMCommand):
"""
QOM Command - Get a property's current value.
usage: qom-get [-h] [--socket SOCKET] <path>.<property>
Get a QOM property value
positional arguments:
<path>.<property> QOM path and property, separated by a period '.'
optional arguments:
-h, --help show this help message and exit
--socket SOCKET, -s SOCKET
QMP socket path or address (addr:port). May also be
set via QMP_SOCKET environment variable.
"""
name = 'get'
help = 'Get a QOM property value'
@classmethod
def configure_parser(cls, parser: argparse.ArgumentParser) -> None:
super().configure_parser(parser)
cls.add_path_prop_arg(parser)
def __init__(self, args: argparse.Namespace):
super().__init__(args)
try:
tmp = args.path_prop.rsplit('.', 1)
except ValueError as err:
raise ValueError('Invalid format for <path>.<property>') from err
self.path = tmp[0]
self.prop = tmp[1]
def run(self) -> int:
rsp = self.qmp.command(
'qom-get',
path=self.path,
property=self.prop
)
if isinstance(rsp, dict):
for key, value in rsp.items():
print(f"{key}: {value}")
else:
print(rsp)
return 0
class QOMList(QOMCommand):
"""
QOM Command - List the properties at a given path.
usage: qom-list [-h] [--socket SOCKET] <path>
List QOM properties at a given path
positional arguments:
<path> QOM path
optional arguments:
-h, --help show this help message and exit
--socket SOCKET, -s SOCKET
QMP socket path or address (addr:port). May also be
set via QMP_SOCKET environment variable.
"""
name = 'list'
help = 'List QOM properties at a given path'
@classmethod
def configure_parser(cls, parser: argparse.ArgumentParser) -> None:
super().configure_parser(parser)
parser.add_argument(
'path',
metavar='<path>',
action='store',
help='QOM path',
)
def __init__(self, args: argparse.Namespace):
super().__init__(args)
self.path = args.path
def run(self) -> int:
rsp = self.qom_list(self.path)
for item in rsp:
if item.child:
print(f"{item.name}/")
elif item.link:
print(f"@{item.name}/")
else:
print(item.name)
return 0
class QOMTree(QOMCommand):
"""
QOM Command - Show the full tree below a given path.
usage: qom-tree [-h] [--socket SOCKET] [<path>]
Show QOM tree from a given path
positional arguments:
<path> QOM path
optional arguments:
-h, --help show this help message and exit
--socket SOCKET, -s SOCKET
QMP socket path or address (addr:port). May also be
set via QMP_SOCKET environment variable.
"""
name = 'tree'
help = 'Show QOM tree from a given path'
@classmethod
def configure_parser(cls, parser: argparse.ArgumentParser) -> None:
super().configure_parser(parser)
parser.add_argument(
'path',
metavar='<path>',
action='store',
help='QOM path',
nargs='?',
default='/'
)
def __init__(self, args: argparse.Namespace):
super().__init__(args)
self.path = args.path
def _list_node(self, path: str) -> None:
print(path)
items = self.qom_list(path)
for item in items:
if item.child:
continue
try:
rsp = self.qmp.command('qom-get', path=path,
property=item.name)
print(f" {item.name}: {rsp} ({item.type})")
except ExecuteError as err:
print(f" {item.name}: <EXCEPTION: {err!s}> ({item.type})")
print('')
for item in items:
if not item.child:
continue
if path == '/':
path = ''
self._list_node(f"{path}/{item.name}")
def run(self) -> int:
self._list_node(self.path)
return 0
def main() -> int:
"""QOM script main entry point."""
parser = argparse.ArgumentParser(
description='Query and manipulate QOM data'
)
subparsers = parser.add_subparsers(
title='QOM commands',
dest='command'
)
for command in QOMCommand.__subclasses__():
command.register(subparsers)
args = parser.parse_args()
if args.command is None:
parser.error('Command not specified.')
return 1
cmd_class = args.cmd_class
assert isinstance(cmd_class, type(QOMCommand))
return cmd_class.command_runner(args)
| 7,580 | 26.667883 | 79 | py |
qemu | qemu-master/python/qemu/utils/accel.py | """
QEMU accel module:
This module provides utilities for discover and check the availability of
accelerators.
"""
# Copyright (C) 2015-2016 Red Hat Inc.
# Copyright (C) 2012 IBM Corp.
#
# Authors:
# Fam Zheng <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
import logging
import os
import subprocess
from typing import List, Optional
LOG = logging.getLogger(__name__)
# Mapping host architecture to any additional architectures it can
# support which often includes its 32 bit cousin.
ADDITIONAL_ARCHES = {
"x86_64": "i386",
"aarch64": "armhf",
"ppc64le": "ppc64",
}
def list_accel(qemu_bin: str) -> List[str]:
"""
List accelerators enabled in the QEMU binary.
@param qemu_bin (str): path to the QEMU binary.
@raise Exception: if failed to run ``qemu -accel help``
@return a list of accelerator names.
"""
if not qemu_bin:
return []
try:
out = subprocess.check_output([qemu_bin, '-accel', 'help'],
universal_newlines=True)
except:
LOG.debug("Failed to get the list of accelerators in %s", qemu_bin)
raise
# Skip the first line which is the header.
return [acc.strip() for acc in out.splitlines()[1:]]
def kvm_available(target_arch: Optional[str] = None,
qemu_bin: Optional[str] = None) -> bool:
"""
Check if KVM is available using the following heuristic:
- Kernel module is present in the host;
- Target and host arches don't mismatch;
- KVM is enabled in the QEMU binary.
@param target_arch (str): target architecture
@param qemu_bin (str): path to the QEMU binary
@return True if kvm is available, otherwise False.
"""
if not os.access("/dev/kvm", os.R_OK | os.W_OK):
return False
if target_arch:
host_arch = os.uname()[4]
if target_arch != host_arch:
if target_arch != ADDITIONAL_ARCHES.get(host_arch):
return False
if qemu_bin and "kvm" not in list_accel(qemu_bin):
return False
return True
def tcg_available(qemu_bin: str) -> bool:
"""
Check if TCG is available.
@param qemu_bin (str): path to the QEMU binary
"""
return 'tcg' in list_accel(qemu_bin)
| 2,348 | 26.635294 | 75 | py |
qemu | qemu-master/python/qemu/utils/qom_fuse.py | """
QEMU Object Model FUSE filesystem tool
This script offers a simple FUSE filesystem within which the QOM tree
may be browsed, queried and edited using traditional shell tooling.
This script requires the 'fusepy' python package.
usage: qom-fuse [-h] [--socket SOCKET] <mount>
Mount a QOM tree as a FUSE filesystem
positional arguments:
<mount> Mount point
optional arguments:
-h, --help show this help message and exit
--socket SOCKET, -s SOCKET
QMP socket path or address (addr:port). May also be
set via QMP_SOCKET environment variable.
"""
##
# Copyright IBM, Corp. 2012
# Copyright (C) 2020 Red Hat, Inc.
#
# Authors:
# Anthony Liguori <[email protected]>
# Markus Armbruster <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
##
import argparse
from errno import ENOENT, EPERM
import stat
import sys
from typing import (
IO,
Dict,
Iterator,
Mapping,
Optional,
Union,
)
import fuse
from fuse import FUSE, FuseOSError, Operations
from qemu.qmp import ExecuteError
from .qom_common import QOMCommand
fuse.fuse_python_api = (0, 2)
class QOMFuse(QOMCommand, Operations):
"""
QOMFuse implements both fuse.Operations and QOMCommand.
Operations implements the FS, and QOMCommand implements the CLI command.
"""
name = 'fuse'
help = 'Mount a QOM tree as a FUSE filesystem'
fuse: FUSE
@classmethod
def configure_parser(cls, parser: argparse.ArgumentParser) -> None:
super().configure_parser(parser)
parser.add_argument(
'mount',
metavar='<mount>',
action='store',
help="Mount point",
)
def __init__(self, args: argparse.Namespace):
super().__init__(args)
self.mount = args.mount
self.ino_map: Dict[str, int] = {}
self.ino_count = 1
def run(self) -> int:
print(f"Mounting QOMFS to '{self.mount}'", file=sys.stderr)
self.fuse = FUSE(self, self.mount, foreground=True)
return 0
def get_ino(self, path: str) -> int:
"""Get an inode number for a given QOM path."""
if path in self.ino_map:
return self.ino_map[path]
self.ino_map[path] = self.ino_count
self.ino_count += 1
return self.ino_map[path]
def is_object(self, path: str) -> bool:
"""Is the given QOM path an object?"""
try:
self.qom_list(path)
return True
except ExecuteError:
return False
def is_property(self, path: str) -> bool:
"""Is the given QOM path a property?"""
path, prop = path.rsplit('/', 1)
if path == '':
path = '/'
try:
for item in self.qom_list(path):
if item.name == prop:
return True
return False
except ExecuteError:
return False
def is_link(self, path: str) -> bool:
"""Is the given QOM path a link?"""
path, prop = path.rsplit('/', 1)
if path == '':
path = '/'
try:
for item in self.qom_list(path):
if item.name == prop and item.link:
return True
return False
except ExecuteError:
return False
def read(self, path: str, size: int, offset: int, fh: IO[bytes]) -> bytes:
if not self.is_property(path):
raise FuseOSError(ENOENT)
path, prop = path.rsplit('/', 1)
if path == '':
path = '/'
try:
data = str(self.qmp.command('qom-get', path=path, property=prop))
data += '\n' # make values shell friendly
except ExecuteError as err:
raise FuseOSError(EPERM) from err
if offset > len(data):
return b''
return bytes(data[offset:][:size], encoding='utf-8')
def readlink(self, path: str) -> Union[bool, str]:
if not self.is_link(path):
return False
path, prop = path.rsplit('/', 1)
prefix = '/'.join(['..'] * (len(path.split('/')) - 1))
return prefix + str(self.qmp.command('qom-get', path=path,
property=prop))
def getattr(self, path: str,
fh: Optional[IO[bytes]] = None) -> Mapping[str, object]:
if self.is_link(path):
value = {
'st_mode': 0o755 | stat.S_IFLNK,
'st_ino': self.get_ino(path),
'st_dev': 0,
'st_nlink': 2,
'st_uid': 1000,
'st_gid': 1000,
'st_size': 4096,
'st_atime': 0,
'st_mtime': 0,
'st_ctime': 0
}
elif self.is_object(path):
value = {
'st_mode': 0o755 | stat.S_IFDIR,
'st_ino': self.get_ino(path),
'st_dev': 0,
'st_nlink': 2,
'st_uid': 1000,
'st_gid': 1000,
'st_size': 4096,
'st_atime': 0,
'st_mtime': 0,
'st_ctime': 0
}
elif self.is_property(path):
value = {
'st_mode': 0o644 | stat.S_IFREG,
'st_ino': self.get_ino(path),
'st_dev': 0,
'st_nlink': 1,
'st_uid': 1000,
'st_gid': 1000,
'st_size': 4096,
'st_atime': 0,
'st_mtime': 0,
'st_ctime': 0
}
else:
raise FuseOSError(ENOENT)
return value
def readdir(self, path: str, fh: IO[bytes]) -> Iterator[str]:
yield '.'
yield '..'
for item in self.qom_list(path):
yield item.name
| 5,978 | 27.745192 | 78 | py |
qemu | qemu-master/python/qemu/utils/__init__.py | """
QEMU development and testing utilities
This package provides a small handful of utilities for performing
various tasks not directly related to the launching of a VM.
"""
# Copyright (C) 2021 Red Hat Inc.
#
# Authors:
# John Snow <[email protected]>
# Cleber Rosa <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
import os
import re
import shutil
from subprocess import CalledProcessError
import textwrap
from typing import Optional
# pylint: disable=import-error
from .accel import kvm_available, list_accel, tcg_available
__all__ = (
'VerboseProcessError',
'add_visual_margin',
'get_info_usernet_hostfwd_port',
'kvm_available',
'list_accel',
'tcg_available',
)
def get_info_usernet_hostfwd_port(info_usernet_output: str) -> Optional[int]:
"""
Returns the port given to the hostfwd parameter via info usernet
:param info_usernet_output: output generated by hmp command "info usernet"
:return: the port number allocated by the hostfwd option
"""
for line in info_usernet_output.split('\r\n'):
regex = r'TCP.HOST_FORWARD.*127\.0\.0\.1\s+(\d+)\s+10\.'
match = re.search(regex, line)
if match is not None:
return int(match[1])
return None
# pylint: disable=too-many-arguments
def add_visual_margin(
content: str = '',
width: Optional[int] = None,
name: Optional[str] = None,
padding: int = 1,
upper_left: str = '┏',
lower_left: str = '┗',
horizontal: str = '━',
vertical: str = '┃',
) -> str:
"""
Decorate and wrap some text with a visual decoration around it.
This function assumes that the text decoration characters are single
characters that display using a single monospace column.
┏━ Example ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
┃ This is what this function looks like with text content that's
┃ wrapped to 66 characters. The right-hand margin is left open to
┃ accommodate the occasional unicode character that might make
┃ predicting the total "visual" width of a line difficult. This
┃ provides a visual distinction that's good-enough, though.
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
:param content: The text to wrap and decorate.
:param width:
The number of columns to use, including for the decoration
itself. The default (None) uses the available width of the
current terminal, or a fallback of 72 lines. A negative number
subtracts a fixed-width from the default size. The default obeys
the COLUMNS environment variable, if set.
:param name: A label to apply to the upper-left of the box.
:param padding: How many columns of padding to apply inside.
:param upper_left: Upper-left single-width text decoration character.
:param lower_left: Lower-left single-width text decoration character.
:param horizontal: Horizontal single-width text decoration character.
:param vertical: Vertical single-width text decoration character.
"""
if width is None or width < 0:
avail = shutil.get_terminal_size(fallback=(72, 24))[0]
if width is None:
_width = avail
else:
_width = avail + width
else:
_width = width
prefix = vertical + (' ' * padding)
def _bar(name: Optional[str], top: bool = True) -> str:
ret = upper_left if top else lower_left
if name is not None:
ret += f"{horizontal} {name} "
filler_len = _width - len(ret)
ret += f"{horizontal * filler_len}"
return ret
def _wrap(line: str) -> str:
return os.linesep.join(
textwrap.wrap(
line, width=_width - padding, initial_indent=prefix,
subsequent_indent=prefix, replace_whitespace=False,
drop_whitespace=True, break_on_hyphens=False)
)
return os.linesep.join((
_bar(name, top=True),
os.linesep.join(_wrap(line) for line in content.splitlines()),
_bar(None, top=False),
))
class VerboseProcessError(CalledProcessError):
"""
The same as CalledProcessError, but more verbose.
This is useful for debugging failed calls during test executions.
The return code, signal (if any), and terminal output will be displayed
on unhandled exceptions.
"""
def summary(self) -> str:
"""Return the normal CalledProcessError str() output."""
return super().__str__()
def __str__(self) -> str:
lmargin = ' '
width = -len(lmargin)
sections = []
# Does self.stdout contain both stdout and stderr?
has_combined_output = self.stderr is None
name = 'output' if has_combined_output else 'stdout'
if self.stdout:
sections.append(add_visual_margin(self.stdout, width, name))
else:
sections.append(f"{name}: N/A")
if self.stderr:
sections.append(add_visual_margin(self.stderr, width, 'stderr'))
elif not has_combined_output:
sections.append("stderr: N/A")
return os.linesep.join((
self.summary(),
textwrap.indent(os.linesep.join(sections), prefix=lmargin),
))
| 5,382 | 32.02454 | 78 | py |
qemu | qemu-master/python/qemu/machine/machine.py | """
QEMU machine module:
The machine module primarily provides the QEMUMachine class,
which provides facilities for managing the lifetime of a QEMU VM.
"""
# Copyright (C) 2015-2016 Red Hat Inc.
# Copyright (C) 2012 IBM Corp.
#
# Authors:
# Fam Zheng <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# Based on qmp.py.
#
import errno
from itertools import chain
import locale
import logging
import os
import shutil
import signal
import socket
import subprocess
import tempfile
from types import TracebackType
from typing import (
Any,
BinaryIO,
Dict,
List,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
)
from qemu.qmp import SocketAddrT
from qemu.qmp.legacy import (
QEMUMonitorProtocol,
QMPMessage,
QMPReturnValue,
)
from . import console_socket
LOG = logging.getLogger(__name__)
class QEMUMachineError(Exception):
"""
Exception called when an error in QEMUMachine happens.
"""
class QEMUMachineAddDeviceError(QEMUMachineError):
"""
Exception raised when a request to add a device can not be fulfilled
The failures are caused by limitations, lack of information or conflicting
requests on the QEMUMachine methods. This exception does not represent
failures reported by the QEMU binary itself.
"""
class VMLaunchFailure(QEMUMachineError):
"""
Exception raised when a VM launch was attempted, but failed.
"""
def __init__(self, exitcode: Optional[int],
command: str, output: Optional[str]):
super().__init__(exitcode, command, output)
self.exitcode = exitcode
self.command = command
self.output = output
def __str__(self) -> str:
ret = ''
if self.__cause__ is not None:
name = type(self.__cause__).__name__
reason = str(self.__cause__)
if reason:
ret += f"{name}: {reason}"
else:
ret += f"{name}"
ret += '\n'
if self.exitcode is not None:
ret += f"\tExit code: {self.exitcode}\n"
ret += f"\tCommand: {self.command}\n"
ret += f"\tOutput: {self.output}\n"
return ret
class AbnormalShutdown(QEMUMachineError):
"""
Exception raised when a graceful shutdown was requested, but not performed.
"""
_T = TypeVar('_T', bound='QEMUMachine')
class QEMUMachine:
"""
A QEMU VM.
Use this object as a context manager to ensure
the QEMU process terminates::
with VM(binary) as vm:
...
# vm is guaranteed to be shut down here
"""
# pylint: disable=too-many-instance-attributes, too-many-public-methods
def __init__(self,
binary: str,
args: Sequence[str] = (),
wrapper: Sequence[str] = (),
name: Optional[str] = None,
base_temp_dir: str = "/var/tmp",
monitor_address: Optional[SocketAddrT] = None,
sock_dir: Optional[str] = None,
drain_console: bool = False,
console_log: Optional[str] = None,
log_dir: Optional[str] = None,
qmp_timer: Optional[float] = 30):
'''
Initialize a QEMUMachine
@param binary: path to the qemu binary
@param args: list of extra arguments
@param wrapper: list of arguments used as prefix to qemu binary
@param name: prefix for socket and log file names (default: qemu-PID)
@param base_temp_dir: default location where temp files are created
@param monitor_address: address for QMP monitor
@param sock_dir: where to create socket (defaults to base_temp_dir)
@param drain_console: (optional) True to drain console socket to buffer
@param console_log: (optional) path to console log file
@param log_dir: where to create and keep log files
@param qmp_timer: (optional) default QMP socket timeout
@note: Qemu process is not started until launch() is used.
'''
# pylint: disable=too-many-arguments
# Direct user configuration
self._binary = binary
self._args = list(args)
self._wrapper = wrapper
self._qmp_timer = qmp_timer
self._name = name or f"{id(self):x}"
self._sock_pair: Optional[Tuple[socket.socket, socket.socket]] = None
self._temp_dir: Optional[str] = None
self._base_temp_dir = base_temp_dir
self._sock_dir = sock_dir
self._log_dir = log_dir
self._monitor_address = monitor_address
self._console_log_path = console_log
if self._console_log_path:
# In order to log the console, buffering needs to be enabled.
self._drain_console = True
else:
self._drain_console = drain_console
# Runstate
self._qemu_log_path: Optional[str] = None
self._qemu_log_file: Optional[BinaryIO] = None
self._popen: Optional['subprocess.Popen[bytes]'] = None
self._events: List[QMPMessage] = []
self._iolog: Optional[str] = None
self._qmp_set = True # Enable QMP monitor by default.
self._qmp_connection: Optional[QEMUMonitorProtocol] = None
self._qemu_full_args: Tuple[str, ...] = ()
self._launched = False
self._machine: Optional[str] = None
self._console_index = 0
self._console_set = False
self._console_device_type: Optional[str] = None
self._console_address = os.path.join(
self.sock_dir, f"{self._name}.con"
)
self._console_socket: Optional[socket.socket] = None
self._remove_files: List[str] = []
self._user_killed = False
self._quit_issued = False
def __enter__(self: _T) -> _T:
return self
def __exit__(self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType]) -> None:
self.shutdown()
def add_monitor_null(self) -> None:
"""
This can be used to add an unused monitor instance.
"""
self._args.append('-monitor')
self._args.append('null')
def add_fd(self: _T, fd: int, fdset: int,
opaque: str, opts: str = '') -> _T:
"""
Pass a file descriptor to the VM
"""
options = ['fd=%d' % fd,
'set=%d' % fdset,
'opaque=%s' % opaque]
if opts:
options.append(opts)
# This did not exist before 3.4, but since then it is
# mandatory for our purpose
if hasattr(os, 'set_inheritable'):
os.set_inheritable(fd, True)
self._args.append('-add-fd')
self._args.append(','.join(options))
return self
def send_fd_scm(self, fd: Optional[int] = None,
file_path: Optional[str] = None) -> int:
"""
Send an fd or file_path to the remote via SCM_RIGHTS.
Exactly one of fd and file_path must be given. If it is
file_path, the file will be opened read-only and the new file
descriptor will be sent to the remote.
"""
if file_path is not None:
assert fd is None
with open(file_path, "rb") as passfile:
fd = passfile.fileno()
self._qmp.send_fd_scm(fd)
else:
assert fd is not None
self._qmp.send_fd_scm(fd)
return 0
@staticmethod
def _remove_if_exists(path: str) -> None:
"""
Remove file object at path if it exists
"""
try:
os.remove(path)
except OSError as exception:
if exception.errno == errno.ENOENT:
return
raise
def is_running(self) -> bool:
"""Returns true if the VM is running."""
return self._popen is not None and self._popen.poll() is None
@property
def _subp(self) -> 'subprocess.Popen[bytes]':
if self._popen is None:
raise QEMUMachineError('Subprocess pipe not present')
return self._popen
def exitcode(self) -> Optional[int]:
"""Returns the exit code if possible, or None."""
if self._popen is None:
return None
return self._popen.poll()
def get_pid(self) -> Optional[int]:
"""Returns the PID of the running process, or None."""
if not self.is_running():
return None
return self._subp.pid
def _load_io_log(self) -> None:
# Assume that the output encoding of QEMU's terminal output is
# defined by our locale. If indeterminate, allow open() to fall
# back to the platform default.
_, encoding = locale.getlocale()
if self._qemu_log_path is not None:
with open(self._qemu_log_path, "r", encoding=encoding) as iolog:
self._iolog = iolog.read()
@property
def _base_args(self) -> List[str]:
args = ['-display', 'none', '-vga', 'none']
if self._qmp_set:
if self._sock_pair:
fd = self._sock_pair[0].fileno()
os.set_inheritable(fd, True)
moncdev = f"socket,id=mon,fd={fd}"
elif isinstance(self._monitor_address, tuple):
moncdev = "socket,id=mon,host={},port={}".format(
*self._monitor_address
)
else:
moncdev = f"socket,id=mon,path={self._monitor_address}"
args.extend(['-chardev', moncdev, '-mon',
'chardev=mon,mode=control'])
if self._machine is not None:
args.extend(['-machine', self._machine])
for _ in range(self._console_index):
args.extend(['-serial', 'null'])
if self._console_set:
chardev = ('socket,id=console,path=%s,server=on,wait=off' %
self._console_address)
args.extend(['-chardev', chardev])
if self._console_device_type is None:
args.extend(['-serial', 'chardev:console'])
else:
device = '%s,chardev=console' % self._console_device_type
args.extend(['-device', device])
return args
@property
def args(self) -> List[str]:
"""Returns the list of arguments given to the QEMU binary."""
return self._args
def _pre_launch(self) -> None:
if self._console_set:
self._remove_files.append(self._console_address)
if self._qmp_set:
monitor_address = None
sock = None
if self._monitor_address is None:
self._sock_pair = socket.socketpair()
sock = self._sock_pair[1]
if isinstance(self._monitor_address, str):
self._remove_files.append(self._monitor_address)
monitor_address = self._monitor_address
self._qmp_connection = QEMUMonitorProtocol(
address=monitor_address,
sock=sock,
server=True,
nickname=self._name
)
# NOTE: Make sure any opened resources are *definitely* freed in
# _post_shutdown()!
# pylint: disable=consider-using-with
self._qemu_log_path = os.path.join(self.log_dir, self._name + ".log")
self._qemu_log_file = open(self._qemu_log_path, 'wb')
self._iolog = None
self._qemu_full_args = tuple(chain(
self._wrapper,
[self._binary],
self._base_args,
self._args
))
def _post_launch(self) -> None:
if self._sock_pair:
self._sock_pair[0].close()
if self._qmp_connection:
self._qmp.accept(self._qmp_timer)
def _close_qemu_log_file(self) -> None:
if self._qemu_log_file is not None:
self._qemu_log_file.close()
self._qemu_log_file = None
def _post_shutdown(self) -> None:
"""
Called to cleanup the VM instance after the process has exited.
May also be called after a failed launch.
"""
LOG.debug("Cleaning up after VM process")
try:
self._close_qmp_connection()
except Exception as err: # pylint: disable=broad-except
LOG.warning(
"Exception closing QMP connection: %s",
str(err) if str(err) else type(err).__name__
)
finally:
assert self._qmp_connection is None
self._close_qemu_log_file()
self._load_io_log()
self._qemu_log_path = None
if self._temp_dir is not None:
shutil.rmtree(self._temp_dir)
self._temp_dir = None
while len(self._remove_files) > 0:
self._remove_if_exists(self._remove_files.pop())
exitcode = self.exitcode()
if (exitcode is not None and exitcode < 0
and not (self._user_killed and exitcode == -signal.SIGKILL)):
msg = 'qemu received signal %i; command: "%s"'
if self._qemu_full_args:
command = ' '.join(self._qemu_full_args)
else:
command = ''
LOG.warning(msg, -int(exitcode), command)
self._quit_issued = False
self._user_killed = False
self._launched = False
def launch(self) -> None:
"""
Launch the VM and make sure we cleanup and expose the
command line/output in case of exception
"""
if self._launched:
raise QEMUMachineError('VM already launched')
try:
self._launch()
except BaseException as exc:
# We may have launched the process but it may
# have exited before we could connect via QMP.
# Assume the VM didn't launch or is exiting.
# If we don't wait for the process, exitcode() may still be
# 'None' by the time control is ceded back to the caller.
if self._launched:
self.wait()
else:
self._post_shutdown()
if isinstance(exc, Exception):
raise VMLaunchFailure(
exitcode=self.exitcode(),
command=' '.join(self._qemu_full_args),
output=self._iolog
) from exc
# Don't wrap 'BaseException'; doing so would downgrade
# that exception. However, we still want to clean up.
raise
def _launch(self) -> None:
"""
Launch the VM and establish a QMP connection
"""
self._pre_launch()
LOG.debug('VM launch command: %r', ' '.join(self._qemu_full_args))
# Cleaning up of this subprocess is guaranteed by _do_shutdown.
# pylint: disable=consider-using-with
self._popen = subprocess.Popen(self._qemu_full_args,
stdin=subprocess.DEVNULL,
stdout=self._qemu_log_file,
stderr=subprocess.STDOUT,
shell=False,
close_fds=False)
self._launched = True
self._post_launch()
def _close_qmp_connection(self) -> None:
"""
Close the underlying QMP connection, if any.
Dutifully report errors that occurred while closing, but assume
that any error encountered indicates an abnormal termination
process and not a failure to close.
"""
if self._qmp_connection is None:
return
try:
self._qmp.close()
except EOFError:
# EOF can occur as an Exception here when using the Async
# QMP backend. It indicates that the server closed the
# stream. If we successfully issued 'quit' at any point,
# then this was expected. If the remote went away without
# our permission, it's worth reporting that as an abnormal
# shutdown case.
if not (self._user_killed or self._quit_issued):
raise
finally:
self._qmp_connection = None
def _early_cleanup(self) -> None:
"""
Perform any cleanup that needs to happen before the VM exits.
This method may be called twice upon shutdown, once each by soft
and hard shutdown in failover scenarios.
"""
# If we keep the console socket open, we may deadlock waiting
# for QEMU to exit, while QEMU is waiting for the socket to
# become writable.
if self._console_socket is not None:
LOG.debug("Closing console socket")
self._console_socket.close()
self._console_socket = None
def _hard_shutdown(self) -> None:
"""
Perform early cleanup, kill the VM, and wait for it to terminate.
:raise subprocess.Timeout: When timeout is exceeds 60 seconds
waiting for the QEMU process to terminate.
"""
LOG.debug("Performing hard shutdown")
self._early_cleanup()
self._subp.kill()
self._subp.wait(timeout=60)
def _soft_shutdown(self, timeout: Optional[int]) -> None:
"""
Perform early cleanup, attempt to gracefully shut down the VM, and wait
for it to terminate.
:param timeout: Timeout in seconds for graceful shutdown.
A value of None is an infinite wait.
:raise ConnectionReset: On QMP communication errors
:raise subprocess.TimeoutExpired: When timeout is exceeded waiting for
the QEMU process to terminate.
"""
LOG.debug("Attempting graceful termination")
self._early_cleanup()
if self._quit_issued:
LOG.debug(
"Anticipating QEMU termination due to prior 'quit' command, "
"or explicit call to wait()"
)
else:
LOG.debug("Politely asking QEMU to terminate")
if self._qmp_connection:
try:
if not self._quit_issued:
# May raise ExecInterruptedError or StateError if the
# connection dies or has *already* died.
self.qmp('quit')
finally:
# Regardless, we want to quiesce the connection.
self._close_qmp_connection()
elif not self._quit_issued:
LOG.debug(
"Not anticipating QEMU quit and no QMP connection present, "
"issuing SIGTERM"
)
self._subp.terminate()
# May raise subprocess.TimeoutExpired
LOG.debug(
"Waiting (timeout=%s) for QEMU process (pid=%s) to terminate",
timeout, self._subp.pid
)
self._subp.wait(timeout=timeout)
def _do_shutdown(self, timeout: Optional[int]) -> None:
"""
Attempt to shutdown the VM gracefully; fallback to a hard shutdown.
:param timeout: Timeout in seconds for graceful shutdown.
A value of None is an infinite wait.
:raise AbnormalShutdown: When the VM could not be shut down gracefully.
The inner exception will likely be ConnectionReset or
subprocess.TimeoutExpired. In rare cases, non-graceful termination
may result in its own exceptions, likely subprocess.TimeoutExpired.
"""
try:
self._soft_shutdown(timeout)
except Exception as exc:
if isinstance(exc, subprocess.TimeoutExpired):
LOG.debug("Timed out waiting for QEMU process to exit")
LOG.debug("Graceful shutdown failed", exc_info=True)
LOG.debug("Falling back to hard shutdown")
self._hard_shutdown()
raise AbnormalShutdown("Could not perform graceful shutdown") \
from exc
def shutdown(self,
hard: bool = False,
timeout: Optional[int] = 30) -> None:
"""
Terminate the VM (gracefully if possible) and perform cleanup.
Cleanup will always be performed.
If the VM has not yet been launched, or shutdown(), wait(), or kill()
have already been called, this method does nothing.
:param hard: When true, do not attempt graceful shutdown, and
suppress the SIGKILL warning log message.
:param timeout: Optional timeout in seconds for graceful shutdown.
Default 30 seconds, A `None` value is an infinite wait.
"""
if not self._launched:
return
LOG.debug("Shutting down VM appliance; timeout=%s", timeout)
if hard:
LOG.debug("Caller requests immediate termination of QEMU process.")
try:
if hard:
self._user_killed = True
self._hard_shutdown()
else:
self._do_shutdown(timeout)
finally:
self._post_shutdown()
def kill(self) -> None:
"""
Terminate the VM forcefully, wait for it to exit, and perform cleanup.
"""
self.shutdown(hard=True)
def wait(self, timeout: Optional[int] = 30) -> None:
"""
Wait for the VM to power off and perform post-shutdown cleanup.
:param timeout: Optional timeout in seconds. Default 30 seconds.
A value of `None` is an infinite wait.
"""
self._quit_issued = True
self.shutdown(timeout=timeout)
def set_qmp_monitor(self, enabled: bool = True) -> None:
"""
Set the QMP monitor.
@param enabled: if False, qmp monitor options will be removed from
the base arguments of the resulting QEMU command
line. Default is True.
.. note:: Call this function before launch().
"""
self._qmp_set = enabled
@property
def _qmp(self) -> QEMUMonitorProtocol:
if self._qmp_connection is None:
raise QEMUMachineError("Attempt to access QMP with no connection")
return self._qmp_connection
@classmethod
def _qmp_args(cls, conv_keys: bool,
args: Dict[str, Any]) -> Dict[str, object]:
if conv_keys:
return {k.replace('_', '-'): v for k, v in args.items()}
return args
def qmp(self, cmd: str,
args_dict: Optional[Dict[str, object]] = None,
conv_keys: Optional[bool] = None,
**args: Any) -> QMPMessage:
"""
Invoke a QMP command and return the response dict
"""
if args_dict is not None:
assert not args
assert conv_keys is None
args = args_dict
conv_keys = False
if conv_keys is None:
conv_keys = True
qmp_args = self._qmp_args(conv_keys, args)
ret = self._qmp.cmd(cmd, args=qmp_args)
if cmd == 'quit' and 'error' not in ret and 'return' in ret:
self._quit_issued = True
return ret
def command(self, cmd: str,
conv_keys: bool = True,
**args: Any) -> QMPReturnValue:
"""
Invoke a QMP command.
On success return the response dict.
On failure raise an exception.
"""
qmp_args = self._qmp_args(conv_keys, args)
ret = self._qmp.command(cmd, **qmp_args)
if cmd == 'quit':
self._quit_issued = True
return ret
def get_qmp_event(self, wait: bool = False) -> Optional[QMPMessage]:
"""
Poll for one queued QMP events and return it
"""
if self._events:
return self._events.pop(0)
return self._qmp.pull_event(wait=wait)
def get_qmp_events(self, wait: bool = False) -> List[QMPMessage]:
"""
Poll for queued QMP events and return a list of dicts
"""
events = self._qmp.get_events(wait=wait)
events.extend(self._events)
del self._events[:]
return events
@staticmethod
def event_match(event: Any, match: Optional[Any]) -> bool:
"""
Check if an event matches optional match criteria.
The match criteria takes the form of a matching subdict. The event is
checked to be a superset of the subdict, recursively, with matching
values whenever the subdict values are not None.
This has a limitation that you cannot explicitly check for None values.
Examples, with the subdict queries on the left:
- None matches any object.
- {"foo": None} matches {"foo": {"bar": 1}}
- {"foo": None} matches {"foo": 5}
- {"foo": {"abc": None}} does not match {"foo": {"bar": 1}}
- {"foo": {"rab": 2}} matches {"foo": {"bar": 1, "rab": 2}}
"""
if match is None:
return True
try:
for key in match:
if key in event:
if not QEMUMachine.event_match(event[key], match[key]):
return False
else:
return False
return True
except TypeError:
# either match or event wasn't iterable (not a dict)
return bool(match == event)
def event_wait(self, name: str,
timeout: float = 60.0,
match: Optional[QMPMessage] = None) -> Optional[QMPMessage]:
"""
event_wait waits for and returns a named event from QMP with a timeout.
name: The event to wait for.
timeout: QEMUMonitorProtocol.pull_event timeout parameter.
match: Optional match criteria. See event_match for details.
"""
return self.events_wait([(name, match)], timeout)
def events_wait(self,
events: Sequence[Tuple[str, Any]],
timeout: float = 60.0) -> Optional[QMPMessage]:
"""
events_wait waits for and returns a single named event from QMP.
In the case of multiple qualifying events, this function returns the
first one.
:param events: A sequence of (name, match_criteria) tuples.
The match criteria are optional and may be None.
See event_match for details.
:param timeout: Optional timeout, in seconds.
See QEMUMonitorProtocol.pull_event.
:raise asyncio.TimeoutError:
If timeout was non-zero and no matching events were found.
:return: A QMP event matching the filter criteria.
If timeout was 0 and no event matched, None.
"""
def _match(event: QMPMessage) -> bool:
for name, match in events:
if event['event'] == name and self.event_match(event, match):
return True
return False
event: Optional[QMPMessage]
# Search cached events
for event in self._events:
if _match(event):
self._events.remove(event)
return event
# Poll for new events
while True:
event = self._qmp.pull_event(wait=timeout)
if event is None:
# NB: None is only returned when timeout is false-ish.
# Timeouts raise asyncio.TimeoutError instead!
break
if _match(event):
return event
self._events.append(event)
return None
def get_log(self) -> Optional[str]:
"""
After self.shutdown or failed qemu execution, this returns the output
of the qemu process.
"""
return self._iolog
def add_args(self, *args: str) -> None:
"""
Adds to the list of extra arguments to be given to the QEMU binary
"""
self._args.extend(args)
def set_machine(self, machine_type: str) -> None:
"""
Sets the machine type
If set, the machine type will be added to the base arguments
of the resulting QEMU command line.
"""
self._machine = machine_type
def set_console(self,
device_type: Optional[str] = None,
console_index: int = 0) -> None:
"""
Sets the device type for a console device
If set, the console device and a backing character device will
be added to the base arguments of the resulting QEMU command
line.
This is a convenience method that will either use the provided
device type, or default to a "-serial chardev:console" command
line argument.
The actual setting of command line arguments will be be done at
machine launch time, as it depends on the temporary directory
to be created.
@param device_type: the device type, such as "isa-serial". If
None is given (the default value) a "-serial
chardev:console" command line argument will
be used instead, resorting to the machine's
default device type.
@param console_index: the index of the console device to use.
If not zero, the command line will create
'index - 1' consoles and connect them to
the 'null' backing character device.
"""
self._console_set = True
self._console_device_type = device_type
self._console_index = console_index
@property
def console_socket(self) -> socket.socket:
"""
Returns a socket connected to the console
"""
if self._console_socket is None:
self._console_socket = console_socket.ConsoleSocket(
self._console_address,
file=self._console_log_path,
drain=self._drain_console)
return self._console_socket
@property
def temp_dir(self) -> str:
"""
Returns a temporary directory to be used for this machine
"""
if self._temp_dir is None:
self._temp_dir = tempfile.mkdtemp(prefix="qemu-machine-",
dir=self._base_temp_dir)
return self._temp_dir
@property
def sock_dir(self) -> str:
"""
Returns the directory used for sockfiles by this machine.
"""
if self._sock_dir:
return self._sock_dir
return self.temp_dir
@property
def log_dir(self) -> str:
"""
Returns a directory to be used for writing logs
"""
if self._log_dir is None:
return self.temp_dir
return self._log_dir
| 31,109 | 33.29989 | 79 | py |
qemu | qemu-master/python/qemu/machine/qtest.py | """
QEMU qtest library
qtest offers the QEMUQtestProtocol and QEMUQTestMachine classes, which
offer a connection to QEMU's qtest protocol socket, and a qtest-enabled
subclass of QEMUMachine, respectively.
"""
# Copyright (C) 2015 Red Hat Inc.
#
# Authors:
# Fam Zheng <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# Based on qmp.py.
#
import os
import socket
from typing import (
List,
Optional,
Sequence,
TextIO,
)
from qemu.qmp import SocketAddrT
from .machine import QEMUMachine
class QEMUQtestProtocol:
"""
QEMUQtestProtocol implements a connection to a qtest socket.
:param address: QEMU address, can be either a unix socket path (string)
or a tuple in the form ( address, port ) for a TCP
connection
:param server: server mode, listens on the socket (bool)
:raise socket.error: on socket connection errors
.. note::
No connection is established by __init__(), this is done
by the connect() or accept() methods.
"""
def __init__(self, address: SocketAddrT,
server: bool = False):
self._address = address
self._sock = self._get_sock()
self._sockfile: Optional[TextIO] = None
if server:
self._sock.bind(self._address)
self._sock.listen(1)
def _get_sock(self) -> socket.socket:
if isinstance(self._address, tuple):
family = socket.AF_INET
else:
family = socket.AF_UNIX
return socket.socket(family, socket.SOCK_STREAM)
def connect(self) -> None:
"""
Connect to the qtest socket.
@raise socket.error on socket connection errors
"""
self._sock.connect(self._address)
self._sockfile = self._sock.makefile(mode='r')
def accept(self) -> None:
"""
Await connection from QEMU.
@raise socket.error on socket connection errors
"""
self._sock, _ = self._sock.accept()
self._sockfile = self._sock.makefile(mode='r')
def cmd(self, qtest_cmd: str) -> str:
"""
Send a qtest command on the wire.
@param qtest_cmd: qtest command text to be sent
"""
assert self._sockfile is not None
self._sock.sendall((qtest_cmd + "\n").encode('utf-8'))
resp = self._sockfile.readline()
return resp
def close(self) -> None:
"""
Close this socket.
"""
self._sock.close()
if self._sockfile:
self._sockfile.close()
self._sockfile = None
def settimeout(self, timeout: Optional[float]) -> None:
"""Set a timeout, in seconds."""
self._sock.settimeout(timeout)
class QEMUQtestMachine(QEMUMachine):
"""
A QEMU VM, with a qtest socket available.
"""
def __init__(self,
binary: str,
args: Sequence[str] = (),
wrapper: Sequence[str] = (),
name: Optional[str] = None,
base_temp_dir: str = "/var/tmp",
sock_dir: Optional[str] = None,
qmp_timer: Optional[float] = None):
# pylint: disable=too-many-arguments
if name is None:
name = "qemu-%d" % os.getpid()
if sock_dir is None:
sock_dir = base_temp_dir
super().__init__(binary, args, wrapper=wrapper, name=name,
base_temp_dir=base_temp_dir,
sock_dir=sock_dir, qmp_timer=qmp_timer)
self._qtest: Optional[QEMUQtestProtocol] = None
self._qtest_path = os.path.join(sock_dir, name + "-qtest.sock")
@property
def _base_args(self) -> List[str]:
args = super()._base_args
args.extend([
'-qtest', f"unix:path={self._qtest_path}",
'-accel', 'qtest'
])
return args
def _pre_launch(self) -> None:
super()._pre_launch()
self._qtest = QEMUQtestProtocol(self._qtest_path, server=True)
def _post_launch(self) -> None:
assert self._qtest is not None
super()._post_launch()
self._qtest.accept()
def _post_shutdown(self) -> None:
super()._post_shutdown()
self._remove_if_exists(self._qtest_path)
def qtest(self, cmd: str) -> str:
"""
Send a qtest command to the guest.
:param cmd: qtest command to send
:return: qtest server response
"""
if self._qtest is None:
raise RuntimeError("qtest socket not available")
return self._qtest.cmd(cmd)
| 4,696 | 27.640244 | 75 | py |
qemu | qemu-master/python/qemu/machine/console_socket.py | """
QEMU Console Socket Module:
This python module implements a ConsoleSocket object,
which can drain a socket and optionally dump the bytes to file.
"""
# Copyright 2020 Linaro
#
# Authors:
# Robert Foley <[email protected]>
#
# This code is licensed under the GPL version 2 or later. See
# the COPYING file in the top-level directory.
#
from collections import deque
import socket
import threading
import time
from typing import Deque, Optional
class ConsoleSocket(socket.socket):
"""
ConsoleSocket represents a socket attached to a char device.
Optionally (if drain==True), drains the socket and places the bytes
into an in memory buffer for later processing.
Optionally a file path can be passed in and we will also
dump the characters to this file for debugging purposes.
"""
def __init__(self, address: str, file: Optional[str] = None,
drain: bool = False):
self._recv_timeout_sec = 300.0
self._sleep_time = 0.5
self._buffer: Deque[int] = deque()
socket.socket.__init__(self, socket.AF_UNIX, socket.SOCK_STREAM)
self.connect(address)
self._logfile = None
if file:
# pylint: disable=consider-using-with
self._logfile = open(file, "bw")
self._open = True
self._drain_thread = None
if drain:
self._drain_thread = self._thread_start()
def __repr__(self) -> str:
tmp = super().__repr__()
tmp = tmp.rstrip(">")
tmp = "%s, logfile=%s, drain_thread=%s>" % (tmp, self._logfile,
self._drain_thread)
return tmp
def _drain_fn(self) -> None:
"""Drains the socket and runs while the socket is open."""
while self._open:
try:
self._drain_socket()
except socket.timeout:
# The socket is expected to timeout since we set a
# short timeout to allow the thread to exit when
# self._open is set to False.
time.sleep(self._sleep_time)
def _thread_start(self) -> threading.Thread:
"""Kick off a thread to drain the socket."""
# Configure socket to not block and timeout.
# This allows our drain thread to not block
# on receive and exit smoothly.
socket.socket.setblocking(self, False)
socket.socket.settimeout(self, 1)
drain_thread = threading.Thread(target=self._drain_fn)
drain_thread.daemon = True
drain_thread.start()
return drain_thread
def close(self) -> None:
"""Close the base object and wait for the thread to terminate"""
if self._open:
self._open = False
if self._drain_thread is not None:
thread, self._drain_thread = self._drain_thread, None
thread.join()
socket.socket.close(self)
if self._logfile:
self._logfile.close()
self._logfile = None
def _drain_socket(self) -> None:
"""process arriving characters into in memory _buffer"""
data = socket.socket.recv(self, 1)
if self._logfile:
self._logfile.write(data)
self._logfile.flush()
self._buffer.extend(data)
def recv(self, bufsize: int = 1, flags: int = 0) -> bytes:
"""Return chars from in memory buffer.
Maintains the same API as socket.socket.recv.
"""
if self._drain_thread is None:
# Not buffering the socket, pass thru to socket.
return socket.socket.recv(self, bufsize, flags)
assert not flags, "Cannot pass flags to recv() in drained mode"
start_time = time.time()
while len(self._buffer) < bufsize:
time.sleep(self._sleep_time)
elapsed_sec = time.time() - start_time
if elapsed_sec > self._recv_timeout_sec:
raise socket.timeout
return bytes((self._buffer.popleft() for i in range(bufsize)))
def setblocking(self, value: bool) -> None:
"""When not draining we pass thru to the socket,
since when draining we control socket blocking.
"""
if self._drain_thread is None:
socket.socket.setblocking(self, value)
def settimeout(self, value: Optional[float]) -> None:
"""When not draining we pass thru to the socket,
since when draining we control the timeout.
"""
if value is not None:
self._recv_timeout_sec = value
if self._drain_thread is None:
socket.socket.settimeout(self, value)
| 4,685 | 35.046154 | 72 | py |
qemu | qemu-master/python/qemu/machine/__init__.py | """
QEMU development and testing library.
This library provides a few high-level classes for driving QEMU from a
test suite, not intended for production use.
| QEMUQtestProtocol: send/receive qtest messages.
| QEMUMachine: Configure and Boot a QEMU VM
| +-- QEMUQtestMachine: VM class, with a qtest socket.
"""
# Copyright (C) 2020-2021 John Snow for Red Hat Inc.
# Copyright (C) 2015-2016 Red Hat Inc.
# Copyright (C) 2012 IBM Corp.
#
# Authors:
# John Snow <[email protected]>
# Fam Zheng <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
#
# pylint: disable=import-error
# see: https://github.com/PyCQA/pylint/issues/3624
# see: https://github.com/PyCQA/pylint/issues/3651
from .machine import QEMUMachine
from .qtest import QEMUQtestMachine, QEMUQtestProtocol
__all__ = (
'QEMUMachine',
'QEMUQtestProtocol',
'QEMUQtestMachine',
)
| 945 | 24.567568 | 71 | py |
qemu | qemu-master/python/tests/protocol.py | import asyncio
from contextlib import contextmanager
import os
import socket
from tempfile import TemporaryDirectory
import avocado
from qemu.qmp import ConnectError, Runstate
from qemu.qmp.protocol import AsyncProtocol, StateError
from qemu.qmp.util import asyncio_run, create_task
class NullProtocol(AsyncProtocol[None]):
"""
NullProtocol is a test mockup of an AsyncProtocol implementation.
It adds a fake_session instance variable that enables a code path
that bypasses the actual connection logic, but still allows the
reader/writers to start.
Because the message type is defined as None, an asyncio.Event named
'trigger_input' is created that prohibits the reader from
incessantly being able to yield None; this event can be poked to
simulate an incoming message.
For testing symmetry with do_recv, an interface is added to "send" a
Null message.
For testing purposes, a "simulate_disconnection" method is also
added which allows us to trigger a bottom half disconnect without
injecting any real errors into the reader/writer loops; in essence
it performs exactly half of what disconnect() normally does.
"""
def __init__(self, name=None):
self.fake_session = False
self.trigger_input: asyncio.Event
super().__init__(name)
async def _establish_session(self):
self.trigger_input = asyncio.Event()
await super()._establish_session()
async def _do_start_server(self, address, ssl=None):
if self.fake_session:
self._accepted = asyncio.Event()
self._set_state(Runstate.CONNECTING)
await asyncio.sleep(0)
else:
await super()._do_start_server(address, ssl)
async def _do_accept(self):
if self.fake_session:
self._accepted = None
else:
await super()._do_accept()
async def _do_connect(self, address, ssl=None):
if self.fake_session:
self._set_state(Runstate.CONNECTING)
await asyncio.sleep(0)
else:
await super()._do_connect(address, ssl)
async def _do_recv(self) -> None:
await self.trigger_input.wait()
self.trigger_input.clear()
def _do_send(self, msg: None) -> None:
pass
async def send_msg(self) -> None:
await self._outgoing.put(None)
async def simulate_disconnect(self) -> None:
"""
Simulates a bottom-half disconnect.
This method schedules a disconnection but does not wait for it
to complete. This is used to put the loop into the DISCONNECTING
state without fully quiescing it back to IDLE. This is normally
something you cannot coax AsyncProtocol to do on purpose, but it
will be similar to what happens with an unhandled Exception in
the reader/writer.
Under normal circumstances, the library design requires you to
await on disconnect(), which awaits the disconnect task and
returns bottom half errors as a pre-condition to allowing the
loop to return back to IDLE.
"""
self._schedule_disconnect()
class LineProtocol(AsyncProtocol[str]):
def __init__(self, name=None):
super().__init__(name)
self.rx_history = []
async def _do_recv(self) -> str:
raw = await self._readline()
msg = raw.decode()
self.rx_history.append(msg)
return msg
def _do_send(self, msg: str) -> None:
assert self._writer is not None
self._writer.write(msg.encode() + b'\n')
async def send_msg(self, msg: str) -> None:
await self._outgoing.put(msg)
def run_as_task(coro, allow_cancellation=False):
"""
Run a given coroutine as a task.
Optionally, wrap it in a try..except block that allows this
coroutine to be canceled gracefully.
"""
async def _runner():
try:
await coro
except asyncio.CancelledError:
if allow_cancellation:
return
raise
return create_task(_runner())
@contextmanager
def jammed_socket():
"""
Opens up a random unused TCP port on localhost, then jams it.
"""
socks = []
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('127.0.0.1', 0))
sock.listen(1)
address = sock.getsockname()
socks.append(sock)
# I don't *fully* understand why, but it takes *two* un-accepted
# connections to start jamming the socket.
for _ in range(2):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(address)
socks.append(sock)
yield address
finally:
for sock in socks:
sock.close()
class Smoke(avocado.Test):
def setUp(self):
self.proto = NullProtocol()
def test__repr__(self):
self.assertEqual(
repr(self.proto),
"<NullProtocol runstate=IDLE>"
)
def testRunstate(self):
self.assertEqual(
self.proto.runstate,
Runstate.IDLE
)
def testDefaultName(self):
self.assertEqual(
self.proto.name,
None
)
def testLogger(self):
self.assertEqual(
self.proto.logger.name,
'qemu.qmp.protocol'
)
def testName(self):
self.proto = NullProtocol('Steve')
self.assertEqual(
self.proto.name,
'Steve'
)
self.assertEqual(
self.proto.logger.name,
'qemu.qmp.protocol.Steve'
)
self.assertEqual(
repr(self.proto),
"<NullProtocol name='Steve' runstate=IDLE>"
)
class TestBase(avocado.Test):
def setUp(self):
self.proto = NullProtocol(type(self).__name__)
self.assertEqual(self.proto.runstate, Runstate.IDLE)
self.runstate_watcher = None
def tearDown(self):
self.assertEqual(self.proto.runstate, Runstate.IDLE)
async def _asyncSetUp(self):
pass
async def _asyncTearDown(self):
if self.runstate_watcher:
await self.runstate_watcher
@staticmethod
def async_test(async_test_method):
"""
Decorator; adds SetUp and TearDown to async tests.
"""
async def _wrapper(self, *args, **kwargs):
loop = asyncio.get_event_loop()
loop.set_debug(True)
await self._asyncSetUp()
await async_test_method(self, *args, **kwargs)
await self._asyncTearDown()
return _wrapper
# Definitions
# The states we expect a "bad" connect/accept attempt to transition through
BAD_CONNECTION_STATES = (
Runstate.CONNECTING,
Runstate.DISCONNECTING,
Runstate.IDLE,
)
# The states we expect a "good" session to transition through
GOOD_CONNECTION_STATES = (
Runstate.CONNECTING,
Runstate.RUNNING,
Runstate.DISCONNECTING,
Runstate.IDLE,
)
# Helpers
async def _watch_runstates(self, *states):
"""
This launches a task alongside (most) tests below to confirm that
the sequence of runstate changes that occur is exactly as
anticipated.
"""
async def _watcher():
for state in states:
new_state = await self.proto.runstate_changed()
self.assertEqual(
new_state,
state,
msg=f"Expected state '{state.name}'",
)
self.runstate_watcher = create_task(_watcher())
# Kick the loop and force the task to block on the event.
await asyncio.sleep(0)
class State(TestBase):
@TestBase.async_test
async def testSuperfluousDisconnect(self):
"""
Test calling disconnect() while already disconnected.
"""
await self._watch_runstates(
Runstate.DISCONNECTING,
Runstate.IDLE,
)
await self.proto.disconnect()
class Connect(TestBase):
"""
Tests primarily related to calling Connect().
"""
async def _bad_connection(self, family: str):
assert family in ('INET', 'UNIX')
if family == 'INET':
await self.proto.connect(('127.0.0.1', 0))
elif family == 'UNIX':
await self.proto.connect('/dev/null')
async def _hanging_connection(self):
with jammed_socket() as addr:
await self.proto.connect(addr)
async def _bad_connection_test(self, family: str):
await self._watch_runstates(*self.BAD_CONNECTION_STATES)
with self.assertRaises(ConnectError) as context:
await self._bad_connection(family)
self.assertIsInstance(context.exception.exc, OSError)
self.assertEqual(
context.exception.error_message,
"Failed to establish connection"
)
@TestBase.async_test
async def testBadINET(self):
"""
Test an immediately rejected call to an IP target.
"""
await self._bad_connection_test('INET')
@TestBase.async_test
async def testBadUNIX(self):
"""
Test an immediately rejected call to a UNIX socket target.
"""
await self._bad_connection_test('UNIX')
@TestBase.async_test
async def testCancellation(self):
"""
Test what happens when a connection attempt is aborted.
"""
# Note that accept() cannot be cancelled outright, as it isn't a task.
# However, we can wrap it in a task and cancel *that*.
await self._watch_runstates(*self.BAD_CONNECTION_STATES)
task = run_as_task(self._hanging_connection(), allow_cancellation=True)
state = await self.proto.runstate_changed()
self.assertEqual(state, Runstate.CONNECTING)
# This is insider baseball, but the connection attempt has
# yielded *just* before the actual connection attempt, so kick
# the loop to make sure it's truly wedged.
await asyncio.sleep(0)
task.cancel()
await task
@TestBase.async_test
async def testTimeout(self):
"""
Test what happens when a connection attempt times out.
"""
await self._watch_runstates(*self.BAD_CONNECTION_STATES)
task = run_as_task(self._hanging_connection())
# More insider baseball: to improve the speed of this test while
# guaranteeing that the connection even gets a chance to start,
# verify that the connection hangs *first*, then await the
# result of the task with a nearly-zero timeout.
state = await self.proto.runstate_changed()
self.assertEqual(state, Runstate.CONNECTING)
await asyncio.sleep(0)
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(task, timeout=0)
@TestBase.async_test
async def testRequire(self):
"""
Test what happens when a connection attempt is made while CONNECTING.
"""
await self._watch_runstates(*self.BAD_CONNECTION_STATES)
task = run_as_task(self._hanging_connection(), allow_cancellation=True)
state = await self.proto.runstate_changed()
self.assertEqual(state, Runstate.CONNECTING)
with self.assertRaises(StateError) as context:
await self._bad_connection('UNIX')
self.assertEqual(
context.exception.error_message,
"NullProtocol is currently connecting."
)
self.assertEqual(context.exception.state, Runstate.CONNECTING)
self.assertEqual(context.exception.required, Runstate.IDLE)
task.cancel()
await task
@TestBase.async_test
async def testImplicitRunstateInit(self):
"""
Test what happens if we do not wait on the runstate event until
AFTER a connection is made, i.e., connect()/accept() themselves
initialize the runstate event. All of the above tests force the
initialization by waiting on the runstate *first*.
"""
task = run_as_task(self._hanging_connection(), allow_cancellation=True)
# Kick the loop to coerce the state change
await asyncio.sleep(0)
assert self.proto.runstate == Runstate.CONNECTING
# We already missed the transition to CONNECTING
await self._watch_runstates(Runstate.DISCONNECTING, Runstate.IDLE)
task.cancel()
await task
class Accept(Connect):
"""
All of the same tests as Connect, but using the accept() interface.
"""
async def _bad_connection(self, family: str):
assert family in ('INET', 'UNIX')
if family == 'INET':
await self.proto.start_server_and_accept(('example.com', 1))
elif family == 'UNIX':
await self.proto.start_server_and_accept('/dev/null')
async def _hanging_connection(self):
with TemporaryDirectory(suffix='.qmp') as tmpdir:
sock = os.path.join(tmpdir, type(self.proto).__name__ + ".sock")
await self.proto.start_server_and_accept(sock)
class FakeSession(TestBase):
def setUp(self):
super().setUp()
self.proto.fake_session = True
async def _asyncSetUp(self):
await super()._asyncSetUp()
await self._watch_runstates(*self.GOOD_CONNECTION_STATES)
async def _asyncTearDown(self):
await self.proto.disconnect()
await super()._asyncTearDown()
####
@TestBase.async_test
async def testFakeConnect(self):
"""Test the full state lifecycle (via connect) with a no-op session."""
await self.proto.connect('/not/a/real/path')
self.assertEqual(self.proto.runstate, Runstate.RUNNING)
@TestBase.async_test
async def testFakeAccept(self):
"""Test the full state lifecycle (via accept) with a no-op session."""
await self.proto.start_server_and_accept('/not/a/real/path')
self.assertEqual(self.proto.runstate, Runstate.RUNNING)
@TestBase.async_test
async def testFakeRecv(self):
"""Test receiving a fake/null message."""
await self.proto.start_server_and_accept('/not/a/real/path')
logname = self.proto.logger.name
with self.assertLogs(logname, level='DEBUG') as context:
self.proto.trigger_input.set()
self.proto.trigger_input.clear()
await asyncio.sleep(0) # Kick reader.
self.assertEqual(
context.output,
[f"DEBUG:{logname}:<-- None"],
)
@TestBase.async_test
async def testFakeSend(self):
"""Test sending a fake/null message."""
await self.proto.start_server_and_accept('/not/a/real/path')
logname = self.proto.logger.name
with self.assertLogs(logname, level='DEBUG') as context:
# Cheat: Send a Null message to nobody.
await self.proto.send_msg()
# Kick writer; awaiting on a queue.put isn't sufficient to yield.
await asyncio.sleep(0)
self.assertEqual(
context.output,
[f"DEBUG:{logname}:--> None"],
)
async def _prod_session_api(
self,
current_state: Runstate,
error_message: str,
accept: bool = True
):
with self.assertRaises(StateError) as context:
if accept:
await self.proto.start_server_and_accept('/not/a/real/path')
else:
await self.proto.connect('/not/a/real/path')
self.assertEqual(context.exception.error_message, error_message)
self.assertEqual(context.exception.state, current_state)
self.assertEqual(context.exception.required, Runstate.IDLE)
@TestBase.async_test
async def testAcceptRequireRunning(self):
"""Test that accept() cannot be called when Runstate=RUNNING"""
await self.proto.start_server_and_accept('/not/a/real/path')
await self._prod_session_api(
Runstate.RUNNING,
"NullProtocol is already connected and running.",
accept=True,
)
@TestBase.async_test
async def testConnectRequireRunning(self):
"""Test that connect() cannot be called when Runstate=RUNNING"""
await self.proto.start_server_and_accept('/not/a/real/path')
await self._prod_session_api(
Runstate.RUNNING,
"NullProtocol is already connected and running.",
accept=False,
)
@TestBase.async_test
async def testAcceptRequireDisconnecting(self):
"""Test that accept() cannot be called when Runstate=DISCONNECTING"""
await self.proto.start_server_and_accept('/not/a/real/path')
# Cheat: force a disconnect.
await self.proto.simulate_disconnect()
await self._prod_session_api(
Runstate.DISCONNECTING,
("NullProtocol is disconnecting."
" Call disconnect() to return to IDLE state."),
accept=True,
)
@TestBase.async_test
async def testConnectRequireDisconnecting(self):
"""Test that connect() cannot be called when Runstate=DISCONNECTING"""
await self.proto.start_server_and_accept('/not/a/real/path')
# Cheat: force a disconnect.
await self.proto.simulate_disconnect()
await self._prod_session_api(
Runstate.DISCONNECTING,
("NullProtocol is disconnecting."
" Call disconnect() to return to IDLE state."),
accept=False,
)
class SimpleSession(TestBase):
def setUp(self):
super().setUp()
self.server = LineProtocol(type(self).__name__ + '-server')
async def _asyncSetUp(self):
await super()._asyncSetUp()
await self._watch_runstates(*self.GOOD_CONNECTION_STATES)
async def _asyncTearDown(self):
await self.proto.disconnect()
try:
await self.server.disconnect()
except EOFError:
pass
await super()._asyncTearDown()
@TestBase.async_test
async def testSmoke(self):
with TemporaryDirectory(suffix='.qmp') as tmpdir:
sock = os.path.join(tmpdir, type(self.proto).__name__ + ".sock")
server_task = create_task(self.server.start_server_and_accept(sock))
# give the server a chance to start listening [...]
await asyncio.sleep(0)
await self.proto.connect(sock)
| 18,678 | 30.288107 | 80 | py |
qemu | qemu-master/target/hexagon/gen_analyze_funcs.py | #!/usr/bin/env python3
##
## Copyright(c) 2022-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
##
## Helpers for gen_analyze_func
##
def is_predicated(tag):
return 'A_CONDEXEC' in hex_common.attribdict[tag]
def analyze_opn_old(f, tag, regtype, regid, regno):
regN = "%s%sN" % (regtype, regid)
predicated = "true" if is_predicated(tag) else "false"
if (regtype == "R"):
if (regid in {"ss", "tt"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"dd", "ee", "xx", "yy"}):
f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
f.write(" ctx_log_reg_write_pair(ctx, %s, %s);\n" % \
(regN, predicated))
elif (regid in {"s", "t", "u", "v"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"d", "e", "x", "y"}):
f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
f.write(" ctx_log_reg_write(ctx, %s, %s);\n" % \
(regN, predicated))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
if (regid in {"s", "t", "u", "v"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"d", "e", "x"}):
f.write(" const int %s = insn->regno[%d];\n" % (regN, regno))
f.write(" ctx_log_pred_write(ctx, %s);\n" % (regN))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "C"):
if (regid == "ss"):
f.write("// const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
elif (regid == "dd"):
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
f.write(" ctx_log_reg_write_pair(ctx, %s, %s);\n" % \
(regN, predicated))
elif (regid == "s"):
f.write("// const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
elif (regid == "d"):
f.write(" const int %s = insn->regno[%d] + HEX_REG_SA0;\n" % \
(regN, regno))
f.write(" ctx_log_reg_write(ctx, %s, %s);\n" % \
(regN, predicated))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "M"):
if (regid == "u"):
f.write("// const int %s = insn->regno[%d];\n"% \
(regN, regno))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "V"):
newv = "EXT_DFL"
if (hex_common.is_new_result(tag)):
newv = "EXT_NEW"
elif (hex_common.is_tmp_result(tag)):
newv = "EXT_TMP"
if (regid in {"dd", "xx"}):
f.write(" const int %s = insn->regno[%d];\n" %\
(regN, regno))
f.write(" ctx_log_vreg_write_pair(ctx, %s, %s, %s);\n" % \
(regN, newv, predicated))
elif (regid in {"uu", "vv"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"s", "u", "v", "w"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"d", "x", "y"}):
f.write(" const int %s = insn->regno[%d];\n" % \
(regN, regno))
f.write(" ctx_log_vreg_write(ctx, %s, %s, %s);\n" % \
(regN, newv, predicated))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "Q"):
if (regid in {"d", "e", "x"}):
f.write(" const int %s = insn->regno[%d];\n" % \
(regN, regno))
f.write(" ctx_log_qreg_write(ctx, %s);\n" % (regN))
elif (regid in {"s", "t", "u", "v"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "G"):
if (regid in {"dd"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"d"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"ss"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"s"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "S"):
if (regid in {"dd"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"d"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"ss"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
elif (regid in {"s"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
else:
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
def analyze_opn_new(f, tag, regtype, regid, regno):
regN = "%s%sN" % (regtype, regid)
if (regtype == "N"):
if (regid in {"s", "t"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "P"):
if (regid in {"t", "u", "v"}):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
else:
print("Bad register parse: ", regtype, regid)
elif (regtype == "O"):
if (regid == "s"):
f.write("// const int %s = insn->regno[%d];\n" % \
(regN, regno))
else:
print("Bad register parse: ", regtype, regid)
else:
print("Bad register parse: ", regtype, regid)
def analyze_opn(f, tag, regtype, regid, toss, numregs, i):
if (hex_common.is_pair(regid)):
analyze_opn_old(f, tag, regtype, regid, i)
elif (hex_common.is_single(regid)):
if hex_common.is_old_val(regtype, regid, tag):
analyze_opn_old(f,tag, regtype, regid, i)
elif hex_common.is_new_val(regtype, regid, tag):
analyze_opn_new(f, tag, regtype, regid, i)
else:
print("Bad register parse: ", regtype, regid, toss, numregs)
else:
print("Bad register parse: ", regtype, regid, toss, numregs)
##
## Generate the code to analyze the instruction
## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
## We produce:
## static void analyze_A2_add(DisasContext *ctx)
## {
## Insn *insn G_GNUC_UNUSED = ctx->insn;
## const int RdN = insn->regno[0];
## ctx_log_reg_write(ctx, RdN, false);
## // const int RsN = insn->regno[1];
## // const int RtN = insn->regno[2];
## }
##
def gen_analyze_func(f, tag, regs, imms):
f.write("static void analyze_%s(DisasContext *ctx)\n" %tag)
f.write('{\n')
f.write(" Insn *insn G_GNUC_UNUSED = ctx->insn;\n")
i=0
## Analyze all the registers
for regtype, regid, toss, numregs in regs:
analyze_opn(f, tag, regtype, regid, toss, numregs, i)
i += 1
has_generated_helper = (not hex_common.skip_qemu_helper(tag) and
not hex_common.is_idef_parser_enabled(tag))
if (has_generated_helper and
'A_SCALAR_LOAD' in hex_common.attribdict[tag]):
f.write(" ctx->need_pkt_has_store_s1 = true;\n")
f.write("}\n\n")
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
hex_common.read_overrides_file(sys.argv[3])
hex_common.read_overrides_file(sys.argv[4])
## Whether or not idef-parser is enabled is
## determined by the number of arguments to
## this script:
##
## 5 args. -> not enabled,
## 6 args. -> idef-parser enabled.
##
## The 6:th arg. then holds a list of the successfully
## parsed instructions.
is_idef_parser_enabled = len(sys.argv) > 6
if is_idef_parser_enabled:
hex_common.read_idef_parser_enabled_file(sys.argv[5])
hex_common.calculate_attribs()
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
with open(sys.argv[-1], 'w') as f:
f.write("#ifndef HEXAGON_TCG_FUNCS_H\n")
f.write("#define HEXAGON_TCG_FUNCS_H\n\n")
for tag in hex_common.tags:
gen_analyze_func(f, tag, tagregs[tag], tagimms[tag])
f.write("#endif /* HEXAGON_TCG_FUNCS_H */\n")
if __name__ == "__main__":
main()
| 9,792 | 37.70751 | 80 | py |
qemu | qemu-master/target/hexagon/gen_helper_funcs.py | #!/usr/bin/env python3
##
## Copyright(c) 2019-2023 Qualcomm Innovation Center, Inc. All Rights Reserved.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sys
import re
import string
import hex_common
##
## Helpers for gen_helper_function
##
def gen_decl_ea(f):
f.write(" uint32_t EA;\n")
def gen_helper_return_type(f,regtype,regid,regno):
if regno > 1 : f.write(", ")
f.write("int32_t")
def gen_helper_return_type_pair(f,regtype,regid,regno):
if regno > 1 : f.write(", ")
f.write("int64_t")
def gen_helper_arg(f,regtype,regid,regno):
if regno > 0 : f.write(", " )
f.write("int32_t %s%sV" % (regtype,regid))
def gen_helper_arg_new(f,regtype,regid,regno):
if regno >= 0 : f.write(", " )
f.write("int32_t %s%sN" % (regtype,regid))
def gen_helper_arg_pair(f,regtype,regid,regno):
if regno >= 0 : f.write(", ")
f.write("int64_t %s%sV" % (regtype,regid))
def gen_helper_arg_ext(f,regtype,regid,regno):
if regno > 0 : f.write(", ")
f.write("void *%s%sV_void" % (regtype,regid))
def gen_helper_arg_ext_pair(f,regtype,regid,regno):
if regno > 0 : f.write(", ")
f.write("void *%s%sV_void" % (regtype,regid))
def gen_helper_arg_opn(f,regtype,regid,i,tag):
if (hex_common.is_pair(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_arg_ext_pair(f,regtype,regid,i)
else:
gen_helper_arg_pair(f,regtype,regid,i)
elif (hex_common.is_single(regid)):
if hex_common.is_old_val(regtype, regid, tag):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_arg_ext(f,regtype,regid,i)
else:
gen_helper_arg(f,regtype,regid,i)
elif hex_common.is_new_val(regtype, regid, tag):
gen_helper_arg_new(f,regtype,regid,i)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
def gen_helper_arg_imm(f,immlett):
f.write(", int32_t %s" % (hex_common.imm_name(immlett)))
def gen_helper_dest_decl(f,regtype,regid,regno,subfield=""):
f.write(" int32_t %s%sV%s = 0;\n" % \
(regtype,regid,subfield))
def gen_helper_dest_decl_pair(f,regtype,regid,regno,subfield=""):
f.write(" int64_t %s%sV%s = 0;\n" % \
(regtype,regid,subfield))
def gen_helper_dest_decl_ext(f,regtype,regid):
if (regtype == "Q"):
f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \
(regtype,regid,regtype,regid))
else:
f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \
(regtype,regid,regtype,regid))
def gen_helper_dest_decl_ext_pair(f,regtype,regid,regno):
f.write(" /* %s%sV is *(MMVectorPair *))%s%sV_void) */\n" % \
(regtype,regid,regtype, regid))
def gen_helper_dest_decl_opn(f,regtype,regid,i):
if (hex_common.is_pair(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_dest_decl_ext_pair(f,regtype,regid, i)
else:
gen_helper_dest_decl_pair(f,regtype,regid,i)
elif (hex_common.is_single(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_dest_decl_ext(f,regtype,regid)
else:
gen_helper_dest_decl(f,regtype,regid,i)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
def gen_helper_src_var_ext(f,regtype,regid):
if (regtype == "Q"):
f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \
(regtype,regid,regtype,regid))
else:
f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \
(regtype,regid,regtype,regid))
def gen_helper_src_var_ext_pair(f,regtype,regid,regno):
f.write(" /* %s%sV%s is *(MMVectorPair *)(%s%sV%s_void) */\n" % \
(regtype,regid,regno,regtype,regid,regno))
def gen_helper_return(f,regtype,regid,regno):
f.write(" return %s%sV;\n" % (regtype,regid))
def gen_helper_return_pair(f,regtype,regid,regno):
f.write(" return %s%sV;\n" % (regtype,regid))
def gen_helper_dst_write_ext(f,regtype,regid):
return
def gen_helper_dst_write_ext_pair(f,regtype,regid):
return
def gen_helper_return_opn(f, regtype, regid, i):
if (hex_common.is_pair(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_dst_write_ext_pair(f,regtype,regid)
else:
gen_helper_return_pair(f,regtype,regid,i)
elif (hex_common.is_single(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_dst_write_ext(f,regtype,regid)
else:
gen_helper_return(f,regtype,regid,i)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
##
## Generate the TCG code to call the helper
## For A2_add: Rd32=add(Rs32,Rt32), { RdV=RsV+RtV;}
## We produce:
## int32_t HELPER(A2_add)(CPUHexagonState *env, int32_t RsV, int32_t RtV)
## {
## uint32_t slot __attribute__(unused)) = 4;
## int32_t RdV = 0;
## { RdV=RsV+RtV;}
## COUNT_HELPER(A2_add);
## return RdV;
## }
##
def gen_helper_function(f, tag, tagregs, tagimms):
regs = tagregs[tag]
imms = tagimms[tag]
numresults = 0
numscalarresults = 0
numscalarreadwrite = 0
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
numresults += 1
if (hex_common.is_scalar_reg(regtype)):
numscalarresults += 1
if (hex_common.is_readwrite(regid)):
if (hex_common.is_scalar_reg(regtype)):
numscalarreadwrite += 1
if (numscalarresults > 1):
## The helper is bogus when there is more than one result
f.write("void HELPER(%s)(CPUHexagonState *env) { BOGUS_HELPER(%s); }\n"
% (tag, tag))
else:
## The return type of the function is the type of the destination
## register (if scalar)
i=0
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
if (hex_common.is_pair(regid)):
if (hex_common.is_hvx_reg(regtype)):
continue
else:
gen_helper_return_type_pair(f,regtype,regid,i)
elif (hex_common.is_single(regid)):
if (hex_common.is_hvx_reg(regtype)):
continue
else:
gen_helper_return_type(f,regtype,regid,i)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
i += 1
if (numscalarresults == 0):
f.write("void")
f.write(" HELPER(%s)(CPUHexagonState *env" % tag)
## Arguments include the vector destination operands
i = 1
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
if (hex_common.is_pair(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_arg_ext_pair(f,regtype,regid,i)
else:
continue
elif (hex_common.is_single(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_arg_ext(f,regtype,regid,i)
else:
# This is the return value of the function
continue
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
i += 1
## For conditional instructions, we pass in the destination register
if 'A_CONDEXEC' in hex_common.attribdict[tag]:
for regtype, regid, toss, numregs in regs:
if (hex_common.is_writeonly(regid) and
not hex_common.is_hvx_reg(regtype)):
gen_helper_arg_opn(f, regtype, regid, i, tag)
i += 1
## Arguments to the helper function are the source regs and immediates
for regtype,regid,toss,numregs in regs:
if (hex_common.is_read(regid)):
if (hex_common.is_hvx_reg(regtype) and
hex_common.is_readwrite(regid)):
continue
gen_helper_arg_opn(f,regtype,regid,i,tag)
i += 1
for immlett,bits,immshift in imms:
gen_helper_arg_imm(f,immlett)
i += 1
if (hex_common.need_pkt_has_multi_cof(tag)):
f.write(", uint32_t pkt_has_multi_cof")
if hex_common.need_PC(tag):
if i > 0: f.write(", ")
f.write("target_ulong PC")
i += 1
if hex_common.helper_needs_next_PC(tag):
if i > 0: f.write(", ")
f.write("target_ulong next_PC")
i += 1
if hex_common.need_slot(tag):
if i > 0: f.write(", ")
f.write("uint32_t slot")
i += 1
if hex_common.need_part1(tag):
if i > 0: f.write(", ")
f.write("uint32_t part1")
f.write(")\n{\n")
if (not hex_common.need_slot(tag)):
f.write(" uint32_t slot __attribute__((unused)) = 4;\n" )
if hex_common.need_ea(tag): gen_decl_ea(f)
## Declare the return variable
i=0
if 'A_CONDEXEC' not in hex_common.attribdict[tag]:
for regtype,regid,toss,numregs in regs:
if (hex_common.is_writeonly(regid)):
gen_helper_dest_decl_opn(f,regtype,regid,i)
i += 1
for regtype,regid,toss,numregs in regs:
if (hex_common.is_read(regid)):
if (hex_common.is_pair(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_src_var_ext_pair(f,regtype,regid,i)
elif (hex_common.is_single(regid)):
if (hex_common.is_hvx_reg(regtype)):
gen_helper_src_var_ext(f,regtype,regid)
else:
print("Bad register parse: ",regtype,regid,toss,numregs)
if 'A_FPOP' in hex_common.attribdict[tag]:
f.write(' arch_fpop_start(env);\n');
f.write(" %s\n" % hex_common.semdict[tag])
if 'A_FPOP' in hex_common.attribdict[tag]:
f.write(' arch_fpop_end(env);\n');
## Save/return the return variable
for regtype,regid,toss,numregs in regs:
if (hex_common.is_written(regid)):
gen_helper_return_opn(f, regtype, regid, i)
f.write("}\n\n")
## End of the helper definition
def main():
hex_common.read_semantics_file(sys.argv[1])
hex_common.read_attribs_file(sys.argv[2])
hex_common.read_overrides_file(sys.argv[3])
hex_common.read_overrides_file(sys.argv[4])
## Whether or not idef-parser is enabled is
## determined by the number of arguments to
## this script:
##
## 5 args. -> not enabled,
## 6 args. -> idef-parser enabled.
##
## The 6:th arg. then holds a list of the successfully
## parsed instructions.
is_idef_parser_enabled = len(sys.argv) > 6
if is_idef_parser_enabled:
hex_common.read_idef_parser_enabled_file(sys.argv[5])
hex_common.calculate_attribs()
tagregs = hex_common.get_tagregs()
tagimms = hex_common.get_tagimms()
output_file = sys.argv[-1]
with open(output_file, 'w') as f:
for tag in hex_common.tags:
## Skip the priv instructions
if ( "A_PRIV" in hex_common.attribdict[tag] ) :
continue
## Skip the guest instructions
if ( "A_GUEST" in hex_common.attribdict[tag] ) :
continue
## Skip the diag instructions
if ( tag == "Y6_diag" ) :
continue
if ( tag == "Y6_diag0" ) :
continue
if ( tag == "Y6_diag1" ) :
continue
if ( hex_common.skip_qemu_helper(tag) ):
continue
if ( hex_common.is_idef_parser_enabled(tag) ):
continue
gen_helper_function(f, tag, tagregs, tagimms)
if __name__ == "__main__":
main()
| 12,876 | 35.68661 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.