python_code | repo_name | file_path
---|---|---|
import logging
import dask
from cuchemcommon.data.helper.chembldata import IMP_PROPS
from cuchemcommon.data.cluster_wf import ChemblClusterWfDao
from cuchemcommon.fingerprint import MorganFingerprint, TransformationDefaults
logger = logging.getLogger(__name__)
def test_dataframe():
"""
Verify fetching data from chemblDB.
"""
dao = ChemblClusterWfDao(MorganFingerprint)
mol_df = dao.fetch_molecular_embedding(n_molecules=100)
assert isinstance(mol_df, dask.dataframe.core.DataFrame), \
'Incorrect data structure from DAO'
fp_size = TransformationDefaults.MorganFingerprint.value['nBits']
# Fingerprint size + important columns + ID + (SMILES + transformed SMILES)
df_size = fp_size + len(IMP_PROPS) + 3
logger.info(df_size)
logger.info(fp_size)
logger.info(mol_df.columns)
logger.info(mol_df.head())
assert mol_df.shape[1] == df_size, \
'Expected dataframe size is %d found %d.' % (df_size, mol_df.shape[1])
| cheminformatics-master | cuchem/tests/test_chembl.py |
cheminformatics-master | cuchem/tests/__init__.py |
|
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tempfile
import os
import shutil
import glob
import logging
from pydoc import locate
import pandas as pd
from tests.utils import _create_context
from cuchemcommon.utils.logger import initialize_logfile
from cuchem.utils.plot_benchmark_results \
import prepare_benchmark_df, prepare_acceleration_stacked_plot
logger = logging.getLogger(__name__)
# Parameter lists
run_benchmark_params = [([{'test_type': 'cuchem.cheminformatics.wf.cluster.gpukmeansumap.GpuKmeansUmap',
'use_gpu': True,
'n_workers': 1,
'n_mol': 5000},
{'test_type': 'cuchem.cheminformatics.wf.cluster.cpukmeansumap.CpuKmeansUmap',
'use_gpu': False,
'n_workers': 10,
'n_mol': 5000}])]
# @pytest.mark.parametrize('benchmark_config_list', run_benchmark_params)
def test_run_benchmark(benchmark_config_list):
output_dir = tempfile.gettempdir()
output_file = os.path.join(output_dir, 'benchmark.csv')
initialize_logfile(output_file)
max_n_mol = 0
for config in benchmark_config_list:
test_type = config['test_type']
use_gpu = config['use_gpu']
n_workers = config['n_workers']
n_mol = config['n_mol']
max_n_mol = max(max_n_mol, n_mol)
context = _create_context(use_gpu=use_gpu,
n_workers=n_workers,
benchmark_file=output_file)
if (not use_gpu):
context.compute_type = 'cpu'
context.n_molecule = n_mol
context.cache_directory = None
context.is_benchmark = True
wf_class = locate(test_type)
workflow = wf_class()
workflow.cluster()
workflow.compute_qa_matric()
context.dask_client.cluster.close()
context.dask_client.close()
context.dask_client = None
# Filename is set in workflow -- move to create randomized name
temp_file = tempfile.NamedTemporaryFile(prefix='benchmark_',
suffix='.csv',
dir=output_dir,
delete=False).name
shutil.move(output_file, temp_file)
assert os.path.exists(temp_file)
benchmark_results = pd.read_csv(temp_file, comment='#')
logger.info(benchmark_results)
nrows, ncols = benchmark_results.shape
assert ncols == 8
assert nrows >= len(benchmark_config_list)
# assert benchmark_results['n_molecules'].min() > 0
# assert benchmark_results['n_molecules'].min() < max_n_mol
df, machine_config = prepare_benchmark_df(temp_file)
basename = os.path.splitext(temp_file)[0]
excel_file = basename + '.xlsx'
assert os.path.exists(excel_file)
md_file = basename + '.md'
assert os.path.exists(md_file)
png_file = basename + '.png'
prepare_acceleration_stacked_plot(df, machine_config, output_path=png_file)
assert os.path.exists(png_file)
| cheminformatics-master | cuchem/tests/test_benchmark.py |
import os
import dask
import time
import inspect
import tempfile
import logging
from locust import events
from cuchem.utils.dask import initialize_cluster
from cuchemcommon.context import Context
logger = logging.getLogger(__name__)
def _fetch_chembl_test_dataset(n_molecules=None):
if n_molecules is None:
n_molecules = 1000
from cuchemcommon.data.cluster_wf import ChemblClusterWfDao
from cuchemcommon.fingerprint import MorganFingerprint
dao = ChemblClusterWfDao(MorganFingerprint)
mol_df = dao.fetch_molecular_embedding(n_molecules=n_molecules)
assert isinstance(mol_df, dask.dataframe.core.DataFrame), \
'Incorrect data structure from DAO'
return n_molecules, dao, mol_df
def _create_context(use_gpu=True,
n_workers=-1,
benchmark_file=None,
cache_directory=None,
batch_size=None):
context = Context()
if context.dask_client is None:
context.dask_client = initialize_cluster(use_gpu=use_gpu,
n_gpu=n_workers,
n_cpu=n_workers)
context.is_benchmark = False
context.cache_directory = cache_directory
if cache_directory is None:
context.cache_directory = tempfile.gettempdir()
context.benchmark_file = benchmark_file
if benchmark_file is None:
context.benchmark_file = os.path.join(tempfile.gettempdir(), 'benchmark.csv')
context.batch_size = batch_size
if batch_size is None:
context.batch_size = 10000
return context
def stopwatch(request_type):
def _stopwatch(func):
def wrapper(*args, **kwargs):
previous_frame = inspect.currentframe().f_back
_, _, task_name, _, _ = inspect.getframeinfo(previous_frame)
_start_time = time.time()
result = None
try:
result = func(*args, **kwargs)
except Exception as e:
total = int((time.time() - _start_time) * 1000)
events.request_failure.fire(request_type=request_type,
name=task_name,
response_time=total,
response_length=0,
exception=e)
else:
total = int((time.time() - _start_time) * 1000)
events.request_success.fire(request_type=request_type,
name=task_name,
response_time=total,
response_length=0)
print(total, _start_time, time.time())
return result
return wrapper
return _stopwatch
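# --- Illustrative usage sketch (not part of the original file) ---
# `stopwatch` wraps a callable, times it, and reports the elapsed milliseconds to
# Locust through the 1.x event hooks used above (`events.request_success` /
# `events.request_failure`). A hypothetical load-test task could look like:
#
#     @stopwatch('Interpolation')
#     def interpolate_task(wf):
#         return wf.interpolate_by_id(['CHEMBL6328', 'CHEMBL415286'], num_points=10)
#
# Exceptions raised by the wrapped callable are reported as failures and swallowed
# (the wrapper returns None), and the task name recorded by Locust is taken from the
# calling frame via `inspect.getframeinfo`.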
| cheminformatics-master | cuchem/tests/utils.py |
import os
import logging
import tempfile
import dask
from cuchemcommon.data.helper.chembldata import ChEmblData
from cuchemcommon.data.cluster_wf import FINGER_PRINT_FILES
from cuchemcommon.fingerprint import MorganFingerprint, Embeddings
logger = logging.getLogger(__name__)
def test_cache_morganfingerprint():
"""
Verify that Morgan fingerprints can be computed from ChEMBL and cached to HDF5.
"""
num_recs = 1000
logger.info('Morgan Fingerprinting Check!')
cache_dir = tempfile.mkdtemp()
logger.info('Creating cache at %s' % cache_dir)
logger.info(type(cache_dir))
# Write to cache
chem_data = ChEmblData(fp_type=MorganFingerprint)
chem_data.save_fingerprints(os.path.join(cache_dir, FINGER_PRINT_FILES),
num_recs=num_recs)
# Verify cache
hdf_path = os.path.join(cache_dir, FINGER_PRINT_FILES)
logger.info('Reading molecules from %s...' % hdf_path)
mol_df = dask.dataframe.read_hdf(hdf_path, 'fingerprints')
mol_df = mol_df.compute()
logger.info('Expected %s rec found %s.', num_recs, mol_df.shape[0])
assert mol_df.shape[0] <= num_recs, \
('Expected %d rec found %d.' % (num_recs, mol_df.shape[0]))
def test_cache_cddd_embeddings():
"""
Verify that CDDD embeddings can be computed from ChEMBL and cached to HDF5.
"""
num_recs = 1000
logger.info('CDDD Embeddings Check!')
cache_dir = tempfile.mkdtemp()
logger.info('Creating cache at %s' % cache_dir)
logger.info(type(cache_dir))
# Write to cache
chem_data = ChEmblData(fp_type=Embeddings)
chem_data.save_fingerprints(os.path.join(cache_dir, FINGER_PRINT_FILES),
num_recs=num_recs)
# Verify cache
hdf_path = os.path.join(cache_dir, FINGER_PRINT_FILES)
logger.info('Reading molecules from %s...' % hdf_path)
mol_df = dask.dataframe.read_hdf(hdf_path, 'fingerprints')
mol_df = mol_df.compute()
logger.info('Expected %s rec found %s.', num_recs, mol_df.shape[0])
assert mol_df.shape[0] <= num_recs, \
('Expected %d rec found %d.' % (num_recs, mol_df.shape[0]))
| cheminformatics-master | cuchem/tests/test_fp_cache.py |
import logging
from cuchem.benchmark.data import TrainingData
logger = logging.getLogger(__name__)
def test_training_data():
training_data = TrainingData()
cursor = training_data.conn.cursor()
cursor.execute('SELECT smiles FROM train_data limit 10')
smiles_strs = cursor.fetchall()
for smiles in smiles_strs:
logger.info(f'Looking for {smiles} in known smiles database...')
assert training_data.is_known_smiles(smiles[0])
smiles = 'adasdadsasdasd'
logger.info(f'Looking for {smiles} in known smiles database...')
assert not training_data.is_known_smiles(smiles)
| cheminformatics-master | cuchem/tests/test_benchmark_data.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from tritonclient.utils import *
import tritonclient.http as httpclient
from locust import task, User, constant
from tests.utils import stopwatch
class TritonLocust(User):
host = 'http://127.0.0.1:8000'
wait_time = constant(0.1)
model_name = "molbart"
@task
@stopwatch('Triton_Sample')
def client_task(self):
with httpclient.InferenceServerClient('localhost:8000') as client:
input0_data = np.array(['CN1C=NC2=C1C(=O)N(C(=O)N2C)C']).astype(object)
inputs = [httpclient.InferInput("INPUT0", input0_data.shape,
np_to_triton_dtype(input0_data.dtype)), ]
inputs[0].set_data_from_numpy(input0_data)
outputs = [httpclient.InferRequestedOutput("OUTPUT0"), ]
response = client.infer(TritonLocust.model_name,
inputs,
request_id=str(1),
outputs=outputs)
result = response.get_response()
| cheminformatics-master | cuchem/tests/perf_triton.py |
import logging
from cuchem.decorator import LipinskiRuleOfFiveDecorator, MolecularStructureDecorator
from cuchem.wf.generative import MolBART, Cddd
logger = logging.getLogger(__name__)
def interpolation(wf, num_points=20, force_unique=False):
"""
Interpolate between two molecules (given by ChEMBL ID) and decorate the generated SMILES.
"""
chembl_ids = ['CHEMBL6328', 'CHEMBL415286']
# chembl_ids = ['CHEMBL10454', 'CHEMBL10469']
generated_df = wf.interpolate_by_id(chembl_ids,
num_points=num_points,
force_unique=force_unique)
generated_df = MolecularStructureDecorator().decorate(generated_df)
generated_df = LipinskiRuleOfFiveDecorator().decorate(generated_df)
logger.info(generated_df.shape)
return generated_df
def test_cddd_interpolation():
num_points = 20
wf = Cddd()
interp = interpolation(wf,
num_points=num_points,
force_unique=True)
logger.info(interp)
logger.info(interp.columns)
assert interp.shape[0] == num_points + 2
def test_cddd_similar_smiles():
wf = Cddd()
num_to_generate = 3
generated_smiles = wf.find_similars_smiles_by_id(['CHEMBL6273'],
num_requested=num_to_generate,
force_unique=True)
logger.info(generated_smiles)
assert len(generated_smiles) == num_to_generate + 1
# TODO: Fix me
# def test_molbart_interpolation():
# wf = MolBART()
# interpolation(wf)
# TODO: Fix me
# def test_molbart_similar_smiles():
# wf = MolBART()
# num_to_generate = 3
# generated_smiles = wf.find_similars_smiles_by_id(['CHEMBL6273'],
# num_requested=num_to_generate,
# force_unique=True)
# logger.info('# of generated SMILES %s', len(generated_smiles))
# logger.info(generated_smiles)
# assert generated_smiles.shape[0] == num_to_generate + 1
| cheminformatics-master | cuchem/tests/test_generative_wf.py |
cheminformatics-master | cuchem/cuchem/__init__.py |
|
#!/usr/bin/env python3
import glob
import os
import sys
import matplotlib.pyplot as plt
import argparse
import numpy as np
import textwrap
import pandas as pd
def parse_args():
parser = argparse.ArgumentParser(description='Plot Results')
parser.add_argument('-i', '--input_dir',
dest='input_dir',
type=str,
default='/workspace/megamolbart/benchmark',
help='Path containing CSV files')
parser.add_argument('-o', '--output_dir',
dest='output_dir',
type=str,
help='Output directory -- defaults to input_dir')
parser.add_argument('-r', '--radius',
dest='radius',
type=float,
default=0.1,
help='Radius to select for appropriate metrics')
parser.add_argument('-t', '--top_k',
dest='top_k',
type=int,
default=None,
help='Top K for Nearest Neighbor -- default is max value')
args = parser.parse_args(sys.argv[1:])
if args.output_dir is None:
args.output_dir = args.input_dir
return args
def create_data_sets(input_dir, radius, top_k):
"""Load data files and coalesce into dataset"""
# Combine files
data_files = glob.glob(os.path.join(input_dir, '*.csv'))
assert len(data_files) > 0
data_list = list()
for data_file in data_files:
data = pd.read_csv(data_file)
data = data.replace('unique', 'uniqueness')
data_list.append(data)
data_agg = pd.concat(data_list, axis=0)
# Clean up data
top_k = data_agg['top_k'].max() if top_k is None else top_k
mask = (data_agg['radius'] == radius) | (data_agg['top_k'] == top_k) | data_agg['model'].notnull()
data_agg = data_agg[mask]
# Set sort order
name_category = pd.CategoricalDtype(['validity', 'novelty', 'uniqueness',
'nearest neighbor correlation', 'modelability'],
ordered=True)
model_category = pd.CategoricalDtype(['linear regression', 'elastic net', 'support vector machine', 'random forest'],
ordered=True)
data_agg['name'] = data_agg['name'].astype(name_category)
data_agg['model'] = data_agg['model'].astype(model_category)
data_agg = data_agg.sort_values(['name', 'model'])
return data_agg
def create_plot(data, radius, iteration, output_dir):
"""Create plot of metrics"""
def _clean_label(label):
label = label.get_text().title()
label = textwrap.wrap(label, width=20)
label = '\n'.join(label)
return label
green = '#86B637'
blue = '#5473DC'
fig, axList = plt.subplots(ncols=2)
fig.set_size_inches(10, 5)
# Validity, uniqueness, novelty, and nearest neighbor correlation plot
ax = axList[0]
mask = data['name'] != 'modelability'
data.loc[mask, ['name', 'value']].set_index('name').plot(kind='bar', ax=ax, legend=False, color=green, rot=45)
xlabels = [_clean_label(x) for x in ax.get_xticklabels()]
ax.set_xticklabels(xlabels)
ax.set(ylabel='Percentage', xlabel='Metric', title=f'Metrics at Radius {radius} with Model at Iteration {iteration}')
ax.set_ylim(0, 1.0)
# ML Model Error Ratios
ax = axList[1]
data.loc[mask.pipe(np.invert), ['model', 'value']].set_index('model').plot(kind='bar', ax=ax, legend=False, color=green, rot=45)
ax.set(ylabel='Ratio of Mean Squared Errors\n(Morgan Fingerprint / Embedding)', xlabel='Model', title='Modelability Ratio: Higher --> Better Embeddings')
xlabels = [_clean_label(x) for x in ax.get_xticklabels()]
ax.set_xticklabels(xlabels)
plt.tight_layout()
fig.savefig(os.path.join(output_dir, 'metrics.png'), dpi=300, facecolor='white')
if __name__ == '__main__':
args = parse_args()
data = create_data_sets(args.input_dir, args.radius, args.top_k)
assert data['iteration'].nunique() == 1
iteration = data['iteration'].iloc[0]
create_plot(data, args.radius, iteration, args.output_dir)
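# Illustrative invocation (not part of the original file), assuming benchmark CSV
# files produced by the benchmark scripts are present in the input directory:
#
#     python plot.py --input_dir /workspace/megamolbart/benchmark --radius 0.1 --top_k 100
#
# The aggregated plot is written to <output_dir>/metrics.png; --output_dir defaults
# to the input directory when omitted.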
| cheminformatics-master | cuchem/cuchem/benchmark/plot.py |
cheminformatics-master | cuchem/cuchem/benchmark/__init__.py |
|
import os
import time
import logging
import argparse
import hydra
import pandas as pd
from datetime import datetime
from cuml.ensemble.randomforestregressor import RandomForestRegressor
from cuml import LinearRegression, ElasticNet
from cuml.svm import SVR
from cuchem.wf.generative.megatronmolbart import MegatronMolBART
from cuchem.datasets.loaders import ZINC15_TestSplit_20K_Samples, ZINC15_TestSplit_20K_Fingerprints
from cuchem.metrics.model import Validity, Unique, Novelty, NearestNeighborCorrelation, Modelability
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def get_model():
rf_estimator = RandomForestRegressor(accuracy_metric='mse', random_state=0)
rf_param_dict = {'n_estimators': [10, 50]}
sv_estimator = SVR(kernel='rbf')
sv_param_dict = {'C': [0.01, 0.1, 1.0, 10], 'degree': [3,5,7,9]}
lr_estimator = LinearRegression(normalize=True)
lr_param_dict = {'normalize': [True]}
en_estimator = ElasticNet(normalize=True)
en_param_dict = {'alpha': [0.001, 0.01, 0.1, 1.0, 10.0, 100],
'l1_ratio': [0.1, 0.5, 1.0, 10.0]}
return {'random forest': [rf_estimator, rf_param_dict],
'support vector machine': [sv_estimator, sv_param_dict],
'linear regression': [lr_estimator, lr_param_dict],
'elastic net': [en_estimator, en_param_dict]}
def save_metric_results(metric_list, output_dir):
metric_df = pd.concat(metric_list, axis=1).T
logger.info(metric_df)
metric = metric_df['name'].iloc[0].replace(' ', '_')
iteration = metric_df['iteration'].iloc[0]
metric_df.to_csv(os.path.join(output_dir, f'{metric}_{iteration}.csv'), index=False)
@hydra.main(config_path=".", config_name="benchmark")
def main(cfg):
logger.info(cfg)
os.makedirs(cfg.output.path, exist_ok=True)
output_dir = cfg.output.path
seq_len = int(cfg.samplingSpec.seq_len) # Import from MegaMolBART codebase?
sample_size = int(cfg.samplingSpec.sample_size)
# radius_list = [1, 2, 5] # TODO calculate radius and automate this
# top_k_list = [None, 50, 100, 500] # TODO decide on top k value
inferrer = MegatronMolBART()
# Metrics
metric_list = []
if cfg.metric.validity.enabled:
metric_list.append(Validity(inferrer))
if cfg.metric.unique.enabled:
metric_list.append(Unique(inferrer))
if cfg.metric.novelty.enabled:
metric_list.append(Novelty(inferrer))
if cfg.metric.nearestNeighborCorrelation.enabled:
metric_list.append(NearestNeighborCorrelation(inferrer))
if cfg.metric.modelability.enabled:
metric_list.append(Modelability(inferrer))
# ML models
model_dict = get_model()
# Create datasets of size input_size. Initially load 20% more, then reduce to
# input_size after cleaning and preprocessing.
smiles_dataset = ZINC15_TestSplit_20K_Samples(max_len=seq_len)
fingerprint_dataset = ZINC15_TestSplit_20K_Fingerprints()
smiles_dataset.load()
fingerprint_dataset.load(smiles_dataset.data.index)
n_data = cfg.samplingSpec.input_size
if n_data <= 0:
n_data = len(smiles_dataset.data)
# assert fingerprint_dataset.data.index == smiles_dataset.data.index
# DEBUG
smiles_dataset.data = smiles_dataset.data.iloc[:n_data]
smiles_dataset.properties = smiles_dataset.properties.iloc[:n_data]
fingerprint_dataset.data = fingerprint_dataset.data.iloc[:n_data]
# DEBUG
n_data = cfg.samplingSpec.input_size
convert_runtime = lambda x: x.seconds + (x.microseconds / 1.0e6)
iteration = None
retry_count = 0
while retry_count < 30:
try:
# Wait for up to 5 minutes for the service to come up
iteration = inferrer.get_iteration()
break
except Exception as e:
logging.warning(f'Service not available. Retrying {retry_count}...')
time.sleep(10)
retry_count += 1
continue
logging.info(f'Service found after {retry_count} retries.')
for metric in metric_list:
logger.info(f'METRIC: {metric.name}')
result_list = []
iter_list = metric.variations(cfg, model_dict=model_dict)
for iter_val in iter_list:
start_time = datetime.now()
try:
iter_val = int(iter_val)
except ValueError:
pass
estimator, param_dict = None, None
if iter_val in model_dict:
estimator, param_dict = model_dict[iter_val]
result = metric.calculate(smiles_dataset=smiles_dataset,
fingerprint_dataset=fingerprint_dataset,
top_k=iter_val,
properties=smiles_dataset.properties,
estimator=estimator,
param_dict=param_dict,
num_samples=sample_size,
radius=iter_val)
run_time = convert_runtime(datetime.now() - start_time)
result['iteration'] = iteration
result['run_time'] = run_time
result['data_size'] = n_data
result_list.append(result)
save_metric_results(result_list, output_dir)
if __name__ == '__main__':
main()
| cheminformatics-master | cuchem/cuchem/benchmark/megamolbart.py |
import os
import pickle
import sqlite3
import logging
from typing import List
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.context import Context
logger = logging.getLogger(__name__)
class TrainingData(object, metaclass=Singleton):
def __init__(self):
context = Context()
db_file = context.get_config('data_mount_path', default='/data')
db_file = os.path.join(db_file, 'db/zinc_train.sqlite3')
logger.info(f'Benchmark database {db_file}...')
self.conn = sqlite3.connect(db_file)
def is_known_smiles(self, smiles: str) -> bool:
"""
Checks whether the given SMILES string is present in the training set.
:param smiles: SMILES string to look up
:return: True if the SMILES exists in train_data, False otherwise
"""
cursor = self.conn.cursor()
cursor.execute(
'''
SELECT smiles FROM train_data
WHERE smiles=?
''',
[smiles])
id = cursor.fetchone()
cursor.close()
return True if id else False
class BenchmarkData(object, metaclass=Singleton):
def __init__(self):
context = Context()
db_file = context.get_config('data_mount_path', default='/data')
db_file = os.path.join(db_file, 'db/benchmark.sqlite3')
logger.info(f'Benchmark database {db_file}...')
self.conn = sqlite3.connect(db_file)
cursor = self.conn.cursor()
with open("/workspace/cuchem/benchmark/scripts/benchmark.sql") as sql_file:
sql_as_string = sql_file.read()
cursor.executescript(sql_as_string)
def insert_sampling_data(self,
smiles,
num_samples,
scaled_radius,
force_unique,
sanitize,
generated_smiles: List[str],
embeddings: List,
embeddings_dim: List):
"""
Inserts an input SMILES and its generated samples (SMILES, embeddings and
embedding dimensions) into the benchmark database.
"""
logger.debug('Inserting benchmark data...')
cursor = self.conn.cursor()
id = cursor.execute(
'''
INSERT INTO smiles(smiles, num_samples, scaled_radius,
force_unique, sanitize)
VALUES(?,?,?,?,?)
''',
[smiles, num_samples, scaled_radius, force_unique, sanitize]).lastrowid
for i in range(len(generated_smiles)):
gsmiles = generated_smiles[i]
embedding = list(embeddings[i])
embedding_dim = list(embeddings_dim[i])
embedding = pickle.dumps(embedding)
embedding_dim = pickle.dumps(embedding_dim)
cursor.execute(
'''
INSERT INTO smiles_samples(input_id, smiles, embedding, embedding_dim)
VALUES(?, ?, ?, ?)
''', [id, gsmiles, sqlite3.Binary(embedding), sqlite3.Binary(embedding_dim)])
self.conn.commit()
def fetch_sampling_data(self,
smiles,
num_samples,
scaled_radius,
force_unique,
sanitize):
"""
Fetch the benchmark data for a given set of parameters.
:param data:
:return:
"""
logger.debug('Fetching benchmark data...')
cursor = self.conn.cursor()
cursor.execute(
'''
SELECT id FROM smiles
WHERE smiles=?
AND num_samples=?
AND scaled_radius=?
AND force_unique=?
AND sanitize=?
''',
[smiles, num_samples, scaled_radius, force_unique, sanitize])
id = cursor.fetchone()
if not id:
return None
cursor.execute('SELECT smiles FROM smiles_samples WHERE input_id=?',
[id[0]])
generated_smiles = cursor.fetchall()
generated_smiles = [x[0] for x in generated_smiles]
return generated_smiles
def fetch_n_sampling_data(self,
smiles,
num_samples,
scaled_radius,
force_unique,
sanitize):
"""
Fetch the benchmark data for a given set of parameters.
:param data:
:return:
"""
logger.debug('Fetching benchmark data...')
cursor = self.conn.cursor()
cursor.execute(
'''
SELECT id FROM smiles
WHERE smiles=?
AND scaled_radius=?
AND force_unique=?
AND sanitize=?
''',
[smiles, scaled_radius, force_unique, sanitize])
id = cursor.fetchone()
if not id:
return None
cursor.execute(
'''
SELECT smiles, embedding, embedding_dim
FROM smiles_samples WHERE input_id=?
LIMIT ?
''',
[id[0], num_samples])
generated_smiles = cursor.fetchall()
# generated_smiles = [x for x in generated_smiles]
return generated_smiles
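# Illustrative round trip (not part of the original file), assuming the /data mount
# and the benchmark.sql schema script referenced in __init__ are available:
#
#     db = BenchmarkData()
#     db.insert_sampling_data('CCO', 10, 0.1, False, True,
#                             generated_smiles, embeddings, embeddings_dim)
#     cached = db.fetch_sampling_data('CCO', 10, 0.1, False, True)
#
# Here `generated_smiles`, `embeddings` and `embeddings_dim` stand for the lists
# returned by the inference wrapper. `fetch_sampling_data` returns the cached SMILES
# list, or None when no matching row exists, which is how the metrics code decides
# whether to sample the model again.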
| cheminformatics-master | cuchem/cuchem/benchmark/data.py |
cheminformatics-master | cuchem/cuchem/metrics/__init__.py |
|
#!/usr/bin/env python3
import logging
import pickle
import cupy
import numpy as np
import pandas as pd
from cuml.metrics import pairwise_distances
from sklearn.model_selection import ParameterGrid, KFold
from cuml.metrics.regression import mean_squared_error
from cuchem.utils.metrics import spearmanr
from cuchem.utils.distance import tanimoto_calculate
from cuchem.benchmark.data import BenchmarkData, TrainingData
logger = logging.getLogger(__name__)
class BaseSampleMetric():
"""Base class for metrics based on sampling for a single SMILES string"""
name = None
def __init__(self, inferrer):
self.inferrer = inferrer
self.benchmark_data = BenchmarkData()
self.training_data = TrainingData()
def _find_similars_smiles(self,
smiles,
num_samples,
scaled_radius,
force_unique,
sanitize):
# Check db for results from a previous run
generated_smiles = self.benchmark_data.fetch_sampling_data(smiles,
num_samples,
scaled_radius,
force_unique,
sanitize)
if not generated_smiles:
# Generate new samples and update the database
result = self.inferrer.find_similars_smiles(smiles,
num_samples,
scaled_radius=scaled_radius,
force_unique=force_unique,
sanitize=sanitize)
# Result from sampler includes the input SMILES. Removing it.
# result = result[result.Generated == True]
generated_smiles = result['SMILES'].to_list()
embeddings = result['embeddings'].to_list()
embeddings_dim = result['embeddings_dim'].to_list()
# insert generated smiles into a database for use later.
self.benchmark_data.insert_sampling_data(smiles,
num_samples,
scaled_radius,
force_unique,
sanitize,
generated_smiles,
embeddings,
embeddings_dim)
return generated_smiles
def _calculate_metric(self, metric_array, num_samples):
total_samples = len(metric_array) * num_samples
return np.nansum(metric_array) / float(total_samples)
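# Worked example (not part of the original file): with 10 input SMILES and
# num_samples=10, metric_array holds the per-molecule counts returned by sample(),
# e.g. nansum([10, 9, 10, ...]) = 97 valid generations out of 10 * 10 = 100
# attempts, giving a metric of 0.97.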
def variations(self, cfg, model_dict=None):
return NotImplemented
def sample(self):
return NotImplemented
def sample_many(self, smiles_dataset, num_samples, radius):
metric_result = list()
for index in range(len(smiles_dataset.data)):
smiles = smiles_dataset.data.iloc[index]
logger.debug(f'Sampling around {smiles}...')
result = self.sample(smiles, num_samples, radius)
metric_result.append(result)
return np.array(metric_result)
def calculate(self, **kwargs):
smiles_dataset = kwargs['smiles_dataset']
num_samples = kwargs['num_samples']
radius = kwargs['radius']
metric_array = self.sample_many(smiles_dataset, num_samples, radius)
metric = self._calculate_metric(metric_array, num_samples)
return pd.Series({'name': self.__class__.name,
'value': metric,
'radius': radius,
'num_samples': num_samples})
class BaseEmbeddingMetric():
"""Base class for metrics based on embedding datasets"""
name = None
def __init__(self, inferrer):
self.inferrer = inferrer
self.benchmark_data = BenchmarkData()
def variations(self, cfg):
return NotImplemented
def _find_embedding(self,
smiles,
scaled_radius,
force_unique,
sanitize,
max_len):
num_samples = 1
# Check db for results from a previous run
generated_smiles = self.benchmark_data.fetch_n_sampling_data(smiles,
num_samples,
scaled_radius,
force_unique,
sanitize)
if not generated_smiles:
# Generate new samples and update the database
generated_smiles = self.inferrer.smiles_to_embedding(smiles,
max_len,
scaled_radius=scaled_radius,
num_samples=num_samples)
else:
temp = generated_smiles[0]
embedding = pickle.loads(temp[1])
generated_smiles = []
generated_smiles.append(temp[0])
generated_smiles.append(embedding)
generated_smiles.append(pickle.loads(temp[2]))
return generated_smiles[0], generated_smiles[1], generated_smiles[2]
def sample(self, smiles, max_len, zero_padded_vals, average_tokens):
smiles, embedding, dim = self._find_embedding(smiles, 1, False, True, max_len)
embedding = cupy.array(embedding)
embedding = embedding.reshape(dim)
if zero_padded_vals:
embedding[len(smiles):, :] = 0.0
if average_tokens:
embedding = embedding[:len(smiles)].mean(axis=0).squeeze()
assert embedding.shape[0] == dim[-1]
else:
embedding = embedding.flatten()
return embedding
def _calculate_metric(self):
raise NotImplementedError
def sample_many(self, smiles_dataset, zero_padded_vals=True, average_tokens=False):
# Calculate pairwise distances for embeddings
embeddings = []
max_len = 0
for smiles in smiles_dataset.data.to_pandas():
embedding = self.sample(smiles, smiles_dataset.max_len, zero_padded_vals, average_tokens)
max_len = max(max_len, embedding.shape[0])
embeddings.append(cupy.array(embedding))
if max_len > 0:
embeddings_resized = []
for embedding in embeddings:
n_pad = max_len - embedding.shape[0]
if n_pad <= 0:
embeddings_resized.append(embedding)
continue
embedding = cupy.resize(embedding, max_len)
embeddings_resized.append(embedding)
embeddings = embeddings_resized
return cupy.asarray(embeddings)
def calculate(self, **kwargs):
raise NotImplementedError
class Validity(BaseSampleMetric):
name = 'validity'
def __init__(self, inferrer):
super().__init__(inferrer)
def variations(self, cfg, model_dict=None):
return cfg.metric.validity.radius_list
def sample(self, smiles, num_samples, radius):
generated_smiles = self._find_similars_smiles(smiles,
num_samples,
scaled_radius=radius,
force_unique=False,
sanitize=True)
return len(generated_smiles)
class Unique(BaseSampleMetric):
name = 'uniqueness'
def __init__(self, inferrer):
super().__init__(inferrer)
def variations(self, cfg, model_dict=None):
return cfg.metric.unique.radius_list
def sample(self, smiles, num_samples, radius):
generated_smiles = self._find_similars_smiles(smiles,
num_samples,
scaled_radius=radius,
force_unique=False,
sanitize=True)
# Keep only the unique ones
generated_smiles = set(generated_smiles)
return len(generated_smiles)
class Novelty(BaseSampleMetric):
name = 'novelty'
def __init__(self, inferrer):
super().__init__(inferrer)
def variations(self, cfg, model_dict=None):
return cfg.metric.novelty.radius_list
def smiles_in_train(self, smiles):
in_train = self.training_data.is_known_smiles(smiles)
return in_train
def sample(self, smiles, num_samples, radius):
generated_smiles = self._find_similars_smiles(smiles,
num_samples,
scaled_radius=radius,
force_unique=False,
sanitize=True)
result = sum([self.smiles_in_train(x) for x in generated_smiles])
return result
class NearestNeighborCorrelation(BaseEmbeddingMetric):
"""Spearman's rho for correlation of pairwise Tanimoto distances vs Euclidean distance from embeddings"""
name = 'nearest neighbor correlation'
def __init__(self, inferrer):
super().__init__(inferrer)
def variations(self, cfg, model_dict=None):
return cfg.metric.nearestNeighborCorrelation.top_k_list
def _calculate_metric(self, embeddings, fingerprints, top_k=None):
embeddings_dist = pairwise_distances(embeddings)
del embeddings
fingerprints_dist = tanimoto_calculate(fingerprints, calc_distance=True)
del fingerprints
corr = spearmanr(fingerprints_dist, embeddings_dist, top_k)
return corr
def calculate(self, **kwargs):
smiles_dataset = kwargs['smiles_dataset']
fingerprint_dataset = kwargs['fingerprint_dataset']
top_k = kwargs['top_k']
embeddings = self.sample_many(smiles_dataset,
zero_padded_vals=True,
average_tokens=False)
# Calculate pairwise distances for fingerprints
fingerprints = cupy.fromDlpack(fingerprint_dataset.data.to_dlpack())
fingerprints = cupy.asarray(fingerprints, order='C')
metric = self._calculate_metric(embeddings, fingerprints, top_k)
metric = cupy.nanmean(metric)
top_k = embeddings.shape[0] - 1 if not top_k else top_k
return pd.Series({'name': self.name, 'value': metric, 'top_k':top_k})
class Modelability(BaseEmbeddingMetric):
"""Ability to model molecular properties from embeddings vs Morgan Fingerprints"""
name = 'modelability'
def __init__(self, inferrer):
super().__init__(inferrer)
self.embeddings = None
def variations(self, cfg, model_dict=None):
return model_dict.keys()
def gpu_gridsearch_cv(self, estimator, param_dict, xdata, ydata, n_splits=5):
"""Perform grid search with cross validation and return score"""
best_score = np.inf
for param in ParameterGrid(param_dict):
estimator.set_params(**param)
metric_list = []
# Generate CV folds
kfold_gen = KFold(n_splits=n_splits, shuffle=True, random_state=0)
for train_idx, test_idx in kfold_gen.split(xdata, ydata):
xtrain, xtest, ytrain, ytest = xdata[train_idx], xdata[test_idx], ydata[train_idx], ydata[test_idx]
estimator.fit(xtrain, ytrain)
ypred = estimator.predict(xtest)
score = mean_squared_error(ypred, ytest).item()  # per-fold mean squared error; lower is better (no negation needed since we minimize directly)
metric_list.append(score)
metric = np.array(metric_list).mean()
best_score = min(metric, best_score)
return best_score
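# --- Illustrative sketch (not part of the original file) ---
# `gpu_gridsearch_cv` above walks a ParameterGrid, scores each candidate with
# K-fold cross-validated MSE, and keeps the smallest mean error. The hypothetical
# CPU analogue below shows the same pattern with scikit-learn / NumPy stand-ins
# for the cuML estimator and cupy arrays.
def _cpu_gridsearch_cv_sketch(estimator, param_dict, xdata, ydata, n_splits=5):
    import numpy as np
    from sklearn.model_selection import ParameterGrid, KFold
    from sklearn.metrics import mean_squared_error

    best_score = np.inf
    for param in ParameterGrid(param_dict):
        estimator.set_params(**param)
        fold_scores = []
        for train_idx, test_idx in KFold(n_splits=n_splits, shuffle=True,
                                         random_state=0).split(xdata):
            estimator.fit(xdata[train_idx], ydata[train_idx])
            fold_scores.append(mean_squared_error(ydata[test_idx],
                                                  estimator.predict(xdata[test_idx])))
        best_score = min(np.mean(fold_scores), best_score)
    return best_score
# Example (hypothetical): _cpu_gridsearch_cv_sketch(Ridge(), {'alpha': [0.1, 1.0]}, X, y)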
def _calculate_metric(self, embeddings, fingerprints, properties, estimator, param_dict):
"""Perform grid search for each metric and calculate ratio"""
metric_array = []
embedding_errors = []
fingerprint_errors = []
for col in properties.columns:
props = properties[col].astype(cupy.float32).to_array()
embedding_error = self.gpu_gridsearch_cv(estimator, param_dict, embeddings, props)
fingerprint_error = self.gpu_gridsearch_cv(estimator, param_dict, fingerprints, props)
ratio = fingerprint_error / embedding_error # If ratio > 1.0 --> embedding error is smaller --> embedding model is better
metric_array.append(ratio)
embedding_errors.append(embedding_error)
fingerprint_errors.append(fingerprint_error)
return cupy.array(metric_array), cupy.array(fingerprint_errors), cupy.array(embedding_errors)
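# Worked example (not part of the original file): if the Morgan-fingerprint model
# reaches an MSE of 0.8 on a property while the embedding model reaches 0.4, the
# ratio is 0.8 / 0.4 = 2.0 > 1.0, i.e. the embedding supports the better property
# model for that column.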
def calculate(self, **kwargs):
smiles_dataset = kwargs['smiles_dataset']
fingerprint_dataset = kwargs['fingerprint_dataset']
properties = kwargs['properties']
estimator = kwargs['estimator']
param_dict = kwargs['param_dict']
embeddings = self.sample_many(smiles_dataset, zero_padded_vals=False, average_tokens=True)
embeddings = cupy.asarray(embeddings, dtype=cupy.float32)
fingerprints = cupy.fromDlpack(fingerprint_dataset.data.to_dlpack())
fingerprints = cupy.asarray(fingerprints, order='C', dtype=cupy.float32)
metric, fingerprint_errors, embedding_errors = self._calculate_metric(embeddings,
fingerprints,
properties,
estimator,
param_dict)
logger.info(f'{type(metric)} {type(fingerprint_errors)} {type(embedding_errors)}')
metric = cupy.nanmean(metric)
fingerprint_errors = cupy.nanmean(fingerprint_errors)
embedding_errors = cupy.nanmean(embedding_errors)
return pd.Series({'name': self.name,
'value': metric,
'fingerprint_error': fingerprint_errors,
'embedding_error': embedding_errors})
| cheminformatics-master | cuchem/cuchem/metrics/model.py |
import base64
import logging
from typing import Union
import cudf
import pandas
from cuchem.decorator import BaseMolPropertyDecorator
from rdkit import Chem
from rdkit.Chem import Draw
logger = logging.getLogger(__name__)
class MolecularStructureDecorator(BaseMolPropertyDecorator):
ERROR_VALUE = 'Error interpreting SMILES using RDKit'
def decorate(self,
df: Union[cudf.DataFrame, pandas.DataFrame],
smile_cols: int = 0):
mol_struct = []
for idx in range(df.shape[0]):
smiles = df.iat[idx, smile_cols]
try:
m = Chem.MolFromSmiles(smiles)
drawer = Draw.rdMolDraw2D.MolDraw2DCairo(500, 125)
drawer.SetFontSize(1.0)
drawer.DrawMolecule(m)
drawer.FinishDrawing()
img_binary = "data:image/png;base64," + \
base64.b64encode(drawer.GetDrawingText()).decode("utf-8")
mol_struct.append({'value': img_binary, 'level': 'info'})
except Exception as ex:
logger.exception(ex)
mol_struct.append(
{'value': MolecularStructureDecorator.ERROR_VALUE,
'level': 'error'})
df['Chemical Structure'] = mol_struct
return df
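# Illustrative usage (not part of the original file):
#
#     frame = pandas.DataFrame({'SMILES': ['CCO', 'not-a-smiles']})
#     decorated = MolecularStructureDecorator().decorate(frame, smile_cols=0)
#
# Rows RDKit can parse get a base64-encoded PNG in the 'Chemical Structure' column
# (level 'info'); rows it cannot parse get ERROR_VALUE with level 'error'.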
| cheminformatics-master | cuchem/cuchem/decorator/mol_structure.py |
import logging
from typing import Union
import cudf
import pandas
from cuchem.decorator import BaseMolPropertyDecorator
from rdkit import Chem
from rdkit.Chem import QED, Descriptors, Lipinski
logger = logging.getLogger(__name__)
class LipinskiRuleOfFiveDecorator(BaseMolPropertyDecorator):
MAX_LOGP = 3
MAX_MOL_WT = 300
MAX_H_DONORS = 6
MAX_H_ACCEPTORS = 6
MAX_ROTATABLE_BONDS = 3
MAX_QED = 3
def decorate(self,
df: Union[cudf.DataFrame, pandas.DataFrame],
smile_cols: int = 0):
mol_wt = []
mol_logp = []
hdonors = []
hacceptors = []
rotatable_bonds = []
qeds = []
for idx in range(df.shape[0]):
smiles = df.iat[idx, smile_cols]
m = Chem.MolFromSmiles(smiles)
if m is None:
mol_logp.append({'value': '-', 'level': 'info'})
mol_wt.append({'value': '-', 'level': 'info'})
hdonors.append({'value': '-', 'level': 'info'})
hacceptors.append({'value': '-', 'level': 'info'})
rotatable_bonds.append({'value': '-', 'level': 'info'})
qeds.append({'value': '-', 'level': 'info'})
continue
try:
logp = Descriptors.MolLogP(m)
mol_logp.append({'value': round(logp, 2),
'level': 'info' if logp < LipinskiRuleOfFiveDecorator.MAX_LOGP else 'error'})
except Exception as ex:
logger.exception(ex)
mol_logp.append({'value': '-', 'level': 'info'})
try:
wt = Descriptors.MolWt(m)
mol_wt.append({'value': round(wt, 2),
'level': 'info' if wt < LipinskiRuleOfFiveDecorator.MAX_MOL_WT else 'error'})
except Exception as ex:
logger.exception(ex)
mol_wt.append({'value': '-', 'level': 'info'})
try:
hdonor = Lipinski.NumHDonors(m)
hdonors.append({'value': hdonor,
'level': 'info' if hdonor < LipinskiRuleOfFiveDecorator.MAX_H_DONORS else 'error'})
except Exception as ex:
logger.exception(ex)
hdonors.append({'value': '-', 'level': 'info'})
try:
hacceptor = Lipinski.NumHAcceptors(m)
hacceptors.append(
{'value': hacceptor,
'level': 'info' if hacceptor < LipinskiRuleOfFiveDecorator.MAX_H_ACCEPTORS else 'error'})
except Exception as ex:
logger.exception(ex)
hacceptors.append({'value': '-', 'level': 'info'})
try:
rotatable_bond = Lipinski.NumRotatableBonds(m)
rotatable_bonds.append(
{'value': rotatable_bond,
'level': 'info' if rotatable_bond < LipinskiRuleOfFiveDecorator.MAX_ROTATABLE_BONDS else 'error'})
except Exception as ex:
logger.exception(ex)
rotatable_bonds.append({'value': '-', 'level': 'info'})
try:
qed = QED.qed(m)
qeds.append({'value': round(qed, 4),
'level': 'info' if qed < LipinskiRuleOfFiveDecorator.MAX_QED else 'error'})
except Exception as ex:
logger.exception(ex)
qeds.append({'value': '-', 'level': 'info'})
df['Molecular Weight'] = mol_wt
df['LogP'] = mol_logp
df['H-Bond Donors'] = hdonors
df['H-Bond Acceptors'] = hacceptors
df['Rotatable Bonds'] = rotatable_bonds
df['QED'] = qeds
return df
| cheminformatics-master | cuchem/cuchem/decorator/lipinski.py |
from typing import Union
import cudf
import pandas
class BaseMolPropertyDecorator(object):
def decorate(self,
df: Union[cudf.DataFrame, pandas.DataFrame],
smile_cols: int = 0):
raise NotImplementedError
from .lipinski import LipinskiRuleOfFiveDecorator as LipinskiRuleOfFiveDecorator
from .mol_structure import MolecularStructureDecorator as MolecularStructureDecorator
| cheminformatics-master | cuchem/cuchem/decorator/__init__.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parallel processing of ZINC15 data files to create a trie
import glob
import logging
import multiprocessing
import os
from pathlib import Path
import pandas as pd
from cuchem.utils.dataset import ZINC_CSV_DIR, \
ZINC_TRIE_DIR, \
generate_trie_filename, \
TRIE_FILENAME_LENGTH, \
SHORT_TRIE_FILENAME
### SETTINGS ###
LOG_PATH = os.path.join(ZINC_TRIE_DIR, 'processing.log')
FILE_LOG_PATH = os.path.join(ZINC_TRIE_DIR, 'processed_files.txt')
# Use number of processes and queue size to balance memory
# so there are just slightly more items in queue than processes
NUM_PROCESSES = (multiprocessing.cpu_count() * 2) - 1  # --> max num processes, but needs more memory
QUEUE_SIZE = int(1e5)
def load_data(fil, trie_filename_length, short_trie_filename):
"""Load data as a pandas dataframe"""
data = pd.read_csv(fil, usecols=['smiles', 'set'])
data['filename'] = data['smiles'].map(generate_trie_filename)
data['filename'] = data['set'] + '/' + data['filename']
data.drop('set', axis=1, inplace=True)
data = data.set_index('filename').sort_index()
return data
def listener(queue, filelist, trie_filename_length, short_trie_filename):
"""
Add batches to the queue
Params:
queue: multiprocessing queue of batches
filelist: list of filenames to import
trie_filename_length: integer length of filename to use for trie
short_trie_filename: name to use for molecules shorter than minimum length
"""
chunksize = 100
logger = multiprocessing.get_logger()
data_cleanup = lambda x: (x[0], x[1]['smiles'].tolist())
for fil in filelist:
logger.info(f'Reading {fil}')
data = load_data(fil, trie_filename_length, short_trie_filename)
data_grouper = [data_cleanup(x) for x in data.groupby(level=0)]
num_groups = len(data_grouper)
data_grouper = [data_grouper[i: i + chunksize] for i in range(0, len(data_grouper), chunksize)]
num_chunks = len(data_grouper)
logger.info(f'Finished processing {fil} with {num_groups} groups into {num_chunks} chunks')
with open(FILE_LOG_PATH, 'a+') as fh:
fh.write(fil + '\n')
for chunk in data_grouper:
queue.put(chunk)
# queue.put('DONE')
return
def process_data(base_filename, smiles_list, output_dir, lock):
"""Write SMILES to files.
"""
logger = multiprocessing.get_logger()
filename = os.path.join(output_dir, base_filename)
num_entries = len(smiles_list)
if num_entries >= 100:
logger.info(f'Working on {filename} with {num_entries} entries')
chunksize = 100
smiles_list = [smiles_list[i: i + chunksize] for i in range(0, len(smiles_list), chunksize)]
lock.acquire()
with open(filename, 'a+') as fh:
for sl in smiles_list:
fh.write('\n'.join(sl) + '\n')
lock.release()
# if num_entries >= 100:
# logger.info(f'Saved {filename} with {num_entries} entries')
return
def worker(queue, lock, output_dir):
"""
Process batches of data from the queue
Params:
queue: multiprocessing queue of batches
lock: ensure only one process modifies the file at a time
"""
logger = multiprocessing.get_logger()
while True:
batch = queue.get(True)
for data in batch:
filename, smiles_list = data
process_data(filename, smiles_list, output_dir, lock)
if __name__ == '__main__':
for subdir in ['train', 'val', 'test']:
dir = os.path.join(ZINC_TRIE_DIR, subdir)
Path(dir).mkdir(parents=True, exist_ok=True)
# Setup logging that is compatible with multiprocessing
multiprocessing.log_to_stderr()
logger = multiprocessing.get_logger()
logger.setLevel(logging.INFO)
fmt = '%(asctime)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(fmt)
console = logging.StreamHandler()
console.setFormatter(formatter)
logger.addHandler(console)
fh = logging.FileHandler(LOG_PATH)
fh.setFormatter(formatter)
logger.addHandler(fh)
# Gather list of filenames
filelist = sorted(glob.glob(os.path.join(ZINC_CSV_DIR, '*.csv')))
logger.info(ZINC_CSV_DIR)
logger.info(filelist)
n_files = len(filelist)
logger.info(f'Identified {n_files} files')
# Setup worker for multiprocessing
manager = multiprocessing.Manager()
queue = manager.Queue(QUEUE_SIZE)
logger.info(f'Starting queue with maximum size of {QUEUE_SIZE}')
producer = multiprocessing.Process(target=listener,
args=(queue, filelist, TRIE_FILENAME_LENGTH, SHORT_TRIE_FILENAME))
producer.start()
# Setup listener
logger.info(f'Starting {NUM_PROCESSES} listeners')
pool = multiprocessing.Pool(NUM_PROCESSES)
lock = manager.Lock()
results = []
for id_ in range(NUM_PROCESSES):
results.append(pool.apply_async(worker, args=(queue, lock, ZINC_TRIE_DIR)))
producer.join()
pool.terminate()
logger.info(f'Finished processing {n_files} files.')
| cheminformatics-master | cuchem/cuchem/datasets/create_ZINC15_trie_multiprocessing.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pandas as pd
from cuchemcommon.data.helper.chembldata import ChEmblData
from cuchemcommon.fingerprint import calc_morgan_fingerprints
DATA_BENCHMARK_DIR = '/workspace/cuchem/cuchem/cheminformatics/data'
DEFAULT_MAX_SEQ_LEN = 512
if __name__ == '__main__':
num_samples = 20000
benchmark_df = pd.DataFrame(ChEmblData.fetch_random_samples(num_samples, DEFAULT_MAX_SEQ_LEN))
benchmark_df.rename(columns={'len': 'length'}, inplace=True)
# TODO: benchmark SMILES have not been canonicalized. Should this be done?
fp = calc_morgan_fingerprints(benchmark_df)
fp.columns = fp.columns.astype(np.int64)
fp.index = fp.index.astype(np.int64)
for col in fp.columns:
fp[col] = fp[col].astype(int)
# fp[col] = fp[col].astype(np.float32)
# Write results
benchmark_df.reset_index(inplace=True) # For consistency with approved drugs, only one has index reset
benchmark_df.to_csv(os.path.join(DATA_BENCHMARK_DIR, 'benchmark_ChEMBL_random_sampled_drugs.csv'))
fp.to_csv(os.path.join(DATA_BENCHMARK_DIR, 'fingerprints_ChEMBL_random_sampled_drugs.csv'))
| cheminformatics-master | cuchem/cuchem/datasets/prepare_ChEMBL_tanimoto.py |
import logging
import os
import pathlib
import cudf
logger = logging.getLogger(__name__)
class GenericCSVDataset():
def __init__(self):
self.name = None
self.index_col = None
self.index = None
self.max_len = None
self.data_path = None
self.data = None
def _load_csv(self, columns, length_columns=None, return_remaining=False):
columns = [columns] if not isinstance(columns, list) else columns
data = cudf.read_csv(self.data_path).drop_duplicates(subset=columns)
if self.index_col:
data = data.set_index(self.index_col).sort_index()
if self.index is not None:
data = data.loc[self.index]
elif self.max_len:
length_columns = [length_columns] if not isinstance(length_columns, list) else length_columns
assert len(length_columns) == len(columns)
mask = data[length_columns].max(axis=1) <= self.max_len
data = data[mask]
out_col = ['smiles1'] if len(columns) == 1 else ['smiles1', 'smiles2']
renamer = dict(zip(columns, out_col))
data.rename(columns=renamer, inplace=True)
if len(out_col) == 1:
cleaned_data = data[out_col[0]] # Series
else:
cleaned_data = data[out_col] # DataFrame
if return_remaining:
if length_columns:
remain_columns = [x for x in data.columns if (x not in out_col) & (x not in length_columns)]
else:
remain_columns = [x for x in data.columns if (x not in out_col)]
other_data = data[remain_columns]
else:
other_data = None
return cleaned_data, other_data
def load(self, columns=['canonical_smiles'], length_columns=['length']):
self.data, _ = self._load_csv(columns, length_columns)
class GenericFingerprintDataset():
def __init__(self):
self.name = None
self.index_col = None
self.data = None
self.data_path = None
def load(self, index=None):
data = cudf.read_csv(self.data_path)
if self.index_col:
data = data.set_index(self.index_col).sort_index()
if index is not None:
data = data.loc[index]
self.data = data
return
class ChEMBL_Approved_Drugs(GenericCSVDataset):
def __init__(self, index_col='molregno', max_len=None, index=None):
self.name = 'ChEMBL Approved Drugs (Phase III/IV)'
assert (max_len is None) | (index is None)
self.index_col = index_col
self.max_len = max_len
self.index = index
self.length = None
data_path = pathlib.Path(__file__).absolute()
while 'cuchem/nvidia' in data_path.as_posix(): # stop at cuchem base path
data_path = data_path.parent
self.data_path = os.path.join(data_path,
'tests',
'data',
'benchmark_approved_drugs.csv')
assert os.path.exists(self.data_path)
class ChEMBL_20K_Samples(GenericCSVDataset):
def __init__(self, index_col='molregno', max_len=None, index=None):
self.name = 'ChEMBL 20K Samples'
assert (max_len is None) | (index is None)
self.index_col = index_col
self.max_len = max_len
self.index = index
self.data = None
self.length = None
self.data_path = os.path.join(pathlib.Path(__file__).parent.parent.absolute(),
'data',
'benchmark_ChEMBL_random_sampled_drugs.csv')
assert os.path.exists(self.data_path)
class ChEMBL_20K_Fingerprints(GenericFingerprintDataset):
def __init__(self, index_col='molregno'):
self.name = 'ChEMBL 20K Fingerprints'
self.index_col = index_col
self.data = None
self.data_path = os.path.join(pathlib.Path(__file__).parent.parent.absolute(),
'data',
'fingerprints_ChEMBL_random_sampled_drugs.csv')
assert os.path.exists(self.data_path)
class ZINC15_TestSplit_20K_Samples(GenericCSVDataset):
def __init__(self, index_col='index', max_len=None, index=None):
self.name = 'ZINC15 Test Split 20K Samples'
assert (max_len is None) | (index is None)
self.index_col = index_col
self.max_len = max_len
self.index = index
self.data = None
self.length = None
self.data_path = os.path.join(pathlib.Path(__file__).parent.parent.absolute(),
'data',
'benchmark_zinc15_test.csv')
assert os.path.exists(self.data_path)
def load(self, columns=['canonical_smiles'], length_columns=['length']):
self.data, self.properties = self._load_csv(columns, length_columns, return_remaining=True)
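# Illustrative usage (not part of the original file), mirroring how the benchmark
# scripts consume these datasets (requires the bundled benchmark CSV files):
#
#     smiles_dataset = ZINC15_TestSplit_20K_Samples(max_len=512)
#     fingerprint_dataset = ZINC15_TestSplit_20K_Fingerprints()
#     smiles_dataset.load()                                # fills .data and .properties
#     fingerprint_dataset.load(smiles_dataset.data.index)  # align on the same molecules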
class ZINC15_TestSplit_20K_Fingerprints(GenericFingerprintDataset):
def __init__(self, index_col='index'):
self.name = 'ZINC15 Test Split 20K Fingerprints'
self.index_col = index_col
self.data = None
self.data_path = os.path.join(pathlib.Path(__file__).parent.parent.absolute(),
'data',
'fingerprints_zinc15_test.csv')
assert os.path.exists(self.data_path)
| cheminformatics-master | cuchem/cuchem/datasets/loaders.py |
cheminformatics-master | cuchem/cuchem/datasets/__init__.py |
|
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cudf
import numpy as np
import pandas as pd
from cuchemcommon.data.helper.chembldata import ChEmblData
from cuchemcommon.fingerprint import calc_morgan_fingerprints
DATA_BENCHMARK_DIR = '/workspace/cuchem/tests/data'
if __name__ == '__main__':
benchmark_df = pd.DataFrame(ChEmblData.fetch_approved_drugs())
# TODO: benchmark SMILES have not been canonicalized. Should this be done?
fp = calc_morgan_fingerprints(benchmark_df)
fp.columns = fp.columns.astype(np.int64)
fp.index = fp.index.astype(np.int64)
for col in fp.columns:
fp[col] = fp[col].astype(np.float32)
# Write results
benchmark_df.to_csv(os.path.join(DATA_BENCHMARK_DIR, 'benchmark_approved_drugs.csv'))
fp.to_csv(os.path.join(DATA_BENCHMARK_DIR, 'fingerprints_approved_drugs.csv'))
fp_hdf5 = cudf.DataFrame(fp)
fp_hdf5.to_hdf(os.path.join(DATA_BENCHMARK_DIR, 'filter_00.h5'), 'fingerprints', format='table')
| cheminformatics-master | cuchem/cuchem/datasets/prepare_ChEMBL_approved_drugs_data.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import glob
import pandas as pd
import numpy as np
import multiprocessing as mp
from cuchemcommon.fingerprint import calc_morgan_fingerprints
from cuchem.utils.dataset import ZINC_TRIE_DIR
from rdkit.Chem.Crippen import MolLogP
from rdkit.Chem.Descriptors import ExactMolWt
from rdkit import Chem
DATA_BENCHMARK_DIR = '/workspace/cuchem/nvidia/cheminformatics/data'
NUM_PROCESSES = (mp.cpu_count() * 2) - 1  # --> max num processes, but needs more memory
NUM_DATA = 20000
def calc_properties(smiles_list):
logp_vals, mw_vals = [], []
for smiles in smiles_list:
mol = Chem.MolFromSmiles(smiles)
logp_vals.append(MolLogP(mol, True))
mw_vals.append(ExactMolWt(mol))
return pd.DataFrame({'logp': logp_vals, 'mw': mw_vals})
if __name__ == '__main__':
# Read data
zinc_test_filelist = glob.glob(os.path.join(ZINC_TRIE_DIR, 'test', '*.txt'))
benchmark_df = list()
for fil in zinc_test_filelist:
benchmark_df.append(pd.read_csv(fil, names=['canonical_smiles']))
benchmark_df = pd.concat(benchmark_df, axis=0).reset_index(drop=True)
benchmark_df = benchmark_df.sample(n=NUM_DATA, replace=False, random_state=0).reset_index(drop=True)
assert NUM_DATA <= len(benchmark_df)
benchmark_df['length'] = benchmark_df['canonical_smiles'].map(len)
# Calculate properties -- parallelized
pool = mp.Pool(processes=NUM_PROCESSES)
chunks = benchmark_df['canonical_smiles'].to_numpy()
chunks = np.array_split(chunks, NUM_PROCESSES)
outputs = pool.map(calc_properties, chunks)
outputs = pd.concat(outputs, axis=0).reset_index(drop=True)
benchmark_df = pd.concat([benchmark_df, outputs], axis=1)
benchmark_df.index.name = 'index'
fp = calc_morgan_fingerprints(benchmark_df[['canonical_smiles']])
fp.columns = fp.columns.astype(np.int64)
fp.index = fp.index.astype(np.int64)
fp.index.name = 'index'
for col in fp.columns:
fp[col] = fp[col].astype(np.float32)
# Write results
benchmark_df.reset_index().to_csv(os.path.join(DATA_BENCHMARK_DIR, 'benchmark_zinc15_test.csv'), index=False)
fp.reset_index().to_csv(os.path.join(DATA_BENCHMARK_DIR, 'fingerprints_zinc15_test.csv'), index=False)
# fp_hdf5 = cudf.DataFrame(fp)
# fp_hdf5.to_hdf(os.path.join(DATA_BENCHMARK_DIR, 'filter_00.h5', 'fingerprints', format='table'))
| cheminformatics-master | cuchem/cuchem/datasets/create_ZINC15_test_split.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
from math import isnan
import cudf
import cupy
import numpy
from cuchemcommon.data.helper.chembldata import BATCH_SIZE
from numba import cuda
from sklearn.metrics import silhouette_score
logger = logging.getLogger(__name__)
def batched_silhouette_scores(embeddings, clusters, batch_size=BATCH_SIZE):
"""Calculate silhouette score in batches on the CPU. Compatible with data on GPU or CPU
Args:
embeddings (cudf.DataFrame or cupy.ndarray): input features to clustering
clusters (cudf.DataFrame or cupy.ndarray): cluster values for each data point
batch_size (int, optional): Size for batching.
Returns:
float: mean silhouette score from batches
"""
# Function to calculate batched results
def _silhouette_scores(input_data):
embeddings, clusters = input_data
return silhouette_score(cupy.asnumpy(embeddings), cupy.asnumpy(clusters))
if hasattr(embeddings, 'values'):
embeddings = embeddings.values
embeddings = cupy.asarray(embeddings)
if hasattr(clusters, 'values'):
clusters = clusters.values
clusters = cupy.asarray(clusters)
n_data = len(embeddings)
msg = 'Calculating silhouette score on {} molecules'.format(n_data)
if batch_size < n_data:
msg += ' with batch size of {}'.format(batch_size)
logger.info(msg + ' ...')
n_chunks = int(math.ceil(n_data / batch_size))
embeddings_chunked = cupy.array_split(embeddings, n_chunks)
clusters_chunked = cupy.array_split(clusters, n_chunks)
# Calculate scores on batches and return the average
scores = list(map(_silhouette_scores, zip(embeddings_chunked, clusters_chunked)))
return numpy.nanmean(numpy.array(scores))
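# Illustrative usage sketch: scores a small synthetic embedding. The shapes,
# number of clusters and batch size below are arbitrary example values.
def _example_batched_silhouette_scores():
    embeddings = cupy.random.rand(1000, 8)           # 1000 points, 8 features
    clusters = cupy.random.randint(0, 7, size=1000)  # 7 synthetic cluster labels
    return batched_silhouette_scores(embeddings, clusters, batch_size=500)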
def rankdata(data, method='average', na_option='keep', axis=1, is_symmetric=False):
"""Rank observations for a series of samples, with tie handling
NOTE: due to a bug with cudf ranking, data will be transposed if row-wise ranking is
selected
Parameters
----------
data : array_like
The array of values to be ranked.
method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
* 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
* 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
* 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
* 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
* 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
axis : {None, int}, optional
Axis along which to perform the ranking. Default is 1 -- samples in rows,
observations in columns
is_symmetric : {False, bool}, optional
Will be used to avoid additional data transpose steps if axis = 1
Returns
-------
ranks : cupy ndarray
An array of size equal to the size of `a`, containing rank
scores.
See also scipy.stats.rankdata, for which this function is a replacement
"""
dtype = cupy.result_type(data.dtype, cupy.float64)
data = cupy.asarray(data, dtype=dtype)
if is_symmetric:
assert data.ndim == 2
assert data.shape[0] == data.shape[1]
if data.ndim < 2:
data = data[:, None]
elif (data.ndim == 2) & (axis == 1) & (not is_symmetric):
data = data.T
ranks = cudf.DataFrame(data).rank(axis=0, method=method, na_option=na_option)
ranks = ranks.values
if axis == 1:
ranks = ranks.T
return ranks
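# Illustrative usage sketch: a tiny worked example of row-wise ranking with
# tie averaging. The input values are arbitrary.
def _example_rankdata():
    data = cupy.asarray([[0.5, 0.1, 0.1, 0.9]])
    # Expected result with method='average': [[3.0, 1.5, 1.5, 4.0]]
    return rankdata(data, method='average', axis=1)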
@cuda.jit()
def _get_kth_unique_kernel(data, kth_values, k, axis):
"""Numba kernel to get the kth unique rank from a sorted array"""
i = cuda.grid(1)
if axis == 1:
vector = data[i, :]
else:
vector = data[:, i]
pos = 0
prev_val = cupy.NaN
for val in vector:
if not isnan(val):
if val != prev_val:
prev_val = val
pos += 1
if pos == k:
break
kth_values[i] = prev_val
def get_kth_unique_value(data, k, axis=1):
"""Find the kth value along an axis of a matrix on the GPU
Parameters
----------
data : array_like
The array of values to be ranked.
k : {int} kth unique value to be found
axis : {None, int}, optional
Axis along which to perform the ranking. Default is 1 -- samples in rows,
observations in columns
Returns
-------
kth_values : cupy ndarray
An array of kth values.
"""
# Coerce data into array -- make a copy since it needs to be sorted
# TODO -- should the sort be done in Numba kernel (and how to do it)?
dtype = cupy.result_type(data, cupy.float64)
data_id = id(data)
data = cupy.ascontiguousarray(data, dtype=dtype)
if data_id == id(data): # Ensure sort is being done on a copy
data = data.copy()
assert data.ndim <= 2
if data.ndim < 2:
if axis == 0:
data = data[:, None]
else:
data = data[None, :]
if axis == 0:
n_obs, n_samples = data.shape
else:
n_samples, n_obs = data.shape
data.sort(axis=axis)
kth_values = cupy.zeros(n_samples, dtype=data.dtype)
_get_kth_unique_kernel.forall(n_samples, 1)(data, kth_values, k, axis)
if axis == 0:
kth_values = kth_values[None, :]
else:
kth_values = kth_values[:, None]
return kth_values
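# Illustrative usage sketch: finds the 2nd unique value of each row while
# ignoring NaNs. The input values are arbitrary.
def _example_get_kth_unique_value():
    data = cupy.asarray([[3.0, 1.0, 1.0, 2.0, cupy.nan]])
    # The row sorts to [1, 1, 2, 3, nan]; the 2nd unique value is 2.0,
    # returned as a column vector of shape (1, 1).
    return get_kth_unique_value(data, k=2, axis=1)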
def corr_pairwise(x, y, return_pearson=False):
"""Covariance and Pearson product-moment correlation coefficients on the GPU for paired data with tolerance of NaNs.
    Currently only supports rows as samples and columns as observations.
Parameters
----------
x : array_like
The baseline array of values.
y : array_like
The comparison array of values.
Returns
-------
corr : cupy ndarray
Array of correlation values
"""
def _cov_pairwise(x1, x2, factor):
return cupy.nansum(x1 * x2, axis=1, keepdims=True) * cupy.true_divide(1, factor)
# Coerce arrays into 2D format and set dtype
dtype = cupy.result_type(x, y, cupy.float64)
x = cupy.asarray(x, dtype=dtype)
y = cupy.asarray(y, dtype=dtype)
assert x.shape == y.shape
if x.ndim < 2:
x = x[None, :]
y = y[None, :]
n_samples, n_obs = x.shape
# Calculate degrees of freedom for each sample pair
ddof = 1
nan_count = (cupy.isnan(x) | cupy.isnan(y)).sum(axis=1, keepdims=True)
fact = n_obs - nan_count - ddof
# Mean normalize
x -= cupy.nanmean(x, axis=1, keepdims=True)
y -= cupy.nanmean(y, axis=1, keepdims=True)
# Calculate covariance matrix
corr = _cov_pairwise(x, y, fact)
if return_pearson:
x_corr = _cov_pairwise(x, x, fact)
y_corr = _cov_pairwise(y, y, fact)
auto_corr = cupy.sqrt(x_corr) * cupy.sqrt(y_corr)
corr = corr / auto_corr
corr = cupy.clip(corr.real, -1, 1, out=corr.real)
return corr
return corr.squeeze()
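# Illustrative usage sketch: Pearson correlation for one perfectly correlated
# sample pair. The values are arbitrary; the expected result is 1.0.
def _example_corr_pairwise():
    x = cupy.asarray([[1.0, 2.0, 3.0, 4.0]])
    y = cupy.asarray([[2.0, 4.0, 6.0, 8.0]])
    return corr_pairwise(x, y, return_pearson=True)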
def spearmanr(x, y, axis=1, top_k=None):
"""GPU implementation of Spearman R correlation coefficient for paired data with NaN support
Parameters
----------
x : array_like
The baseline array of values.
y : array_like
The comparison array of values.
axis : {None, int}, optional
Axis along which to perform the ranking. Default is 1 -- samples in rows,
observations in columns
top_k : {int} kth unique value to be found
Returns
-------
spearmanr_array : cupy ndarray
Array of spearmanr rank correlation values
"""
if hasattr(x, 'values'):
x = x.values
x = cupy.array(x, copy=True)
if hasattr(y, 'values'):
y = y.values
y = cupy.array(y, copy=True)
assert x.ndim <= 2
assert x.shape == y.shape
if x.ndim < 2:
if axis == 0:
x = x[:, None]
y = y[:, None]
else:
x = x[None, :]
y = y[None, :]
if axis == 0:
n_obs, n_samples = x.shape
else:
n_samples, n_obs = x.shape
n_obs -= 1
assert n_obs > 2
msg = 'Calculating Spearman correlation coefficient on {} molecules'.format(n_samples)
if top_k is not None:
msg += ' with selection of top {} molecules'.format(top_k)
logger.info(msg + ' ...')
# Force diagonal to be last in ranking so it can be ignored
cupy.fill_diagonal(x, cupy.NaN)
cupy.fill_diagonal(y, cupy.NaN)
ranks_x = rankdata(x, axis=axis, method='average', na_option='keep')
ranks_y = rankdata(y, axis=axis, method='average', na_option='keep')
# cudf does not currently preserve the NaNs, even with na_option='keep' so add them back
cupy.fill_diagonal(ranks_x, cupy.NaN)
cupy.fill_diagonal(ranks_y, cupy.NaN)
# Filter out values above top k
if top_k is not None:
if top_k <= n_obs:
top_k_values = get_kth_unique_value(ranks_x, top_k, axis=axis)
mask = ranks_x > top_k_values
ranks_x[mask] = cupy.NaN
ranks_y[mask] = cupy.NaN
spearmanr_array = corr_pairwise(ranks_x, ranks_y, return_pearson=True).squeeze()
return spearmanr_array
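# Illustrative usage sketch: compares two random 5x5 pairwise-distance-like
# matrices row by row. The matrix size and top_k value are arbitrary.
def _example_spearmanr():
    dist_a = cupy.random.rand(5, 5)
    dist_b = cupy.random.rand(5, 5)
    # One rank correlation per row; diagonal entries are ignored.
    return spearmanr(dist_a, dist_b, axis=1, top_k=3)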
| cheminformatics-master | cuchem/cuchem/utils/metrics.py |
import logging
import sys
import traceback
from functools import wraps
# import dask
# import dask_cudf
import dash
import numpy as np
logger = logging.getLogger(__name__)
# DELAYED_DF_TYPES = Union[dask.dataframe.core.DataFrame, dask_cudf.core.DataFrame]
def generate_colors(num_colors):
"""
    Generates a list of random hex color strings (random red and green channels, fixed blue)
"""
a = ((np.random.random(size=num_colors) * 255))
b = ((np.random.random(size=num_colors) * 255))
return [
"#%02x%02x%02x" % (int(r), int(g), 125) for r, g in zip(a, b)
]
def report_ui_error(num_returns):
"""
    Catches all exceptions raised by the wrapped function and converts them
    into an error message for the UI. The error message is always returned as
    the last return value; all other return values are set to dash.no_update.
    The decorator must be given the total number of return values.
"""
def _report_ui_error(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except dash.exceptions.PreventUpdate as e:
raise e
except Exception as e:
traceback.print_exception(*sys.exc_info())
ret_value = [dash.no_update for i in range(num_returns - 1)]
ret_value.append(str(e))
return ret_value
return func_wrapper
return _report_ui_error
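# Illustrative usage sketch: a hypothetical Dash callback wrapped with the
# decorator above. 'build_figure' is a placeholder, not a real function; the
# last output of the callback is reserved for the error message.
#
#     @report_ui_error(2)
#     def update_figure(n_clicks):
#         figure = build_figure(n_clicks)
#         return figure, dash.no_update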
| cheminformatics-master | cuchem/cuchem/utils/__init__.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import cupy
from numba import cuda
@cuda.jit
def compute_norms(data, norms):
"""Compute norms
Args:
        data (matrix): fingerprint matrix with samples in rows; nonzero entries
            are overwritten in place with their 1-based column index
        norms (array): 1-D output array that receives one norm per row
"""
i = cuda.grid(1)
norms[i] = len(data[i])
for j in range(len(data[i])):
if data[i][j] != 0:
value = j + 1
data[i][j] = value
norms[i] = norms[i] + (value ** 2)
if norms[i] != 0:
norms[i] = math.sqrt(norms[i])
@cuda.jit
def compute_tanimoto_similarity_matrix(data, norms, dist_array):
"""Numba kernel to calculate tanimoto similarity according to the wikipedia definition
Args:
data (matrix): data with samples in rows
        norms (array): per-row norms produced by compute_norms
dist_array (matrix): square matrix to hold pairwise distance
"""
x = cuda.grid(1)
rows = len(data)
i = x // rows
j = x % rows
if i == j:
dist_array[i][j] = 1.0
return
a = data[i]
b = data[j]
prod = 0
for k in range(len(a)):
prod = prod + (a[k] * b[k])
a_norm = norms[i]
b_norm = norms[j]
dist_array[i][j] = (prod / ((a_norm ** 2 + b_norm ** 2) - prod))
@cuda.jit
def compute_rdkit_tanimoto_similarity_matrix(data, dist_array):
"""Numba kernel to calculate tanimoto similarity according to the RDKit definition
Args:
data (matrix): data with samples in rows
dist_array (matrix): square matrix to hold pairwise distance
"""
x = cuda.grid(1)
rows = len(data)
i = x // rows
j = x % rows
if i == j:
dist_array[i][j] = 1.0
return
a = data[i]
b = data[j]
intersections = 0
total = 0
for k in range(len(a)):
if a[k] and b[k]:
intersections += 1
total += 2
elif a[k] or b[k]:
total += 1
dist_array[i][j] = intersections / float(total - intersections)
def tanimoto_calculate(fp, calc_distance=False):
"""Calculate tanimoto similarity or distance
Args:
fp (cupy array or cudf dataframe): fingerprints with samples in rows
calc_distance (bool, optional): Calculate distance metric. Defaults to False.
Returns:
array: pairwise tanimoto distance
"""
dist_array = cupy.zeros((fp.shape[0], fp.shape[0]), cupy.float32)
compute_rdkit_tanimoto_similarity_matrix.forall(fp.shape[0] * fp.shape[0], 1)(fp, dist_array)
if calc_distance:
dist_array = 1.0 - dist_array
return dist_array
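# Illustrative usage sketch: pairwise Tanimoto similarity for two tiny 4-bit
# fingerprints. The bit patterns are arbitrary; the off-diagonal similarity
# here is 2/3.
def _example_tanimoto_calculate():
    fp = cupy.asarray([[1, 1, 0, 1],
                       [1, 0, 0, 1]], dtype=cupy.float32)
    similarity = tanimoto_calculate(fp)
    distance = tanimoto_calculate(fp, calc_distance=True)
    return similarity, distance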
| cheminformatics-master | cuchem/cuchem/utils/distance.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
# ZINC dataset parameters
ZINC_CSV_DIR = '/data/zinc_csv/train'
# ZINC trie parameters
ZINC_TRIE_DIR = '/data/zinc_trie'
TRIE_FILENAME_LENGTH = 10
SHORT_TRIE_FILENAME = 'SHORT_SMILES'
TRIE_FILENAME_REGEX = re.compile(
r'[/\\]+') # re.compile(r'[^\w]+') # Alternative to strip all non-alphabet/non-numerical characters
def generate_trie_filename(smiles):
"""Generate appropriate filename for the trie"""
# TODO smiles string should be cleaned before testing length -- will require regeneration of trie index
filename_extractor = lambda x: re.sub(TRIE_FILENAME_REGEX, '', x)[:TRIE_FILENAME_LENGTH]
if len(smiles) < TRIE_FILENAME_LENGTH:
filename = SHORT_TRIE_FILENAME
else:
filename = filename_extractor(smiles)
return filename + '.txt'
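# Illustrative usage sketch: the SMILES strings below are arbitrary examples.
def _example_generate_trie_filename():
    long_name = generate_trie_filename('CC(=O)Oc1ccccc1C(=O)O')  # 'CC(=O)Oc1c.txt'
    short_name = generate_trie_filename('CCO')                   # 'SHORT_SMILES.txt'
    return long_name, short_name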
| cheminformatics-master | cuchem/cuchem/utils/dataset.py |
import logging
import cupy
from dask.distributed import Client, LocalCluster
from dask_cuda import initialize, LocalCUDACluster
from dask_cuda.local_cuda_cluster import cuda_visible_devices
from dask_cuda.utils import get_n_gpus
logger = logging.getLogger(__name__)
def initialize_cluster(use_gpu=True, n_cpu=None, n_gpu=-1):
enable_tcp_over_ucx = True
enable_nvlink = True
enable_infiniband = True
    logger.info('Starting dask cluster...')
if use_gpu:
initialize.initialize(create_cuda_context=True,
enable_tcp_over_ucx=enable_tcp_over_ucx,
enable_nvlink=enable_nvlink,
enable_infiniband=enable_infiniband)
if n_gpu == -1:
n_gpu = get_n_gpus()
device_list = cuda_visible_devices(1, range(n_gpu)).split(',')
CUDA_VISIBLE_DEVICES = []
for device in device_list:
try:
CUDA_VISIBLE_DEVICES.append(int(device))
except ValueError as vex:
                logger.warning(vex)
logger.info('Using GPUs {} ...'.format(CUDA_VISIBLE_DEVICES))
cluster = LocalCUDACluster(protocol="ucx",
dashboard_address=':8787',
CUDA_VISIBLE_DEVICES=CUDA_VISIBLE_DEVICES,
enable_tcp_over_ucx=enable_tcp_over_ucx,
enable_nvlink=enable_nvlink,
enable_infiniband=enable_infiniband)
else:
logger.info('Using {} CPUs ...'.format(n_cpu))
cluster = LocalCluster(dashboard_address=':8787',
n_workers=n_cpu,
threads_per_worker=4)
client = Client(cluster)
client.run(cupy.cuda.set_allocator)
return client
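# Illustrative usage sketch: typical ways this helper might be invoked from a
# driver script (kept as comments because it starts a live cluster).
#
#     client = initialize_cluster()                        # all visible GPUs over UCX
#     client = initialize_cluster(use_gpu=False, n_cpu=8)  # CPU-only LocalCluster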
| cheminformatics-master | cuchem/cuchem/utils/dask.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import sys
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
# defaults to categorize steps for sorting
STEP_TYPE_DICT = {'dim_reduction': ['pca', 'svd'],
'clustering': ['kmeans'],
'embedding': ['umap'],
'workflow': ['workflow'],
'stats': ['total', 'acceleration']}
STEP_TYPE_CAT = pd.CategoricalDtype(
['n_molecules', 'benchmark_type', 'n_workers', 'dim_reduction', 'clustering', 'embedding', 'workflow', 'stats'],
ordered=True)
NV_PALETTE = ['#8F231C', '#3D8366', '#541E7D', '#1B36B6', '#7B1D56', '#86B637']
def parse_args():
parser = argparse.ArgumentParser(description='Analyze and plot benchmark data')
parser.add_argument('-b', '--benchmark_file',
dest='benchmark_file',
type=str,
default='/workspace/benchmark/benchmark.csv',
help='Path to the CSV file containing benchmark results')
parser.add_argument('-o', '--output_path',
dest='output_path',
type=str,
default='/workspace/benchmark/benchmark.png',
help='Output directory for plot')
args = parser.parse_args(sys.argv[1:])
return args
def prepare_benchmark_df(benchmark_file, step_type_dict=STEP_TYPE_DICT, step_type_cat=STEP_TYPE_CAT):
"""Read and prepare the benchmark data as a dataframe"""
logger.info('Processing %s...', benchmark_file)
# Load and format data
with open(benchmark_file, 'r') as fh:
machine_config = pd.Series({'Machine Config': fh.readlines()[0].replace('#', '').strip()})
bench_df = pd.read_csv(benchmark_file, infer_datetime_format=True, comment='#').rename(
columns={'time(hh:mm:ss.ms)': 'time'})
bench_df['date'] = pd.to_datetime(bench_df['date'])
bench_df['time'] = pd.to_timedelta(
bench_df['time']).map(lambda x: x.total_seconds())
bench_df['benchmark_type'] = pd.Categorical(
bench_df['benchmark_type'].str.upper())
# Calculate total later by different method
bench_df = bench_df[bench_df['step'] != 'total']
# assign step type as a category to control display order
bench_df['step_type'] = ''
for key in step_type_dict:
bench_df.loc[bench_df['step'].str.lower().isin(
step_type_dict[key]), 'step_type'] = key
bench_df['step_type'] = bench_df['step_type'].astype(step_type_cat)
# convert to a pivot table with columns containing consecutive steps
bench_time_df = (bench_df
.drop(['metric_name', 'metric_value'], axis=1)
.pivot(index=['n_molecules', 'benchmark_type', 'n_workers'],
columns=['step_type', 'step'],
values='time'))
bench_time_df[('stats', 'total')] = bench_time_df.sum(axis=1)
# Create dataframe to normalize totals to max workers for CPU
# Requires manipulation of pivot table index formats
bench_time_df_norm = bench_time_df.copy()
bench_time_df_norm.columns = pd.MultiIndex.from_tuples(
bench_time_df.columns)
bench_time_df_norm.reset_index(inplace=True)
mask_indexes = bench_time_df_norm.groupby(['n_molecules', 'benchmark_type'])[
'n_workers'].transform(lambda x: x == x.max())
norm_df = bench_time_df_norm[mask_indexes].groupby(
['n_molecules', 'benchmark_type', 'n_workers']).mean()[('stats', 'total')].dropna()
cpu_only_mask = norm_df.index.get_level_values(
level='benchmark_type') == 'CPU'
norm_df = norm_df[cpu_only_mask].reset_index(
level=['n_workers', 'benchmark_type'], drop=True) # Normalize by n_molecules only
# Do the normalization
bench_time_df[('stats', 'acceleration')] = bench_time_df[(
'stats', 'total')].div(norm_df).pow(-1)
# Standardize columns for output
bench_time_df_output = bench_time_df.copy().round(2)
columns = bench_time_df_output.columns.get_level_values('step').to_list()
bench_time_df_output.columns = pd.Categorical(
columns, categories=['n_molecules', 'benchmark_type', 'n_workers'] + columns, ordered=True)
basename = os.path.splitext(benchmark_file)[0]
with pd.ExcelWriter(basename + '.xlsx') as writer:
bench_time_df_output.to_excel(writer, sheet_name='Benchmark')
machine_config.to_excel(writer, sheet_name='Machine Config')
with open(basename + '.md', 'w') as fh:
filelines = f'# {machine_config.values[0]}\n\n'
filelines += bench_time_df_output.reset_index().to_markdown(index=False)
fh.write(filelines)
return bench_time_df, machine_config
def prepare_acceleration_stacked_plot(df, machine_config, output_path, palette=NV_PALETTE):
"""Prepare single plot of acceleration as stacked bars (by molecule) and hardware workers"""
grouper = df['stats'].groupby(level='n_molecules')
n_groups = len(grouper)
n_rows = min(2, n_groups)
n_cols = int(n_groups / n_rows + 0.5)
if (n_rows, n_cols) == (2, 1):
n_rows, n_cols = n_cols, n_rows
fig, axList = plt.subplots(nrows=n_rows, ncols=n_cols)
fig.set_size_inches(6 * n_cols, 6 * n_rows)
if n_groups == 1:
axList = [axList]
else:
axList = axList.flatten()
df_plot = df[('stats', 'total')].reset_index(
level='n_molecules').pivot(columns='n_molecules')
df_plot.columns = df_plot.columns.get_level_values(level='n_molecules')
df_plot = df_plot.T
bar_width = 1.0
for ax, (n_molecules, dat) in zip(axList, df_plot.iterrows()):
dat.plot(kind='bar', ax=ax, color=palette, width=bar_width)
bars = [rect for rect in ax.get_children() if isinstance(rect, matplotlib.patches.Rectangle)]
indexes = [tuple([n_molecules] + list(x)) for x in dat.index.to_list()]
# Assemble index and label bars
for bar, index in zip(bars, indexes):
total = df.loc[index, ('stats', 'total')]
accel = df.loc[index, ('stats', 'acceleration')]
label = '{:.0f} s'.format(total)
if (not np.isnan(accel)) & (index[1] == 'GPU'):
label += '\n{:.0f}X'.format(accel)
ypos = bar.get_height()
xpos = bar.get_x() + (bar.get_width() / 2.0)
ax.text(xpos, ypos, label, horizontalalignment='center',
verticalalignment='bottom')
xticklabels = [f'{x[1]} CPU cores' if x[0] == 'CPU' else f'{x[1]} GPU(s)' for x in dat.index.to_list()]
ax.set_xticklabels(xticklabels, rotation=25)
if n_molecules == -1:
n_molecules = 'ALL'
ax.set(title=f'{n_molecules:} Molecules', xlabel='')
if ax.is_first_col():
ax.set(ylabel='Compute Time (s)\nfor RAPIDS / Sklearn Workflow')
title = f'Cheminformatics Visualization Benchmark\n{machine_config.values[0]}\n'
fig.suptitle(title)
plt.tight_layout()
fig.savefig(output_path, dpi=300, transparent=False)
return
if __name__ == '__main__':
args = parse_args()
# Read and prepare the dataframe then plot
bench_df, machine_config = prepare_benchmark_df(benchmark_file=args.benchmark_file, step_type_dict=STEP_TYPE_DICT,
step_type_cat=STEP_TYPE_CAT)
prepare_acceleration_stacked_plot(bench_df, machine_config, output_path=args.output_path)
| cheminformatics-master | cuchem/cuchem/utils/plot_benchmark_results.py |
import logging
import os
import shutil
from subprocess import run
from cuchemcommon.context import Context
CDDD_MODEL_SCRIPT = 'https://raw.githubusercontent.com/jrwnter/cddd/master/download_default_model.sh'
logger = logging.getLogger(__name__)
def download_cddd_models():
"""
Downloads CDDD model
"""
context = Context()
target_dir = context.get_config('data_mount_path', default='/data')
target_dir = os.path.join(target_dir, 'mounts', 'cddd')
if os.path.exists(os.path.join(target_dir, 'default_model', 'hparams.json')):
logger.warning('Directory already exists. To re-download please delete %s', target_dir)
return os.path.join(target_dir, 'default_model')
else:
shutil.rmtree(os.path.join(target_dir, 'default_model'), ignore_errors=True)
download_script = '/opt/cddd/download_default_model.sh'
if not os.path.exists(download_script):
download_script = '/tmp/download_default_model.sh'
run(['bash', '-c',
'wget --quiet -O %s %s && chmod +x %s' % (download_script, CDDD_MODEL_SCRIPT, download_script)])
run(['bash', '-c',
'mkdir -p %s && cd %s; %s' % (target_dir, target_dir, download_script)],
check=True)
return os.path.join(target_dir, 'default_model')
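# Illustrative usage sketch (kept as comments because it downloads data):
#
#     model_dir = download_cddd_models()
#     # e.g. '<data_mount_path>/mounts/cddd/default_model'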
| cheminformatics-master | cuchem/cuchem/utils/data_peddler.py |
# Copyright 2020 NVIDIA Corporation
# SPDX-License-Identifier: Apache-2.0
import base64
import json
import logging
from io import StringIO
from pydoc import locate
import cupy
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from cuchemcommon.data.helper.chembldata import ChEmblData, IMP_PROPS
from cuchemcommon.utils.singleton import Singleton
from dash.dependencies import Input, Output, State, ALL
from flask import Response
from cuchem.decorator import LipinskiRuleOfFiveDecorator
from cuchem.decorator import MolecularStructureDecorator
from cuchem.utils import generate_colors, report_ui_error
from rdkit import Chem
from rdkit.Chem import Draw, PandasTools
logger = logging.getLogger(__name__)
main_fig_height = 700
CHEMBL_DB = '/data/db/chembl_27.db'
PAGE_SIZE = 10
DOT_SIZE = 5
LEVEL_TO_STYLE = {
'info': {'color': 'black'},
'warning': {'color': 'orange'},
'error': {'color': 'red'}
}
PROP_DISP_NAME = {
'chembl_id': 'ChEMBL Id',
'mw_freebase': 'Molecular Weight (Free Base)',
'alogp': 'AlogP',
'hba': 'H-Bond Acceptors',
'hbd': 'H-Bond Donors',
'psa': 'Polar Surface Area',
    'rtb': 'Rotatable Bonds',
'ro3_pass': 'Rule of 3 Passes',
'num_ro5_violations': 'Lipinski Ro5 Violation',
'cx_most_apka': 'Acidic pKa (ChemAxon)',
'cx_most_bpka': 'Basic pKa (ChemAxon)',
'cx_logp': 'logP (ChemAxon)',
'cx_logd': 'LogD pKa (ChemAxon)',
'molecular_species': 'Molecular Species',
'full_mwt': 'MW (Full)',
'aromatic_rings': 'Aromatic Rings',
'heavy_atoms': 'Heavy Atoms',
'qed_weighted': 'QED (Weighted)',
'mw_monoisotopic': 'MW (Mono)',
'full_molformula': 'Full Formula',
'hba_lipinski': 'H-Bond Acceptors (Lipinski)',
'hbd_lipinski': 'H-Bond Donors (Lipinski)',
'num_lipinski_ro5_violations': 'Lipinski Ro5 Violations',
'standard_inchi': 'Standard InChi',
'standard_inchi_key': 'Standard InChi Key'
}
app = dash.Dash(__name__, external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css', dbc.themes.BOOTSTRAP])
@app.server.route('/cheminfo/downloadSDF')
def download_sdf():
logger.info('Exporting generated data...')
vis = ChemVisualization()
output = StringIO()
valid_idx = []
col_list = ['SMILES', 'Molecular Weight', 'LogP', 'H-Bond Donors', 'H-Bond Acceptors', 'Rotatable Bonds']
for row, data in vis.genreated_df.iterrows():
mol = Chem.MolFromSmiles(data['SMILES'])
if (mol is not None):
valid_idx.append(row)
valid_df = vis.genreated_df.iloc[valid_idx]
valid_df = valid_df[col_list]
PandasTools.AddMoleculeColumnToFrame(valid_df, 'SMILES')
PandasTools.WriteSDF(valid_df, output, properties=list(valid_df.columns))
output.seek(0)
return Response(
output.getvalue(),
mimetype="text/application",
headers={"Content-disposition":
"attachment; filename=download.sdf"})
class ChemVisualization(metaclass=Singleton):
def __init__(self, cluster_wf):
self.app = app
self.cluster_wf = cluster_wf
self.n_clusters = cluster_wf.n_clusters
self.chem_data = ChEmblData()
self.genreated_df = None
self.cluster_wf_cls = 'cuchem.wf.cluster.gpukmeansumap.GpuKmeansUmapHybrid'
self.generative_wf_cls = 'cuchem.wf.generative.MegatronMolBART'
        # Store colors so the plot does not change colors on events such as
        # molecule selection, etc.
self.cluster_colors = generate_colors(self.n_clusters)
# Construct the UI
self.app.layout = self.constuct_layout()
# Register callbacks for selection inside main figure
self.app.callback(
[Output('selected_clusters', 'value'),
Output('selected_point_cnt', 'children')],
[Input('main-figure', 'clickData'),
Input('main-figure', 'selectedData'),
Input('bt_recluster_clusters', 'n_clicks'),
Input('bt_recluster_points', 'n_clicks'),
Input('northstar_cluster', 'children')],
[State("selected_clusters", "value")])(self.handle_data_selection)
# Register callbacks for buttons for reclustering selected data
self.app.callback(
[Output('main-figure', 'figure'),
Output('northstar_cluster', 'children'),
Output('refresh_moi_prop_table', 'children'),
Output('recluster_error', 'children')],
[Input('bt_recluster_clusters', 'n_clicks'),
Input('bt_recluster_points', 'n_clicks'),
Input('bt_north_star', 'n_clicks'),
Input('sl_prop_gradient', 'value'),
Input('sl_nclusters', 'value'),
Input('refresh_main_fig', 'children')],
[State("selected_clusters", "value"),
State("main-figure", "selectedData"),
State('north_star', 'value'),
State('refresh_moi_prop_table', 'children')])(self.handle_re_cluster)
# Register callbacks for selection inside main figure to update module details
self.app.callback(
[Output('tb_selected_molecules', 'children'),
Output('sl_mol_props', 'options'),
Output('current_page', 'children'),
Output('total_page', 'children'),
Output('show_selected_mol', 'children'),
Output('mol_selection_error', 'children')],
[Input('main-figure', 'selectedData'),
Input('sl_mol_props', 'value'),
Input('bt_page_prev', 'n_clicks'),
Input('bt_page_next', 'n_clicks'),
Input('refresh_moi_prop_table', 'children')],
[State('north_star', 'value'),
State('current_page', 'children'),
State('show_selected_mol', 'children'),
State('sl_prop_gradient', 'value')])(self.handle_molecule_selection)
self.app.callback(
Output("refresh_main_fig", "children"),
[Input("bt_reset", "n_clicks"),
Input("bt_apply_wf", "n_clicks")],
[State("refresh_main_fig", "children"),
State("sl_wf", "value")])(self.handle_reset)
self.app.callback(
Output('north_star', 'value'),
Input({'role': 'bt_star_candidate', 'chemblId': ALL, 'molregno': ALL}, 'n_clicks'),
State('north_star', 'value'))(self.handle_mark_north_star)
self.app.callback(
[Output('error_msg', 'children'),
Output('md_error', 'is_open')],
[Input('recluster_error', 'children'),
Input('interpolation_error', 'children'),
Input('bt_close_err', 'n_clicks')])(self.handle_error)
self.app.callback(
Output('genration_candidates', 'children'),
[Input({'role': 'bt_add_candidate', 'chemblId': ALL, 'molregno': ALL}, 'n_clicks'),
Input('bt_reset_candidates', 'n_clicks'), ],
State('genration_candidates', 'children'))(self.handle_add_candidate)
self.app.callback(
Output('ckl_candidate_mol_id', 'options'),
Input('genration_candidates', 'children'))(self.handle_construct_candidates)
self.app.callback(
[Output('ckl_candidate_mol_id', 'value'),
Output('mk_selection_msg', 'children')],
[Input('ckl_candidate_mol_id', 'value'),
Input('rd_generation_type', 'value')])(self.handle_ckl_selection)
self.app.callback(
[Output('table_generated_molecules', 'children'),
Output('show_generated_mol', 'children'),
Output('msg_generated_molecules', 'children'),
Output('interpolation_error', 'children')],
[Input("bt_generate", "n_clicks"), ],
[State('sl_generative_wf', 'value'),
State('ckl_candidate_mol_id', 'value'),
State('n2generate', 'value'),
State('scaled_radius', 'value'),
State('rd_generation_type', 'value'),
State('show_generated_mol', 'children')])(self.handle_generation)
self.app.callback(
[Output('section_generated_molecules', 'style'),
Output('section_selected_molecules', 'style'), ],
[Input('show_generated_mol', 'children'),
Input('show_selected_mol', 'children')])(self.handle_property_tables)
def handle_add_candidate(self, bt_add_candidate,
bt_reset_candidates,
genration_candidates):
comp_id, event_type = self._fetch_event_data()
if comp_id == 'bt_reset_candidates' and event_type == 'n_clicks':
return ''
if event_type != 'n_clicks' or dash.callback_context.triggered[0]['value'] == 0:
raise dash.exceptions.PreventUpdate
selected_candidates = []
if genration_candidates:
selected_candidates = genration_candidates.split(",")
comp_detail = json.loads(comp_id)
selected_chembl_id = comp_detail['chemblId']
if selected_chembl_id not in selected_candidates:
selected_candidates.append(selected_chembl_id)
return ','.join(selected_candidates)
def _fetch_event_data(self):
if not dash.callback_context.triggered:
raise dash.exceptions.PreventUpdate
prop_id = dash.callback_context.triggered[0]['prop_id']
split_at = prop_id.rindex('.')
return [prop_id[:split_at], prop_id[split_at + 1:]]
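    # Illustrative note: for a trigger whose prop_id is 'bt_generate.n_clicks'
    # the helper above returns ['bt_generate', 'n_clicks']; for pattern-matching
    # ids the first element is the JSON id string, e.g.
    # '{"chemblId":"CHEMBL25","molregno":"1","role":"bt_add_candidate"}'.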
def handle_property_tables(self, show_generated_mol, show_selected_mol):
comp_id, event_type = self._fetch_event_data()
if comp_id == 'show_selected_mol' and event_type == 'children':
return {'display': 'none'}, {'display': 'block', 'width': '100%'}
elif comp_id == 'show_generated_mol' and event_type == 'children':
return {'display': 'block', 'width': '100%'}, {'display': 'none'}
return dash.no_update, dash.no_update
@report_ui_error(4)
def handle_generation(self, bt_generate,
sl_generative_wf, ckl_candidate_mol_id,
n2generate, scaled_radius, rd_generation_type, show_generated_mol):
comp_id, event_type = self._fetch_event_data()
chemble_ids = []
if comp_id == 'bt_generate' and event_type == 'n_clicks':
chemble_ids = ckl_candidate_mol_id
else:
return dash.no_update, dash.no_update
self.generative_wf_cls = sl_generative_wf
wf_class = locate(self.generative_wf_cls)
generative_wf = wf_class()
n2generate = int(n2generate)
scaled_radius = float(scaled_radius)
if rd_generation_type == 'SAMPLE':
            if chemble_ids is None or len(chemble_ids) == 0:
                raise ValueError('Please select at least one molecule for Sampling.')
self.genreated_df = generative_wf.find_similars_smiles_by_id(chemble_ids,
num_requested=n2generate,
scaled_radius=scaled_radius,
force_unique=True)
else:
            if chemble_ids is None or len(chemble_ids) < 2:
                raise ValueError('Please select at least two molecules for Interpolation.')
self.genreated_df = generative_wf.interpolate_by_id(chemble_ids,
num_points=n2generate,
scaled_radius=scaled_radius,
force_unique=True)
if show_generated_mol is None:
show_generated_mol = 0
show_generated_mol += 1
        # Add other useful attributes needed for rendering
self.genreated_df = MolecularStructureDecorator().decorate(self.genreated_df)
self.genreated_df = LipinskiRuleOfFiveDecorator().decorate(self.genreated_df)
# Create Table header
table_headers = []
columns = self.genreated_df.columns.to_list()
ignore_columns = ['embeddings', 'embeddings_dim']
for column in columns:
if column in ignore_columns:
continue
table_headers.append(html.Th(column, style={'fontSize': '150%', 'text-align': 'center'}))
prop_recs = [html.Tr(table_headers, style={'background': 'lightgray'})]
invalid_mol_cnt = 0
for row_idx in range(self.genreated_df.shape[0]):
td = []
try:
col_pos = columns.index('Chemical Structure')
col_data = self.genreated_df.iat[row_idx, col_pos]
if 'value' in col_data and col_data['value'] == MolecularStructureDecorator.ERROR_VALUE:
invalid_mol_cnt += 1
continue
except ValueError:
pass
for col_id in range(len(columns)):
col_data = self.genreated_df.iat[row_idx, col_id]
if columns[col_id] in ignore_columns:
continue
col_level = 'info'
if isinstance(col_data, dict):
col_value = col_data['value']
if 'level' in col_data:
col_level = col_data['level']
else:
col_value = col_data
if isinstance(col_value, str) and col_value.startswith('data:image/png;base64,'):
td.append(html.Td(html.Img(src=col_value)))
else:
td.append(html.Td(str(col_value),
style={'maxWidth': '300px',
'wordWrap': 'break-word',
'text-align': 'center',
'color': LEVEL_TO_STYLE[col_level]['color']
}
))
prop_recs.append(html.Tr(td, style={'fontSize': '125%'}))
msg_generated_molecules = ''
if invalid_mol_cnt > 0:
msg_generated_molecules = f'{invalid_mol_cnt} invalid molecules were created, which were eliminated from the result.'
return html.Table(prop_recs, style={'width': '100%',
'border': '1px solid lightgray'}), \
show_generated_mol, \
msg_generated_molecules, \
dash.no_update
def handle_ckl_selection(self, ckl_candidate_mol_id, rd_generation_type):
selection_msg = '**Please Select Two Molecules**'
selection_cnt = 2
if rd_generation_type == 'SAMPLE':
selection_msg = '**Please Select One Molecule**'
selection_cnt = 1
if ckl_candidate_mol_id and len(ckl_candidate_mol_id) > selection_cnt:
ckl_candidate_mol_id = ckl_candidate_mol_id[selection_cnt * -1:]
return ckl_candidate_mol_id, selection_msg
def handle_construct_candidates(self, north_star):
if not north_star:
return []
options = [{'label': i.strip(), 'value': i.strip()} for i in north_star.split(',')]
return options
def handle_reset(self, bt_reset, bt_apply_wf, refresh_main_fig, sl_wf):
comp_id, event_type = self._fetch_event_data()
if comp_id == 'bt_apply_wf' and event_type == 'n_clicks':
if self.cluster_wf_cls != sl_wf:
self.cluster_wf_cls = sl_wf
wf_class = locate(self.cluster_wf_cls)
self.cluster_wf = wf_class()
else:
raise dash.exceptions.PreventUpdate
if refresh_main_fig is None:
refresh_main_fig = 1
else:
refresh_main_fig = int(refresh_main_fig)
# Change the refresh variable to force main-figure refresh
return refresh_main_fig + 1
def recluster(self, filter_values=None, filter_column=None, reload_data=False):
self.cluster_wf.n_clusters = self.n_clusters
if reload_data:
return self.cluster_wf.cluster()
else:
return self.cluster_wf.recluster(filter_column, filter_values,
n_clusters=self.n_clusters)
def recluster_selection(self,
filter_value=None,
filter_column=None,
gradient_prop=None,
north_stars=None,
reload_data=False,
recluster_data=True,
color_col='cluster'):
if recluster_data or self.cluster_wf.df_embedding is None:
df_embedding = self.recluster(filter_values=filter_value,
filter_column=filter_column,
reload_data=reload_data)
else:
df_embedding = self.cluster_wf.df_embedding
return self.create_graph(df_embedding,
color_col=color_col,
gradient_prop=gradient_prop,
north_stars=north_stars)
def create_graph(self, ldf, color_col='cluster', north_stars=None, gradient_prop=None):
fig = go.Figure(layout={'colorscale': {}})
# Filter out relevant columns in this method.
if hasattr(ldf, 'compute'):
relevant_cols = ['id', 'x', 'y', 'cluster']
if gradient_prop:
relevant_cols.append(gradient_prop)
if color_col == 'cluster':
relevant_cols.append(color_col)
ldf = ldf.iloc[:, ldf.columns.isin(relevant_cols)]
ldf = ldf.compute()
moi_molregno = []
if north_stars:
moi_molregno = list(map(int, north_stars.split(",")))
moi_filter = ldf['id'].isin(moi_molregno)
# Create a map with MoI and cluster to which they belong
northstar_cluster = []
if gradient_prop is not None:
cmin = ldf[gradient_prop].min()
cmax = ldf[gradient_prop].max()
# Compute size of northstar and normal points
df_shape = moi_filter.copy()
df_size = (moi_filter * 18) + DOT_SIZE
df_shape = df_shape * 2
x_data = ldf['x']
y_data = ldf['y']
cluster = ldf['cluster']
customdata = ldf['id']
grad_prop = ldf[gradient_prop]
if self.cluster_wf.is_gpu_enabled():
x_data = x_data.to_array()
y_data = y_data.to_array()
cluster = cluster.to_array()
grad_prop = grad_prop.to_array()
customdata = customdata.to_array()
df_size = cupy.asnumpy(df_size)
df_shape = cupy.asnumpy(df_shape)
fig.add_trace(go.Scattergl({
'x': x_data,
'y': y_data,
'text': cluster,
'customdata': customdata,
'mode': 'markers',
'showlegend': False,
'marker': {
'size': df_size,
'symbol': df_shape,
'color': grad_prop,
'colorscale': 'Viridis',
'showscale': True,
'cmin': cmin,
'cmax': cmax,
}
}))
else:
clusters = ldf[color_col].unique()
if self.cluster_wf.is_gpu_enabled():
clusters = clusters.values_host
northstar_df = ldf[moi_filter]
scatter_traces = []
for cluster_id in clusters:
cdf = ldf.query('cluster == ' + str(cluster_id))
df_size = cdf['id'].isin(northstar_df['id'])
moi_present = False
if df_size.unique().shape[0] > 1:
northstar_cluster.append(str(cluster_id))
moi_present = True
# Compute size of northstar and normal points
df_shape = df_size.copy()
df_size = (df_size * 18) + DOT_SIZE
df_shape = df_shape * 2
x_data = cdf['x']
y_data = cdf['y']
cluster = cdf['cluster']
customdata = cdf['id']
if self.cluster_wf.is_gpu_enabled():
x_data = x_data.to_array()
y_data = y_data.to_array()
cluster = cluster.to_array()
customdata = customdata.to_array()
df_size = cupy.asnumpy(df_size)
df_shape = cupy.asnumpy(df_shape)
scatter_trace = go.Scattergl({
'x': x_data,
'y': y_data,
'text': cluster,
'customdata': customdata,
'name': 'Cluster ' + str(cluster_id),
'mode': 'markers',
'marker': {
'size': df_size,
'symbol': df_shape,
'color': self.cluster_colors[int(cluster_id) % len(self.cluster_colors)],
},
})
if moi_present:
# save to add later. This is to ensure the scatter is on top
scatter_traces.append(scatter_trace)
else:
fig.add_trace(scatter_trace)
for scatter_trace in scatter_traces:
fig.add_trace(scatter_trace)
# Change the title to indicate type of H/W in use
f_color = 'green' if self.cluster_wf.is_gpu_enabled() else 'blue'
fig.update_layout(
showlegend=True, clickmode='event', height=main_fig_height,
title='Clusters', dragmode='select',
title_font_color=f_color,
annotations=[
dict(x=0.5, y=-0.07, showarrow=False, text='x',
xref="paper", yref="paper"),
dict(x=-0.05, y=0.5, showarrow=False, text="y",
textangle=-90, xref="paper", yref="paper")])
del ldf
return fig, northstar_cluster
def start(self, host=None, port=5000):
return self.app.run_server(
debug=False, use_reloader=False, host=host, port=port)
def href_ify(self, molregno):
return html.A(molregno,
href='https://www.ebi.ac.uk/chembl/compound_report_card/' + str(molregno),
target='_blank')
def construct_molecule_detail(self, selected_points, display_properties,
page, pageSize=10, chembl_ids=None):
# Create Table header
table_headers = [
html.Th("Chemical Structure", style={'width': '30%', 'fontSize': '150%', 'text-align': 'center'}),
html.Th("SMILES", style={'maxWidth': '100px', 'fontSize': '150%', 'text-align': 'center'})]
for prop in display_properties:
if prop in PROP_DISP_NAME:
table_headers.append(html.Th(PROP_DISP_NAME[prop], style={'fontSize': '150%', 'text-align': 'center'}))
if chembl_ids:
table_headers.append(html.Th('ChEMBL', style={'fontSize': '150%', 'text-align': 'center'}))
else:
table_headers.append(html.Th("", style={'width': '10px'}))
table_headers.append(html.Th("", style={'width': '10px'}))
prop_recs = [html.Tr(table_headers, style={'background': 'lightgray'})]
if chembl_ids:
props, selected_molecules = self.chem_data.fetch_props_by_chemble(chembl_ids)
elif selected_points:
selected_molregno = []
for point in selected_points['points'][((page - 1) * pageSize): page * pageSize]:
if 'customdata' in point:
molregid = point['customdata']
selected_molregno.append(molregid)
props, selected_molecules = self.chem_data.fetch_props_by_molregno(
selected_molregno)
else:
return None, None
all_props = []
for k in props:
if k in PROP_DISP_NAME:
all_props.append({"label": PROP_DISP_NAME[k], "value": k})
for selected_molecule in selected_molecules:
td = []
selected_chembl_id = selected_molecule[1]
smiles = selected_molecule[props.index('canonical_smiles')]
m = Chem.MolFromSmiles(smiles)
drawer = Draw.rdMolDraw2D.MolDraw2DCairo(500, 125)
drawer.SetFontSize(1.0)
drawer.DrawMolecule(m)
drawer.FinishDrawing()
img_binary = "data:image/png;base64," + \
base64.b64encode(drawer.GetDrawingText()).decode("utf-8")
td.append(html.Td(html.Img(src=img_binary)))
td.append(html.Td(smiles, style={'wordWrap': 'break-word'}))
for key in display_properties:
if key in PROP_DISP_NAME:
td.append(html.Td(selected_molecule[props.index(key)],
style={'text-align': 'center'}))
molregno = selected_molecule[0]
if chembl_ids:
td.append(html.Td(selected_chembl_id))
else:
td.append(html.Td(
dbc.Button('Add as MoI',
id={'role': 'bt_star_candidate',
'chemblId': selected_chembl_id,
'molregno': str(molregno)
},
n_clicks=0)
))
td.append(html.Td(
dbc.Button('Add for Interpolation',
id={'role': 'bt_add_candidate',
'chemblId': selected_chembl_id,
'molregno': str(molregno)
},
style={'margin-right': '6px'},
n_clicks=0)
))
prop_recs.append(html.Tr(td, style={'fontSize': '125%'}))
return html.Table(prop_recs, style={'width': '100%', 'border': '1px solid lightgray'}), all_props
def constuct_layout(self):
# TODO: avoid calling self.cluster_wf.df_embedding
fig, _ = self.create_graph(self.cluster_wf.df_embedding)
return html.Div([
html.Div(className='row', children=[
dcc.Graph(id='main-figure', figure=fig,
className='nine columns',
style={'verticalAlign': 'text-top'}),
html.Div([
dcc.Markdown("""**Molecule(s) of Interest**"""),
dcc.Markdown("Please enter ChEMBL ID(s) separated by commas."),
html.Div(className='row', children=[
dcc.Input(id='north_star', type='text', debounce=True, className='nine columns'),
dbc.Button('Highlight',
id='bt_north_star', n_clicks=0,
className='three columns'),
], style={'marginLeft': 0, 'marginBottom': 18, }),
dcc.Tabs([
dcc.Tab(label='Cluster Molecules', children=[
dcc.Markdown("""**Select Workflow**""", style={'marginTop': 18, }),
html.Div(className='row', children=[
html.Div(children=[
dcc.Dropdown(id='sl_wf',
multi=False,
options=[{'label': 'GPU KMeans-UMAP - Single and Multiple GPUs',
'value': 'cuchem.wf.cluster.gpukmeansumap.GpuKmeansUmapHybrid'},
{'label': 'GPU KMeans-UMAP',
'value': 'cuchem.wf.cluster.gpukmeansumap.GpuKmeansUmap'},
{'label': 'GPU KMeans-Random Projection - Single GPU',
'value': 'cuchem.wf.cluster.gpurandomprojection.GpuWorkflowRandomProjection'},
{'label': 'CPU KMeans-UMAP',
'value': 'cuchem.wf.cluster.cpukmeansumap.CpuKmeansUmap'}, ],
value=self.cluster_wf_cls,
clearable=False),
], className='nine columns'),
dbc.Button('Apply',
id='bt_apply_wf', n_clicks=0,
className='three columns'),
], style={'marginLeft': 0, 'marginTop': 6, }),
dcc.Markdown("""**Cluster Selection**""", style={'marginTop': 18, }),
dcc.Markdown("Set number of clusters", style={'marginTop': 12, }),
dcc.Input(id='sl_nclusters', value=self.n_clusters),
dcc.Markdown("Click a point to select a cluster.", style={'marginTop': 12, }),
html.Div(className='row', children=[
dcc.Input(id='selected_clusters', type='text', className='nine columns'),
dbc.Button('Recluster',
id='bt_recluster_clusters', n_clicks=0,
className='three columns'),
], style={'marginLeft': 0}),
dcc.Markdown("""**Selection Points**""", style={'marginTop': 18, }),
dcc.Markdown("""Choose the lasso or rectangle tool in the graph's menu
bar and then select points in the graph.
""", style={'marginTop': 12, }),
dbc.Button('Recluster Selection', id='bt_recluster_points', n_clicks=0),
html.Div(children=[html.Div(id='selected_point_cnt'), ]),
dbc.Button('Reload', id='bt_reset', n_clicks=0, style={'marginLeft': 0, 'marginTop': 18, }),
]),
dcc.Tab(label='Generate Molecules', children=[
dcc.Markdown("""**Select Generative Model**""", style={'marginTop': 18, }),
html.Div(children=[
dcc.Dropdown(id='sl_generative_wf', multi=False,
options=[{'label': 'CDDD Model',
'value': 'cuchem.wf.generative.Cddd'},
{'label': 'MegaMolBART Model',
'value': 'cuchem.wf.generative.MegatronMolBART'},
],
value=self.generative_wf_cls,
clearable=False),
]),
dcc.RadioItems(
id='rd_generation_type',
options=[
{'label': 'Interpolate between two molecules', 'value': 'INTERPOLATE'},
{'label': 'Sample around one molecule', 'value': 'SAMPLE'},
],
value='INTERPOLATE',
style={'marginTop': 18},
inputStyle={'display': 'inline-block', 'marginLeft': 6, 'marginRight': 6},
labelStyle={'display': 'block', 'marginLeft': 6, 'marginRight': 6}
),
html.Div(className='row', children=[
dcc.Markdown("Number of molecules to generate",
style={'marginLeft': 10, 'marginTop': 12, 'width': '250px'}),
dcc.Input(id='n2generate', value=10),
], style={'marginLeft': 0}),
html.Div(className='row', children=[
dcc.Markdown("Scaled sampling radius (int, start with 1)",
style={'marginLeft': 10, 'marginTop': 12, 'width': '250px'}),
dcc.Input(id='scaled_radius', value=1),
], style={'marginLeft': 0, 'marginTop': '6px'}),
dcc.Markdown(children="""**Please Select Two**""",
id="mk_selection_msg",
style={'marginTop': 18}),
dcc.Checklist(
id='ckl_candidate_mol_id',
options=[],
value=[],
inputStyle={'display': 'inline-block', 'marginLeft': 6, 'marginRight': 6},
labelStyle={'display': 'block', 'marginLeft': 6, 'marginRight': 6}
),
html.Div(className='row', children=[
dbc.Button('Generate', id='bt_generate', n_clicks=0, style={'marginRight': 12}),
dbc.Button('Reset', id='bt_reset_candidates', n_clicks=0),
], style={'marginLeft': 0}),
]),
]),
html.Div(className='row', children=[
html.Label([
"Select molecular property for color gradient",
dcc.Dropdown(id='sl_prop_gradient', multi=False, clearable=True,
options=[{"label": PROP_DISP_NAME[p], "value": p} for p in IMP_PROPS], ),
], style={'marginTop': 18, 'marginLeft': 18})],
),
], className='three columns', style={'marginLeft': 18, 'marginTop': 90, 'verticalAlign': 'text-top', }),
]),
html.Div(className='row', children=[
html.Div(id='section_generated_molecules', children=[
html.Div(className='row', children=[
html.A('Export to SDF',
id='download-link',
download="rawdata.sdf",
href="/cheminfo/downloadSDF",
target="_blank",
n_clicks=0,
style={'fontSize': '150%'}
),
html.Div(id='msg_generated_molecules', children=[],
style={'color': 'red', 'fontWeight': 'bold', 'marginLeft': 12, 'fontSize': '150%'}),
], style={'marginLeft': 0, 'marginBottom': 18, }),
html.Div(id='table_generated_molecules', children=[], style={'width': '100%'})
], style={'display': 'none', 'width': '100%'}),
html.Div(id='section_selected_molecules', children=[
html.Div(className='row', children=[
html.Div(id='section_display_properties', children=[
html.Label([
"Select Molecular Properties",
dcc.Dropdown(id='sl_mol_props', multi=True,
options=[
{'label': 'alogp', 'value': 'alogp'}],
value=['alogp']),
])],
className='nine columns'),
html.Div(children=[
dbc.Button("<", id="bt_page_prev",
style={"height": "25px"}),
html.Span(children=1, id='current_page',
style={"paddingLeft": "6px"}),
html.Span(children=' of 1', id='total_page',
style={"paddingRight": "6px"}),
dbc.Button(">", id="bt_page_next",
style={"height": "25px"})
],
className='three columns',
style={'verticalAlign': 'text-bottom', 'text-align': 'right'}
),
], style={'margin': 12}),
html.Div(id='tb_selected_molecules', children=[], style={'width': '100%'})
], style={'display': 'none', 'width': '100%'}),
], style={'margin': 12}),
html.Div(id='refresh_main_fig', style={'display': 'none'}),
html.Div(id='northstar_cluster', style={'display': 'none'}),
html.Div(id='recluster_error', style={'display': 'none'}),
html.Div(id='mol_selection_error', style={'display': 'none'}),
html.Div(id='show_selected_mol', style={'display': 'none'}),
html.Div(id='show_generated_mol', style={'display': 'none'}),
html.Div(id='genration_candidates', style={'display': 'none'}),
html.Div(id='refresh_moi_prop_table', style={'display': 'none'}),
html.Div(id='interpolation_error', style={'display': 'none'}),
html.Div(className='row', children=[
dbc.Modal([
dbc.ModalHeader("Error"),
dbc.ModalBody(
html.Div(id='error_msg', style={'color': 'red'}),
),
dbc.ModalFooter(
dbc.Button("Close", id="bt_close_err", className="ml-auto")
),
], id="md_error"),
]),
])
def handle_error(self, recluster_error, interpolation_error, bt_close_err):
comp_id, event_type = self._fetch_event_data()
if comp_id == 'bt_close_err' and event_type == 'n_clicks':
return '', False
msg = None
if comp_id == 'interpolation_error' and event_type == 'children':
msg = interpolation_error
elif comp_id == 'recluster_error' and event_type == 'children':
msg = recluster_error
if msg is None:
raise dash.exceptions.PreventUpdate
return msg, True
@report_ui_error(6)
def handle_molecule_selection(self, mf_selected_data, selected_columns,
prev_click, next_click, refresh_moi_prop_table,
north_star, current_page, show_selected_mol,
sl_prop_gradient):
comp_id, event_type = self._fetch_event_data()
module_details = None
chembl_ids = None
# Code to support pagination
if comp_id == 'bt_page_prev' and event_type == 'n_clicks':
if current_page == 1:
raise dash.exceptions.PreventUpdate
current_page -= 1
elif comp_id == 'bt_page_next' and event_type == 'n_clicks':
if len(mf_selected_data['points']) < PAGE_SIZE * (current_page + 1):
raise dash.exceptions.PreventUpdate
current_page += 1
elif north_star and \
((comp_id == 'refresh_moi_prop_table' and event_type == 'children')):
chembl_ids = north_star.split(",")
elif (comp_id == 'main-figure' and event_type == 'selectedData') or \
(comp_id == 'sl_mol_props' and event_type == 'value'):
pass
else:
raise dash.exceptions.PreventUpdate
if selected_columns and sl_prop_gradient:
if sl_prop_gradient not in selected_columns:
selected_columns.append(sl_prop_gradient)
module_details, all_props = self.construct_molecule_detail(
mf_selected_data, selected_columns, current_page,
pageSize=PAGE_SIZE, chembl_ids=chembl_ids)
if module_details is None and all_props is None:
return dash.no_update, dash.no_update, dash.no_update, \
dash.no_update, dash.no_update, dash.no_update,
if chembl_ids:
last_page = ''
else:
last_page = ' of ' + str(len(mf_selected_data['points']) // PAGE_SIZE)
if show_selected_mol is None:
show_selected_mol = 0
show_selected_mol += 1
return module_details, all_props, current_page, last_page, show_selected_mol, dash.no_update
def handle_data_selection(self, mf_click_data, mf_selected_data,
bt_cluster_clicks, bt_point_clicks,
northstar_cluster,
curr_clusters):
comp_id, event_type = self._fetch_event_data()
selected_clusters = ''
selected_point_cnt = ''
if comp_id == 'main-figure' and event_type == 'clickData':
# Event - On selecting cluster on the main scatter plot
clusters = []
if curr_clusters:
clusters = list(map(int, curr_clusters.split(",")))
points = mf_click_data['points']
for point in points:
cluster = point['text']
if cluster in clusters:
clusters.remove(cluster)
else:
clusters.append(cluster)
selected_clusters = ','.join(map(str, clusters))
elif comp_id == 'main-figure' and event_type == 'selectedData':
# Event - On selection on the main scatterplot
if not mf_selected_data:
raise dash.exceptions.PreventUpdate
points = mf_selected_data['points']
selected_point_cnt = str(len(points)) + ' points selected'
clusters = {point['text'] for point in points}
selected_clusters = northstar_cluster
elif comp_id == 'northstar_cluster' and event_type == 'children':
selected_clusters = northstar_cluster
elif (comp_id == 'bt_recluster_clusters' and event_type == 'n_clicks') \
or (comp_id == 'bt_recluster_points' and event_type == 'n_clicks'):
selected_clusters = northstar_cluster
else:
raise dash.exceptions.PreventUpdate
return selected_clusters, selected_point_cnt
def handle_mark_north_star(self, bt_north_star_click, north_star):
comp_id, event_type = self._fetch_event_data()
if event_type != 'n_clicks' or dash.callback_context.triggered[0]['value'] == 0:
raise dash.exceptions.PreventUpdate
selected_north_star = []
selected_north_star_mol_reg_id = []
if north_star:
selected_north_star = north_star.split(",")
selected_north_star_mol_reg_id = [
str(row[0]) for row in self.chem_data.fetch_molregno_by_chemblId(selected_north_star)]
comp_detail = json.loads(comp_id)
selected_chembl_id = comp_detail['chemblId']
if selected_chembl_id not in selected_north_star:
selected_north_star.append(selected_chembl_id)
selected_north_star_mol_reg_id.append(comp_detail['molregno'])
return ','.join(selected_north_star)
@report_ui_error(4)
def handle_re_cluster(self, bt_cluster_clicks, bt_point_clicks, bt_north_star_clicks,
sl_prop_gradient, sl_nclusters, refresh_main_fig,
selected_clusters, selected_points, north_star, refresh_moi_prop_table):
comp_id, event_type = self._fetch_event_data()
if comp_id == 'sl_nclusters':
if sl_nclusters:
self.n_clusters = int(sl_nclusters)
self.cluster_colors = generate_colors(self.n_clusters)
raise dash.exceptions.PreventUpdate
filter_values = None
filter_column = None
reload_data = False
recluster_data = True
moi_molregno = None
_refresh_moi_prop_table = dash.no_update
if comp_id == 'bt_recluster_clusters' and event_type == 'n_clicks':
if selected_clusters:
filter_values = list(map(int, selected_clusters.split(",")))
filter_column = 'cluster'
elif selected_points and comp_id == 'bt_recluster_points' and event_type == 'n_clicks':
filter_values = []
for point in selected_points['points']:
if 'customdata' in point:
filter_values.append(point['customdata'])
filter_column = 'id'
elif comp_id == 'bt_north_star' and event_type == 'n_clicks':
if north_star:
north_star = north_star.split(',')
missing_mols, molregnos, _ = self.cluster_wf.add_molecules(north_star)
recluster_data = len(missing_mols) > 0
logger.info("%d missing molecules added...", len(missing_mols))
logger.debug("Missing molecules werew %s", missing_mols)
moi_molregno = " ,".join(list(map(str, molregnos)))
if refresh_moi_prop_table is None:
refresh_moi_prop_table = 0
_refresh_moi_prop_table = refresh_moi_prop_table + 1
else:
raise dash.exceptions.PreventUpdate
elif comp_id == 'refresh_main_fig' and event_type == 'children':
reload_data = True
recluster_data = True
else:
# Event that are expected to reach this block are
# 'sl_prop_gradient' and event_type == 'value':
reload_data = False
recluster_data = False
if north_star and moi_molregno is None:
molregnos = [row[0] for row in self.cluster_wf.dao.fetch_id_from_chembl(north_star.split(','))]
moi_molregno = " ,".join(list(map(str, molregnos)))
figure, northstar_cluster = self.recluster_selection(
filter_value=filter_values,
filter_column=filter_column,
gradient_prop=sl_prop_gradient,
north_stars=moi_molregno,
color_col='cluster',
reload_data=reload_data,
recluster_data=recluster_data)
return figure, ','.join(northstar_cluster), _refresh_moi_prop_table, dash.no_update
| cheminformatics-master | cuchem/cuchem/interactive/chemvisualize.py |
cheminformatics-master | cuchem/cuchem/interactive/__init__.py |
import logging
import os
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from flask import Blueprint, Flask, send_file, json
from flask.globals import current_app
from flask.helpers import safe_join
from flask_restplus import Api
from werkzeug.exceptions import HTTPException
from werkzeug.http import HTTP_STATUS_CODES
logger = logging.getLogger(__name__)
class CustomApi(Api):
"""This class overrides 'handle_error' method of 'Api' class ,
to extend global exception handing functionality of 'flask-restful'.
"""
def handle_error(self, err):
"""It helps preventing writing unnecessary
try/except block though out the application
"""
logger.exception(err) # log every exception raised in the application
# Handle HTTPExceptions
if isinstance(err, HTTPException):
return json.jsonify({
'message': getattr(
err, 'description', HTTP_STATUS_CODES.get(err.code, '')
)
}), err.code
# If msg attribute is not set,
# consider it as Python core exception and
# hide sensitive error info from end user
if not getattr(err, 'message', None):
return json.jsonify({
'message': str(err)
}), 500
# Handle application specific custom exceptions
return json.jsonify(**err.kwargs), err.http_status_code
api_bp = Blueprint('api_bp', __name__, url_prefix='/api')
api_rest = CustomApi(api_bp)
app = Flask(__name__, static_folder='../../public/')
app.register_blueprint(api_bp)
@app.route('/')
def index_client():
return send_file('../../public/index.html')
def _send_static_file(dirname, path):
filename = safe_join(dirname, path)
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
return send_file(filename)
@app.route('/js/<path:path>')
def send_js(path):
return _send_static_file('../../public/js', path)
@app.route('/css/<path:path>')
def send_css(path):
return _send_static_file('../../public/css', path)
@app.route('/fonts/<path:path>')
def send_fonts(path):
return _send_static_file('../../public/fonts', path)
@app.route('/imgs/<path:path>')
def send_imgs(path):
return _send_static_file('../../public/imgs', path)
@api_bp.after_request
def add_header(response):
response.headers['Access-Control-Allow-Headers'] = \
'Content-Type,Authorization'
return response
from cuchem.api.interpolator import *
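# Illustrative sketch (not part of the original module): one way to serve this
# Flask app for local development. The host and port values are assumptions,
# not settings taken from the repository's deployment configuration.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)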
| cheminformatics-master | cuchem/cuchem/api/__init__.py |
import logging
from flask import jsonify
from flask_restplus import Resource
from cuchem.api import api_rest
from cuchem.wf.generative import Cddd, MolBART
logger = logging.getLogger(__name__)
@api_rest.route('/interpolator/<string:model>/<string:smiles>/<int:num_requested>')
class Interpolator(Resource):
"""
Exposes all Request related operations thru a REST endpoint.
"""
def get(self, model, smiles, num_requested=10):
if model == 'CDDD':
            # Cddd.find_similars_smiles_list returns (generated smiles, embeddings)
            generated_smiles, neighboring_embeddings = \
Cddd().find_similars_smiles_list(
smiles,
num_requested=num_requested,
force_unique=True)
else:
generated_smiles, neighboring_embeddings, pad_mask = \
MolBART().find_similars_smiles_list(
smiles,
num_requested=num_requested,
force_unique=True)
return jsonify(generated_smiles)
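# Illustrative sketch (not part of the original module): querying the REST endpoint
# above once the Flask app is running and a generative backend is available. The
# host/port and the example SMILES string are assumptions; the path follows the
# route registered on the 'api_bp' blueprint ('/api' prefix).
if __name__ == '__main__':
    from urllib.parse import quote
    from urllib.request import urlopen
    smiles = quote('CC(=O)Oc1ccccc1C(=O)O', safe='')
    url = 'http://localhost:5000/api/interpolator/CDDD/%s/5' % smiles
    with urlopen(url) as response:
        print(response.read().decode())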
| cheminformatics-master | cuchem/cuchem/api/interpolator.py |
cheminformatics-master | cuchem/cuchem/wf/__init__.py |
|
from typing import List
import cupy
import numpy
from cuchemcommon.data.helper.chembldata import ADDITIONAL_FEILD, IMP_PROPS
from cuml.metrics import pairwise_distances
from cuchem.utils.distance import tanimoto_calculate
from cuchem.utils.metrics import spearmanr
class BaseClusterWorkflow:
def __init__(self):
self.df_embedding = None
def _remove_ui_columns(self, embedding):
for col in ['x', 'y', 'cluster', 'filter_col', 'index', 'molregno']:
if col in embedding.columns:
embedding = embedding.drop([col], axis=1)
return embedding
def _remove_non_numerics(self, embedding):
embedding = self._remove_ui_columns(embedding)
other_props = ['id'] + IMP_PROPS + ADDITIONAL_FEILD
        # Temporarily store columns not required during processing
prop_series = {}
for col in other_props:
if col in embedding.columns:
prop_series[col] = embedding[col]
if len(prop_series) > 0:
embedding = embedding.drop(other_props, axis=1)
return embedding, prop_series
def _random_sample_from_arrays(self, *input_array_list, n_samples=None, index=None):
assert (n_samples is not None) != (index is not None) # XOR -- must specify one or the other, but not both
# Ensure array sizes are all the same
sizes = []
output_array_list = []
for array in input_array_list:
if hasattr(array, 'compute'):
array = array.compute()
sizes.append(array.shape[0])
output_array_list.append(array)
assert all([x == sizes[0] for x in sizes])
size = sizes[0]
if index is not None:
assert (index.max() < size) & (len(index) <= size)
else:
# Sample from data / shuffle
n_samples = min(size, n_samples)
numpy.random.seed(self.seed)
index = numpy.random.choice(numpy.arange(size), size=n_samples, replace=False)
for pos, array in enumerate(output_array_list):
if hasattr(array, 'values'):
output_array_list[pos] = array.iloc[index]
else:
output_array_list[pos] = array[index]
if len(output_array_list) == 1:
output_array_list = output_array_list[0]
return output_array_list, index
def _compute_spearman_rho(self, fp_sample, Xt_sample, top_k=100):
if hasattr(fp_sample, 'values'):
fp_sample = fp_sample.values
dist_array_tani = tanimoto_calculate(fp_sample, calc_distance=True)
dist_array_eucl = pairwise_distances(Xt_sample)
return cupy.nanmean(spearmanr(dist_array_tani, dist_array_eucl, top_k=top_k))
def is_gpu_enabled(self):
return True
def cluster(self, embedding):
"""
Runs clustering workflow on the data fetched from database/cache.
"""
raise NotImplementedError
def recluster(self,
filter_column=None,
filter_values=None,
n_clusters=None):
"""
Runs reclustering on original dataframe or on the new dataframe passed.
The new dataframe is usually a subset of the original dataframe.
Caller may ask to include additional molecules.
"""
raise NotImplementedError
def add_molecules(self, chemblids: List):
"""
        Adds molecules identified by the given ChEMBL IDs to the existing
        dataset. Duplicates must be ignored.
"""
raise NotImplementedError
def compute_qa_matric(self):
"""
        Collects all quality metrics and logs them.
"""
NotImplemented
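# Illustrative sketch (not part of the original module): _random_sample_from_arrays
# draws the same random rows from several equally sized arrays, which is how the
# benchmark paths subsample fingerprints and embeddings together. The array
# contents, the sample size and the explicit `seed` assignment are arbitrary
# example values.
if __name__ == '__main__':
    wf = BaseClusterWorkflow()
    wf.seed = 0
    fingerprints = numpy.arange(20).reshape(10, 2)
    embeddings = numpy.arange(30).reshape(10, 3)
    (fp_sample, emb_sample), idx = wf._random_sample_from_arrays(
        fingerprints, embeddings, n_samples=4)
    print(fp_sample.shape, emb_sample.shape, idx)  # (4, 2) (4, 3) and 4 row indices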
| cheminformatics-master | cuchem/cuchem/wf/cluster/__init__.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import singledispatch
from typing import List
import cudf
import cupy
import dask
import dask_cudf
import pandas
from cuchemcommon.context import Context
from cuchemcommon.data import ClusterWfDAO
from cuchemcommon.data.cluster_wf import ChemblClusterWfDao
from cuchemcommon.fingerprint import MorganFingerprint
from cuchemcommon.utils.logger import MetricsLogger
from cuchemcommon.utils.singleton import Singleton
from cuml import SparseRandomProjection, KMeans
from cuchem.utils.metrics import batched_silhouette_scores
from cuchem.wf.cluster import BaseClusterWorkflow
logger = logging.getLogger(__name__)
@singledispatch
def _gpu_random_proj_wrapper(embedding, self):
return NotImplemented
@_gpu_random_proj_wrapper.register(dask.dataframe.core.DataFrame)
def _(embedding, self):
logger.info('Converting from dask.dataframe.core.DataFrame...')
embedding = embedding.compute()
return _gpu_random_proj_wrapper(embedding, self)
@_gpu_random_proj_wrapper.register(dask_cudf.core.DataFrame)
def _(embedding, self):
logger.info('Converting from dask_cudf.core.DataFrame...')
embedding = embedding.compute()
return _gpu_random_proj_wrapper(embedding, self)
@_gpu_random_proj_wrapper.register(pandas.DataFrame)
def _(embedding, self):
logger.info('Converting from pandas.DataFrame...')
embedding = cudf.from_pandas(embedding)
return _gpu_random_proj_wrapper(embedding, self)
@_gpu_random_proj_wrapper.register(cudf.DataFrame)
def _(embedding, self):
return self._cluster(embedding)
class GpuWorkflowRandomProjection(BaseClusterWorkflow, metaclass=Singleton):
def __init__(self,
n_molecules: int = None,
dao: ClusterWfDAO = ChemblClusterWfDao(MorganFingerprint),
n_clusters=7,
seed=0):
super(GpuWorkflowRandomProjection, self).__init__()
self.dao = dao
self.n_molecules = n_molecules
self.n_clusters = n_clusters
self.pca = None
self.seed = seed
self.n_silhouette = 500000
self.context = Context()
self.srp_embedding = SparseRandomProjection(n_components=2)
def rand_jitter(self, arr):
"""
Introduces random displacements to spread the points
"""
stdev = .023 * cupy.subtract(cupy.max(arr), cupy.min(arr))
for i in range(arr.shape[1]):
rnd = cupy.multiply(cupy.random.randn(len(arr)), stdev)
arr[:, i] = cupy.add(arr[:, i], rnd)
return arr
def _cluster(self, embedding):
logger.info('Computing cluster...')
embedding = embedding.reset_index()
n_molecules = embedding.shape[0]
# Before reclustering remove all columns that may interfere
embedding, prop_series = self._remove_non_numerics(embedding)
with MetricsLogger('random_proj', n_molecules) as ml:
srp = self.srp_embedding.fit_transform(embedding.values)
ml.metric_name = 'spearman_rho'
ml.metric_func = self._compute_spearman_rho
ml.metric_func_args = (embedding, embedding, srp)
with MetricsLogger('kmeans', n_molecules) as ml:
kmeans_cuml = KMeans(n_clusters=self.n_clusters)
kmeans_cuml.fit(srp)
kmeans_labels = kmeans_cuml.predict(srp)
ml.metric_name = 'silhouette_score'
ml.metric_func = batched_silhouette_scores
ml.metric_func_kwargs = {}
ml.metric_func_args = (None, None)
if self.context.is_benchmark:
(srp_sample, kmeans_labels_sample), _ = self._random_sample_from_arrays(
srp, kmeans_labels, n_samples=self.n_silhouette)
ml.metric_func_args = (srp_sample, kmeans_labels_sample)
        # Add back the columns required for plotting and for correlating data
        # between re-clustering runs
srp = self.rand_jitter(srp)
embedding['cluster'] = kmeans_labels
embedding['x'] = srp[:, 0]
embedding['y'] = srp[:, 1]
# Add back the prop columns
for col in prop_series.keys():
embedding[col] = prop_series[col]
return embedding
def cluster(self, df_mol_embedding=None):
logger.info("Executing GPU workflow...")
if df_mol_embedding is None:
self.n_molecules = self.context.n_molecule
df_mol_embedding = self.dao.fetch_molecular_embedding(
self.n_molecules,
cache_directory=self.context.cache_directory)
df_mol_embedding = df_mol_embedding.persist()
self.df_embedding = _gpu_random_proj_wrapper(df_mol_embedding, self)
return self.df_embedding
def recluster(self,
filter_column=None,
filter_values=None,
n_clusters=None):
if filter_values is not None:
self.df_embedding['filter_col'] = self.df_embedding[filter_column].isin(filter_values)
self.df_embedding = self.df_embedding.query('filter_col == True')
if n_clusters is not None:
self.n_clusters = n_clusters
self.df_embedding = _gpu_random_proj_wrapper(self.df_embedding, self)
return self.df_embedding
def add_molecules(self, chemblids: List):
chem_mol_map = {row[0]: row[1] for row in self.dao.fetch_id_from_chembl(chemblids)}
molregnos = list(chem_mol_map.keys())
self.df_embedding['id_exists'] = self.df_embedding['id'].isin(molregnos)
ldf = self.df_embedding.query('id_exists == True')
if hasattr(ldf, 'compute'):
ldf = ldf.compute()
self.df_embedding = self.df_embedding.drop(['id_exists'], axis=1)
missing_mol = set(molregnos).difference(ldf['id'].to_array())
chem_mol_map = {id: chem_mol_map[id] for id in missing_mol}
missing_molregno = chem_mol_map.keys()
if len(missing_molregno) > 0:
new_fingerprints = self.dao.fetch_molecular_embedding_by_id(missing_molregno)
new_fingerprints = new_fingerprints.compute()
self.df_embedding = self._remove_ui_columns(self.df_embedding)
self.df_embedding = self.df_embedding.append(new_fingerprints)
return chem_mol_map, molregnos, self.df_embedding
| cheminformatics-master | cuchem/cuchem/wf/cluster/gpurandomprojection.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import sklearn.cluster
import umap
from cuchemcommon.context import Context
from cuchemcommon.data import ClusterWfDAO
from cuchemcommon.data.cluster_wf import ChemblClusterWfDao
from cuchemcommon.data.helper.chembldata import ADDITIONAL_FEILD, IMP_PROPS
from cuchemcommon.fingerprint import MorganFingerprint
from cuchemcommon.utils.logger import MetricsLogger
from dask_ml.decomposition import PCA as dask_PCA
from cuchem.utils.metrics import batched_silhouette_scores
from . import BaseClusterWorkflow
logger = logging.getLogger(__name__)
class CpuKmeansUmap(BaseClusterWorkflow):
def __init__(self,
n_molecules=None,
dao: ClusterWfDAO = ChemblClusterWfDao(MorganFingerprint),
n_pca=64,
n_clusters=7,
seed=0):
super(CpuKmeansUmap, self).__init__()
self.dao = dao
self.n_molecules = n_molecules
self.n_pca = n_pca
self.n_clusters = n_clusters
self.seed = seed
self.context = Context()
self.n_spearman = 5000
self.n_silhouette = 500000
def is_gpu_enabled(self):
return False
def cluster(self,
df_molecular_embedding=None):
logger.info("Executing CPU workflow...")
cache_directory = self.context.cache_directory
if df_molecular_embedding is None:
self.n_molecules = self.context.n_molecule
df_molecular_embedding = self.dao.fetch_molecular_embedding(
self.n_molecules,
cache_directory=cache_directory)
ids = df_molecular_embedding['id']
df_molecular_embedding = df_molecular_embedding.persist()
self.n_molecules = df_molecular_embedding.compute().shape[0]
# self.n_molecules = self.context.n_molecule
for col in ['id', 'index', 'molregno']:
if col in df_molecular_embedding.columns:
df_molecular_embedding = df_molecular_embedding.drop([col], axis=1)
other_props = IMP_PROPS + ADDITIONAL_FEILD
df_molecular_embedding = df_molecular_embedding.drop(other_props, axis=1)
if self.context.is_benchmark:
molecular_embedding_sample, spearman_index = self._random_sample_from_arrays(
df_molecular_embedding, n_samples=self.n_spearman)
if self.n_pca:
with MetricsLogger('pca', self.n_molecules) as ml:
pca = dask_PCA(n_components=self.n_pca)
df_embedding = pca.fit_transform(df_molecular_embedding.to_dask_array(lengths=True))
else:
df_embedding = df_molecular_embedding
with MetricsLogger('kmeans', self.n_molecules, ) as ml:
# kmeans_float = dask_KMeans(n_clusters=self.n_clusters)
kmeans_float = sklearn.cluster.KMeans(n_clusters=self.n_clusters)
kmeans_float.fit(df_embedding)
kmeans_labels = kmeans_float.labels_
ml.metric_name = 'silhouette_score'
ml.metric_func = batched_silhouette_scores
ml.metric_func_kwargs = {}
ml.metric_func_args = (None, None)
if self.context.is_benchmark:
(embedding_sample, kmeans_labels_sample), _ = self._random_sample_from_arrays(
df_embedding, kmeans_labels, n_samples=self.n_silhouette)
ml.metric_func_args = (embedding_sample, kmeans_labels_sample)
with MetricsLogger('umap', self.n_molecules) as ml:
df_molecular_embedding = df_molecular_embedding.compute()
umap_model = umap.UMAP() # TODO: Use dask to distribute umap. https://github.com/dask/dask/issues/5229
X_train = umap_model.fit_transform(df_embedding)
ml.metric_name = 'spearman_rho'
ml.metric_func = self._compute_spearman_rho
ml.metric_func_args = (None, None)
if self.context.is_benchmark:
X_train_sample, _ = self._random_sample_from_arrays(
X_train, index=spearman_index)
ml.metric_func_args = (molecular_embedding_sample, X_train_sample)
df_molecular_embedding['x'] = X_train[:, 0]
df_molecular_embedding['y'] = X_train[:, 1]
# df_molecular_embedding['cluster'] = kmeans_labels.compute()
df_molecular_embedding['cluster'] = kmeans_labels
df_molecular_embedding['id'] = ids
self.df_embedding = df_molecular_embedding
return self.df_embedding
| cheminformatics-master | cuchem/cuchem/wf/cluster/cpukmeansumap.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import singledispatch
from typing import List
import cudf
import cuml
import dask
import dask_cudf
from cuchemcommon.context import Context
from cuchemcommon.data import ClusterWfDAO
from cuchemcommon.data.cluster_wf import ChemblClusterWfDao
from cuchemcommon.fingerprint import MorganFingerprint
from cuchemcommon.utils.logger import MetricsLogger
from cuchemcommon.utils.singleton import Singleton
from cuml.dask.cluster import KMeans as cuDaskKMeans
from cuml.dask.decomposition import PCA as cuDaskPCA
from cuml.dask.manifold import UMAP as cuDaskUMAP
from cuml.manifold import UMAP as cuUMAP
from cuchem.utils.metrics import batched_silhouette_scores
from . import BaseClusterWorkflow
logger = logging.getLogger(__name__)
MIN_RECLUSTER_SIZE = 200
@singledispatch
def _gpu_cluster_wrapper(embedding, n_pca, self):
return NotImplemented
@_gpu_cluster_wrapper.register(dask.dataframe.core.DataFrame)
def _(embedding, n_pca, self):
embedding = dask_cudf.from_dask_dataframe(embedding)
return _gpu_cluster_wrapper(embedding, n_pca, self)
@_gpu_cluster_wrapper.register(cudf.DataFrame)
def _(embedding, n_pca, self):
embedding = dask_cudf.from_cudf(embedding,
chunksize=int(embedding.shape[0] * 0.1))
return _gpu_cluster_wrapper(embedding, n_pca, self)
@_gpu_cluster_wrapper.register(dask_cudf.core.DataFrame)
def _(embedding, n_pca, self):
embedding = embedding.persist()
return self._cluster(embedding, n_pca)
class GpuKmeansUmap(BaseClusterWorkflow, metaclass=Singleton):
def __init__(self,
n_molecules: int = None,
dao: ClusterWfDAO = ChemblClusterWfDao(MorganFingerprint),
pca_comps=64,
n_clusters=7,
seed=0):
super().__init__()
self.dao = dao
self.n_molecules = n_molecules
self.pca_comps = pca_comps
self.pca = None
self.n_clusters = n_clusters
self.df_embedding = None
self.seed = seed
self.context = Context()
self.n_spearman = 5000
self.n_silhouette = 500000
def _cluster(self, embedding, n_pca):
"""
        Clusters the molecular fingerprint embeddings with KMeans and projects
        them to 2D with UMAP.
"""
dask_client = self.context.dask_client
embedding = embedding.reset_index()
# Before reclustering remove all columns that may interfere
embedding, prop_series = self._remove_non_numerics(embedding)
self.n_molecules, n_obs = embedding.compute().shape
if self.context.is_benchmark:
molecular_embedding_sample, spearman_index = self._random_sample_from_arrays(
embedding, n_samples=self.n_spearman)
if n_pca and n_obs > n_pca:
with MetricsLogger('pca', self.n_molecules) as ml:
if self.pca is None:
self.pca = cuDaskPCA(client=dask_client, n_components=n_pca)
self.pca.fit(embedding)
embedding = self.pca.transform(embedding)
embedding = embedding.persist()
with MetricsLogger('kmeans', self.n_molecules) as ml:
if self.n_molecules < MIN_RECLUSTER_SIZE:
                raise Exception('Reclustering fewer than %d molecules is not supported.' % MIN_RECLUSTER_SIZE)
kmeans_cuml = cuDaskKMeans(client=dask_client,
n_clusters=self.n_clusters)
kmeans_cuml.fit(embedding)
kmeans_labels = kmeans_cuml.predict(embedding)
ml.metric_name = 'silhouette_score'
ml.metric_func = batched_silhouette_scores
ml.metric_func_kwargs = {}
ml.metric_func_args = (None, None)
if self.context.is_benchmark:
(embedding_sample, kmeans_labels_sample), _ = self._random_sample_from_arrays(
embedding, kmeans_labels, n_samples=self.n_silhouette)
ml.metric_func_args = (embedding_sample, kmeans_labels_sample)
with MetricsLogger('umap', self.n_molecules) as ml:
X_train = embedding.compute()
local_model = cuUMAP()
local_model.fit(X_train)
umap_model = cuDaskUMAP(local_model,
n_neighbors=100,
a=1.0,
b=1.0,
learning_rate=1.0,
client=dask_client)
Xt = umap_model.transform(embedding)
ml.metric_name = 'spearman_rho'
ml.metric_func = self._compute_spearman_rho
ml.metric_func_args = (None, None)
if self.context.is_benchmark:
X_train_sample, _ = self._random_sample_from_arrays(
X_train, index=spearman_index)
ml.metric_func_args = (molecular_embedding_sample, X_train_sample)
        # Add back the columns required for plotting and for correlating data
        # between re-clustering runs
embedding['cluster'] = kmeans_labels
embedding['x'] = Xt[0]
embedding['y'] = Xt[1]
# Add back the prop columns
for col in prop_series.keys():
embedding[col] = prop_series[col]
return embedding
def cluster(self, df_mol_embedding=None):
logger.info("Executing GPU workflow...")
if df_mol_embedding is None:
self.n_molecules = self.context.n_molecule
df_mol_embedding = self.dao.fetch_molecular_embedding(
self.n_molecules,
cache_directory=self.context.cache_directory,
)
df_mol_embedding = df_mol_embedding.persist()
self.df_embedding = _gpu_cluster_wrapper(df_mol_embedding,
self.pca_comps,
self)
return self.df_embedding
def recluster(self,
filter_column=None,
filter_values=None,
n_clusters=None):
df_embedding = self.df_embedding
if filter_values is not None:
filter = df_embedding[filter_column].isin(filter_values)
df_embedding['filter_col'] = filter
df_embedding = df_embedding.query('filter_col == True')
if n_clusters is not None:
self.n_clusters = n_clusters
self.df_embedding = _gpu_cluster_wrapper(df_embedding, None, self)
return self.df_embedding
def add_molecules(self, chemblids: List):
chemblids = [x.strip().upper() for x in chemblids]
chem_mol_map = {row[0]: row[1] for row in self.dao.fetch_id_from_chembl(chemblids)}
molregnos = list(chem_mol_map.keys())
self.df_embedding['id_exists'] = self.df_embedding['id'].isin(molregnos)
ldf = self.df_embedding.query('id_exists == True')
if hasattr(ldf, 'compute'):
ldf = ldf.compute()
self.df_embedding = self.df_embedding.drop(['id_exists'], axis=1)
missing_mol = set(molregnos).difference(ldf['id'].to_array())
chem_mol_map = {id: chem_mol_map[id] for id in missing_mol}
missing_molregno = chem_mol_map.keys()
if self.pca and len(missing_molregno) > 0:
new_fingerprints = self.dao.fetch_molecular_embedding_by_id(missing_molregno)
new_fingerprints, prop_series = self._remove_non_numerics(new_fingerprints)
if isinstance(self.pca, cuml.PCA) and hasattr(new_fingerprints, 'compute'):
new_fingerprints = new_fingerprints.compute()
new_fingerprints = self.pca.transform(new_fingerprints)
# Add back the prop columns
for col in prop_series.keys():
prop_ser = prop_series[col]
if isinstance(self.pca, cuml.PCA) and hasattr(prop_ser, 'compute'):
prop_ser = prop_ser.compute()
new_fingerprints[col] = prop_ser
self.df_embedding = self._remove_ui_columns(self.df_embedding)
self.df_embedding = self.df_embedding.append(new_fingerprints)
if hasattr(self.df_embedding, 'compute'):
self.df_embedding = self.df_embedding.compute()
logger.info(self.df_embedding.shape)
return chem_mol_map, molregnos, self.df_embedding
class GpuKmeansUmapHybrid(GpuKmeansUmap, metaclass=Singleton):
def __init__(self,
n_molecules: int = None,
dao: ClusterWfDAO = ChemblClusterWfDao(MorganFingerprint),
pca_comps=64,
n_clusters=7,
seed=0):
super().__init__(n_molecules=n_molecules,
dao=dao,
pca_comps=pca_comps,
n_clusters=n_clusters,
seed=seed)
def _cluster(self, embedding, n_pca):
"""
        Clusters the molecular fingerprint embeddings with KMeans and projects
        them to 2D with UMAP.
"""
if hasattr(embedding, 'compute'):
embedding = embedding.compute()
embedding = embedding.reset_index()
# Before reclustering remove all columns that may interfere
embedding, prop_series = self._remove_non_numerics(embedding)
self.n_molecules, n_obs = embedding.shape
if self.context.is_benchmark:
molecular_embedding_sample, spearman_index = self._random_sample_from_arrays(
embedding, n_samples=self.n_spearman)
if n_pca and n_obs > n_pca:
with MetricsLogger('pca', self.n_molecules) as ml:
                if self.pca is None:
self.pca = cuml.PCA(n_components=n_pca)
self.pca.fit(embedding)
embedding = self.pca.transform(embedding)
with MetricsLogger('kmeans', self.n_molecules) as ml:
if self.n_molecules < MIN_RECLUSTER_SIZE:
                raise Exception('Reclustering fewer than %d molecules is not supported.' % MIN_RECLUSTER_SIZE)
kmeans_cuml = cuml.KMeans(n_clusters=self.n_clusters)
kmeans_cuml.fit(embedding)
kmeans_labels = kmeans_cuml.predict(embedding)
ml.metric_name = 'silhouette_score'
ml.metric_func = batched_silhouette_scores
ml.metric_func_kwargs = {}
ml.metric_func_args = (None, None)
if self.context.is_benchmark:
(embedding_sample, kmeans_labels_sample), _ = self._random_sample_from_arrays(
embedding, kmeans_labels, n_samples=self.n_silhouette)
ml.metric_func_args = (embedding_sample, kmeans_labels_sample)
with MetricsLogger('umap', self.n_molecules) as ml:
umap = cuml.manifold.UMAP()
Xt = umap.fit_transform(embedding)
ml.metric_name = 'spearman_rho'
ml.metric_func = self._compute_spearman_rho
ml.metric_func_args = (None, None)
if self.context.is_benchmark:
X_train_sample, _ = self._random_sample_from_arrays(
embedding, index=spearman_index)
ml.metric_func_args = (molecular_embedding_sample, X_train_sample)
        # Add back the columns required for plotting and for correlating data
        # between re-clustering runs
embedding['cluster'] = kmeans_labels
embedding['x'] = Xt[0]
embedding['y'] = Xt[1]
# Add back the prop columns
for col in prop_series.keys():
embedding[col] = prop_series[col]
return embedding
def recluster(self,
filter_column=None,
filter_values=None,
n_clusters=None):
df_embedding = self.df_embedding
if filter_values is not None:
filter = df_embedding[filter_column].isin(filter_values)
df_embedding['filter_col'] = filter
df_embedding = df_embedding.query('filter_col == True')
if n_clusters is not None:
self.n_clusters = n_clusters
self.df_embedding = self._cluster(df_embedding, None)
return self.df_embedding
| cheminformatics-master | cuchem/cuchem/wf/cluster/gpukmeansumap.py |
import logging
from typing import List
import numpy as np
import pandas as pd
from cuchemcommon.data import GenerativeWfDao
from cuchemcommon.data.generative_wf import ChemblGenerativeWfDao
from cuchemcommon.fingerprint import Embeddings
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.workflow import BaseGenerativeWorkflow
from cuchem.utils.data_peddler import download_cddd_models
logger = logging.getLogger(__name__)
class Cddd(BaseGenerativeWorkflow, metaclass=Singleton):
def __init__(self, dao: GenerativeWfDao = ChemblGenerativeWfDao(None)) -> None:
super().__init__(dao)
self.default_model_loc = download_cddd_models()
self.dao = dao
self.cddd_embeddings = Embeddings(model_dir=self.default_model_loc)
self.min_jitter_radius = 0.5
def smiles_to_embedding(self, smiles: str, padding: int):
embedding = self.cddd_embeddings.func.seq_to_emb(smiles).squeeze()
return embedding
def embedding_to_smiles(self,
embedding,
dim: int,
pad_mask):
return self.cddd_embeddings.inverse_transform(embedding)
def find_similars_smiles_list(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False):
radius = self._compute_radius(scaled_radius)
embedding = self.cddd_embeddings.func.seq_to_emb(smiles).squeeze()
embeddings = self.addjitter(embedding, radius, cnt=num_requested)
neighboring_embeddings = np.concatenate([embedding.reshape(1, embedding.shape[0]),
embeddings])
embeddings = [embedding] + embeddings
return self.cddd_embeddings.inverse_transform(neighboring_embeddings), embeddings
def find_similars_smiles(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False):
generated_mols, neighboring_embeddings = self.find_similars_smiles_list(smiles,
num_requested=num_requested,
scaled_radius=scaled_radius,
force_unique=force_unique)
dims = []
for neighboring_embedding in neighboring_embeddings:
dims.append(neighboring_embedding.shape)
generated_df = pd.DataFrame({'SMILES': generated_mols,
'embeddings': neighboring_embeddings,
'embeddings_dim': dims,
'Generated': [True for i in range(len(generated_mols))]})
        # The first row is the input molecule; column 3 is 'Generated'
        generated_df.iat[0, 3] = False
if force_unique:
generated_df = self.compute_unique_smiles(generated_df,
self.cddd_embeddings.inverse_transform,
scaled_radius=scaled_radius)
return generated_df
def interpolate_smiles(self,
smiles: List,
num_points: int = 10,
scaled_radius=None,
force_unique=False):
num_points = int(num_points) + 2
if len(smiles) < 2:
            raise Exception('At least two SMILES strings are required')
def linear_interpolate_points(embedding, num_points):
return np.linspace(embedding[0], embedding[1], num_points)
result_df = []
for idx in range(len(smiles) - 1):
data = pd.DataFrame({'transformed_smiles': [smiles[idx], smiles[idx + 1]]})
input_embeddings = np.asarray(self.cddd_embeddings.transform(data))
interp_embeddings = np.apply_along_axis(linear_interpolate_points,
axis=0,
arr=input_embeddings,
num_points=num_points)
generated_mols = self.cddd_embeddings.inverse_transform(interp_embeddings)
interp_embeddings = interp_embeddings.tolist()
dims = []
embeddings = []
for interp_embedding in interp_embeddings:
dims.append(input_embeddings.shape)
interp_embedding = np.asarray(interp_embedding)
embeddings.append(interp_embedding)
interp_df = pd.DataFrame({'SMILES': generated_mols,
'embeddings': embeddings,
'embeddings_dim': dims,
'Generated': [True for i in range(num_points)]})
            # Mark the source and destination molecules as not generated
            # (column 3 is 'Generated')
            interp_df.iat[0, 3] = False
            interp_df.iat[-1, 3] = False
if force_unique:
interp_df = self.compute_unique_smiles(interp_df,
self.cddd_embeddings.inverse_transform,
scaled_radius=scaled_radius)
result_df.append(interp_df)
return pd.concat(result_df)
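# Illustrative sketch (not part of the original module): sampling neighbours of a
# seed molecule with the CDDD-based workflow. Instantiating Cddd fetches the
# pretrained CDDD checkpoint via download_cddd_models() if it is not already
# cached; the SMILES string is an arbitrary example.
if __name__ == '__main__':
    wf = Cddd()
    neighbours = wf.find_similars_smiles('CC(=O)Oc1ccccc1C(=O)O', num_requested=5)
    print(neighbours[['SMILES', 'Generated']])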
| cheminformatics-master | cuchem/cuchem/wf/generative/cddd.py |
import logging
import os
import grpc
import pandas as pd
from typing import List
from generativesampler_pb2_grpc import GenerativeSamplerStub
from generativesampler_pb2 import GenerativeSpec, EmbeddingList, GenerativeModel, google_dot_protobuf_dot_empty__pb2
from cuchemcommon.data import GenerativeWfDao
from cuchemcommon.data.generative_wf import ChemblGenerativeWfDao
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.workflow import BaseGenerativeWorkflow
logger = logging.getLogger(__name__)
class MegatronMolBART(BaseGenerativeWorkflow, metaclass=Singleton):
def __init__(self, dao: GenerativeWfDao = ChemblGenerativeWfDao(None)) -> None:
super().__init__(dao)
self.min_jitter_radius = 1
channel = grpc.insecure_channel(os.getenv('Megamolbart', 'megamolbart:50051'))
self.stub = GenerativeSamplerStub(channel)
def get_iteration(self):
result = self.stub.GetIteration(google_dot_protobuf_dot_empty__pb2.Empty())
return result.iteration
def smiles_to_embedding(self,
smiles: str,
padding: int,
scaled_radius=None,
num_requested: int = 10):
spec = GenerativeSpec(smiles=[smiles],
padding=padding,
radius=scaled_radius,
numRequested=num_requested)
result = self.stub.SmilesToEmbedding(spec)
return result
def embedding_to_smiles(self,
embedding,
dim: int,
pad_mask):
spec = EmbeddingList(embedding=embedding,
dim=dim,
pad_mask=pad_mask)
return self.stub.EmbeddingToSmiles(spec)
def find_similars_smiles(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False,
sanitize=True):
spec = GenerativeSpec(model=GenerativeModel.MegaMolBART,
smiles=smiles,
radius=scaled_radius,
numRequested=num_requested,
forceUnique=force_unique,
sanitize=sanitize)
result = self.stub.FindSimilars(spec)
generatedSmiles = result.generatedSmiles
embeddings = []
dims = []
for embedding in result.embeddings:
embeddings.append(list(embedding.embedding))
dims.append(embedding.dim)
generated_df = pd.DataFrame({'SMILES': generatedSmiles,
'embeddings': embeddings,
'embeddings_dim': dims,
'Generated': [True for i in range(len(generatedSmiles))]})
generated_df['Generated'].iat[0] = False
return generated_df
def interpolate_smiles(self,
smiles: List,
num_points: int = 10,
scaled_radius=None,
force_unique=False):
spec = GenerativeSpec(model=GenerativeModel.MegaMolBART,
smiles=smiles,
radius=scaled_radius,
numRequested=num_points,
forceUnique=force_unique)
result = self.stub.Interpolate(spec)
result = result.generatedSmiles
generated_df = pd.DataFrame({'SMILES': result,
'Generated': [True for i in range(len(result))]})
generated_df.iat[0, 1] = False
generated_df.iat[-1, 1] = False
return generated_df
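# Illustrative sketch (not part of the original module): exercising the gRPC-backed
# workflow. It assumes a MegaMolBART generative service is reachable at the address
# in the 'Megamolbart' environment variable (default 'megamolbart:50051'); the
# SMILES string is an arbitrary example.
if __name__ == '__main__':
    wf = MegatronMolBART()
    print('checkpoint iteration:', wf.get_iteration())
    similars = wf.find_similars_smiles('CC(=O)Oc1ccccc1C(=O)O', num_requested=5)
    print(similars[['SMILES', 'Generated']])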
| cheminformatics-master | cuchem/cuchem/wf/generative/megatronmolbart.py |
from cuchem.wf.generative.cddd import Cddd as Cddd
from cuchem.wf.generative.megatronmolbart import MegatronMolBART as MegatronMolBART | cheminformatics-master | cuchem/cuchem/wf/generative/__init__.py |
import logging
from cuchemcommon.data import GenerativeWfDao
from cuchemcommon.data.generative_wf import ChemblGenerativeWfDao
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.workflow import BaseGenerativeWorkflow
logger = logging.getLogger(__name__)
class MolBART(BaseGenerativeWorkflow, metaclass=Singleton):
    def __init__(self, dao: GenerativeWfDao = ChemblGenerativeWfDao(None)) -> None:
pass | cheminformatics-master | cuchem/cuchem/wf/generative/molbart.py |
import os
from setuptools import setup, find_packages
requirementPath = 'requirements.txt'
install_requires = []
if os.path.isfile(requirementPath):
with open(requirementPath) as f:
install_requires = f.read().splitlines()
setup(
name='cuchem-commons',
version='0.1',
packages=find_packages(),
description='Common components used in Nvidia ChemInformatics',
install_requires=install_requires
) | cheminformatics-master | common/setup.py |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: generativesampler.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='generativesampler.proto',
package='nvidia.cheminformatics.grpc',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x17generativesampler.proto\x12\x1bnvidia.cheminformatics.grpc\x1a\x1bgoogle/protobuf/empty.proto\"\x99\x02\n\x0eGenerativeSpec\x12;\n\x05model\x18\x01 \x01(\x0e\x32,.nvidia.cheminformatics.grpc.GenerativeModel\x12\x0e\n\x06smiles\x18\x02 \x03(\t\x12\x13\n\x06radius\x18\x03 \x01(\x02H\x00\x88\x01\x01\x12\x19\n\x0cnumRequested\x18\x04 \x01(\x05H\x01\x88\x01\x01\x12\x14\n\x07padding\x18\x05 \x01(\x05H\x02\x88\x01\x01\x12\x18\n\x0b\x66orceUnique\x18\x06 \x01(\x08H\x03\x88\x01\x01\x12\x15\n\x08sanitize\x18\x07 \x01(\x08H\x04\x88\x01\x01\x42\t\n\x07_radiusB\x0f\n\r_numRequestedB\n\n\x08_paddingB\x0e\n\x0c_forceUniqueB\x0b\n\t_sanitize\"e\n\nSmilesList\x12\x17\n\x0fgeneratedSmiles\x18\x01 \x03(\t\x12>\n\nembeddings\x18\x02 \x03(\x0b\x32*.nvidia.cheminformatics.grpc.EmbeddingList\"A\n\rEmbeddingList\x12\x11\n\tembedding\x18\x01 \x03(\x02\x12\x0b\n\x03\x64im\x18\x02 \x03(\x05\x12\x10\n\x08pad_mask\x18\x03 \x03(\x08\"!\n\x0cIterationVal\x12\x11\n\titeration\x18\x01 \x01(\x05*:\n\x0fGenerativeModel\x12\x08\n\x04\x43\x44\x44\x44\x10\x00\x12\x0f\n\x0bMegaMolBART\x10\x01\x12\x0c\n\x07MolBART\x10\x90N2\x93\x04\n\x11GenerativeSampler\x12n\n\x11SmilesToEmbedding\x12+.nvidia.cheminformatics.grpc.GenerativeSpec\x1a*.nvidia.cheminformatics.grpc.EmbeddingList\"\x00\x12j\n\x11\x45mbeddingToSmiles\x12*.nvidia.cheminformatics.grpc.EmbeddingList\x1a\'.nvidia.cheminformatics.grpc.SmilesList\"\x00\x12\x66\n\x0c\x46indSimilars\x12+.nvidia.cheminformatics.grpc.GenerativeSpec\x1a\'.nvidia.cheminformatics.grpc.SmilesList\"\x00\x12\x65\n\x0bInterpolate\x12+.nvidia.cheminformatics.grpc.GenerativeSpec\x1a\'.nvidia.cheminformatics.grpc.SmilesList\"\x00\x12S\n\x0cGetIteration\x12\x16.google.protobuf.Empty\x1a).nvidia.cheminformatics.grpc.IterationVal\"\x00\x62\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_GENERATIVEMODEL = _descriptor.EnumDescriptor(
name='GenerativeModel',
full_name='nvidia.cheminformatics.grpc.GenerativeModel',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='CDDD', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MolBART', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='MegaMolBART', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=574,
serialized_end=632,
)
_sym_db.RegisterEnumDescriptor(_GENERATIVEMODEL)
GenerativeModel = enum_type_wrapper.EnumTypeWrapper(_GENERATIVEMODEL)
CDDD = 0
MolBART = 1
MegaMolBART = 2
_GENERATIVESPEC = _descriptor.Descriptor(
name='GenerativeSpec',
full_name='nvidia.cheminformatics.grpc.GenerativeSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='model', full_name='nvidia.cheminformatics.grpc.GenerativeSpec.model', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='smiles', full_name='nvidia.cheminformatics.grpc.GenerativeSpec.smiles', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='radius', full_name='nvidia.cheminformatics.grpc.GenerativeSpec.radius', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='numRequested', full_name='nvidia.cheminformatics.grpc.GenerativeSpec.numRequested', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='padding', full_name='nvidia.cheminformatics.grpc.GenerativeSpec.padding', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='forceUnique', full_name='nvidia.cheminformatics.grpc.GenerativeSpec.forceUnique', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sanitize', full_name='nvidia.cheminformatics.grpc.GenerativeSpec.sanitize', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_radius', full_name='nvidia.cheminformatics.grpc.GenerativeSpec._radius',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_numRequested', full_name='nvidia.cheminformatics.grpc.GenerativeSpec._numRequested',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_padding', full_name='nvidia.cheminformatics.grpc.GenerativeSpec._padding',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_forceUnique', full_name='nvidia.cheminformatics.grpc.GenerativeSpec._forceUnique',
index=3, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_sanitize', full_name='nvidia.cheminformatics.grpc.GenerativeSpec._sanitize',
index=4, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=86,
serialized_end=367,
)
_SMILESLIST = _descriptor.Descriptor(
name='SmilesList',
full_name='nvidia.cheminformatics.grpc.SmilesList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='generatedSmiles', full_name='nvidia.cheminformatics.grpc.SmilesList.generatedSmiles', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='embeddings', full_name='nvidia.cheminformatics.grpc.SmilesList.embeddings', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=369,
serialized_end=470,
)
_EMBEDDINGLIST = _descriptor.Descriptor(
name='EmbeddingList',
full_name='nvidia.cheminformatics.grpc.EmbeddingList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='embedding', full_name='nvidia.cheminformatics.grpc.EmbeddingList.embedding', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=472,
serialized_end=537,
)
_ITERATIONVAL = _descriptor.Descriptor(
name='IterationVal',
full_name='nvidia.cheminformatics.grpc.IterationVal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='iteration', full_name='nvidia.cheminformatics.grpc.IterationVal.iteration', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=539,
serialized_end=572,
)
_GENERATIVESPEC.fields_by_name['model'].enum_type = _GENERATIVEMODEL
_GENERATIVESPEC.oneofs_by_name['_radius'].fields.append(
_GENERATIVESPEC.fields_by_name['radius'])
_GENERATIVESPEC.fields_by_name['radius'].containing_oneof = _GENERATIVESPEC.oneofs_by_name['_radius']
_GENERATIVESPEC.oneofs_by_name['_numRequested'].fields.append(
_GENERATIVESPEC.fields_by_name['numRequested'])
_GENERATIVESPEC.fields_by_name['numRequested'].containing_oneof = _GENERATIVESPEC.oneofs_by_name['_numRequested']
_GENERATIVESPEC.oneofs_by_name['_padding'].fields.append(
_GENERATIVESPEC.fields_by_name['padding'])
_GENERATIVESPEC.fields_by_name['padding'].containing_oneof = _GENERATIVESPEC.oneofs_by_name['_padding']
_GENERATIVESPEC.oneofs_by_name['_forceUnique'].fields.append(
_GENERATIVESPEC.fields_by_name['forceUnique'])
_GENERATIVESPEC.fields_by_name['forceUnique'].containing_oneof = _GENERATIVESPEC.oneofs_by_name['_forceUnique']
_GENERATIVESPEC.oneofs_by_name['_sanitize'].fields.append(
_GENERATIVESPEC.fields_by_name['sanitize'])
_GENERATIVESPEC.fields_by_name['sanitize'].containing_oneof = _GENERATIVESPEC.oneofs_by_name['_sanitize']
_SMILESLIST.fields_by_name['embeddings'].message_type = _EMBEDDINGLIST
DESCRIPTOR.message_types_by_name['GenerativeSpec'] = _GENERATIVESPEC
DESCRIPTOR.message_types_by_name['SmilesList'] = _SMILESLIST
DESCRIPTOR.message_types_by_name['EmbeddingList'] = _EMBEDDINGLIST
DESCRIPTOR.message_types_by_name['IterationVal'] = _ITERATIONVAL
DESCRIPTOR.enum_types_by_name['GenerativeModel'] = _GENERATIVEMODEL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GenerativeSpec = _reflection.GeneratedProtocolMessageType('GenerativeSpec', (_message.Message,), {
'DESCRIPTOR' : _GENERATIVESPEC,
'__module__' : 'generativesampler_pb2'
# @@protoc_insertion_point(class_scope:nvidia.cheminformatics.grpc.GenerativeSpec)
})
_sym_db.RegisterMessage(GenerativeSpec)
SmilesList = _reflection.GeneratedProtocolMessageType('SmilesList', (_message.Message,), {
'DESCRIPTOR' : _SMILESLIST,
'__module__' : 'generativesampler_pb2'
# @@protoc_insertion_point(class_scope:nvidia.cheminformatics.grpc.SmilesList)
})
_sym_db.RegisterMessage(SmilesList)
EmbeddingList = _reflection.GeneratedProtocolMessageType('EmbeddingList', (_message.Message,), {
'DESCRIPTOR' : _EMBEDDINGLIST,
'__module__' : 'generativesampler_pb2'
# @@protoc_insertion_point(class_scope:nvidia.cheminformatics.grpc.EmbeddingList)
})
_sym_db.RegisterMessage(EmbeddingList)
IterationVal = _reflection.GeneratedProtocolMessageType('IterationVal', (_message.Message,), {
'DESCRIPTOR' : _ITERATIONVAL,
'__module__' : 'generativesampler_pb2'
# @@protoc_insertion_point(class_scope:nvidia.cheminformatics.grpc.IterationVal)
})
_sym_db.RegisterMessage(IterationVal)
_GENERATIVESAMPLER = _descriptor.ServiceDescriptor(
name='GenerativeSampler',
full_name='nvidia.cheminformatics.grpc.GenerativeSampler',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=635,
serialized_end=1166,
methods=[
_descriptor.MethodDescriptor(
name='SmilesToEmbedding',
full_name='nvidia.cheminformatics.grpc.GenerativeSampler.SmilesToEmbedding',
index=0,
containing_service=None,
input_type=_GENERATIVESPEC,
output_type=_EMBEDDINGLIST,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='FindSimilars',
full_name='nvidia.cheminformatics.grpc.GenerativeSampler.FindSimilars',
index=1,
containing_service=None,
input_type=_GENERATIVESPEC,
output_type=_SMILESLIST,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='Interpolate',
full_name='nvidia.cheminformatics.grpc.GenerativeSampler.Interpolate',
index=2,
containing_service=None,
input_type=_GENERATIVESPEC,
output_type=_SMILESLIST,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetIteration',
full_name='nvidia.cheminformatics.grpc.GenerativeSampler.GetIteration',
index=3,
containing_service=None,
input_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
output_type=_ITERATIONVAL,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_GENERATIVESAMPLER)
DESCRIPTOR.services_by_name['GenerativeSampler'] = _GENERATIVESAMPLER
# @@protoc_insertion_point(module_scope)
| cheminformatics-master | common/generated/generativesampler_pb2.py |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
import generativesampler_pb2 as generativesampler__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class GenerativeSamplerStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SmilesToEmbedding = channel.unary_unary(
'/nvidia.cheminformatics.grpc.GenerativeSampler/SmilesToEmbedding',
request_serializer=generativesampler__pb2.GenerativeSpec.SerializeToString,
response_deserializer=generativesampler__pb2.EmbeddingList.FromString,
)
self.FindSimilars = channel.unary_unary(
'/nvidia.cheminformatics.grpc.GenerativeSampler/FindSimilars',
request_serializer=generativesampler__pb2.GenerativeSpec.SerializeToString,
response_deserializer=generativesampler__pb2.SmilesList.FromString,
)
self.Interpolate = channel.unary_unary(
'/nvidia.cheminformatics.grpc.GenerativeSampler/Interpolate',
request_serializer=generativesampler__pb2.GenerativeSpec.SerializeToString,
response_deserializer=generativesampler__pb2.SmilesList.FromString,
)
self.GetIteration = channel.unary_unary(
'/nvidia.cheminformatics.grpc.GenerativeSampler/GetIteration',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=generativesampler__pb2.IterationVal.FromString,
)
class GenerativeSamplerServicer(object):
"""Missing associated documentation comment in .proto file."""
def SmilesToEmbedding(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FindSimilars(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Interpolate(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetIteration(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GenerativeSamplerServicer_to_server(servicer, server):
rpc_method_handlers = {
'SmilesToEmbedding': grpc.unary_unary_rpc_method_handler(
servicer.SmilesToEmbedding,
request_deserializer=generativesampler__pb2.GenerativeSpec.FromString,
response_serializer=generativesampler__pb2.EmbeddingList.SerializeToString,
),
'FindSimilars': grpc.unary_unary_rpc_method_handler(
servicer.FindSimilars,
request_deserializer=generativesampler__pb2.GenerativeSpec.FromString,
response_serializer=generativesampler__pb2.SmilesList.SerializeToString,
),
'Interpolate': grpc.unary_unary_rpc_method_handler(
servicer.Interpolate,
request_deserializer=generativesampler__pb2.GenerativeSpec.FromString,
response_serializer=generativesampler__pb2.SmilesList.SerializeToString,
),
'GetIteration': grpc.unary_unary_rpc_method_handler(
servicer.GetIteration,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=generativesampler__pb2.IterationVal.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'nvidia.cheminformatics.grpc.GenerativeSampler', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class GenerativeSampler(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def SmilesToEmbedding(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nvidia.cheminformatics.grpc.GenerativeSampler/SmilesToEmbedding',
generativesampler__pb2.GenerativeSpec.SerializeToString,
generativesampler__pb2.EmbeddingList.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def FindSimilars(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nvidia.cheminformatics.grpc.GenerativeSampler/FindSimilars',
generativesampler__pb2.GenerativeSpec.SerializeToString,
generativesampler__pb2.SmilesList.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Interpolate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nvidia.cheminformatics.grpc.GenerativeSampler/Interpolate',
generativesampler__pb2.GenerativeSpec.SerializeToString,
generativesampler__pb2.SmilesList.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetIteration(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/nvidia.cheminformatics.grpc.GenerativeSampler/GetIteration',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
generativesampler__pb2.IterationVal.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| cheminformatics-master | common/generated/generativesampler_pb2_grpc.py |
import logging
import os
from abc import ABC
from enum import Enum
import numpy as np
import pandas as pd
from cddd.inference import InferenceModel
from cuchem.utils.data_peddler import download_cddd_models
from rdkit import Chem
from rdkit.Chem import AllChem
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logger = logging.getLogger(__name__)
def calc_morgan_fingerprints(dataframe, smiles_col='canonical_smiles'):
"""Calculate Morgan fingerprints on SMILES strings
Args:
        dataframe (pd.DataFrame): dataframe containing a SMILES column for calculation
        smiles_col (str): name of the SMILES column (default 'canonical_smiles')
Returns:
pd.DataFrame: new dataframe containing fingerprints
"""
mf = MorganFingerprint()
fp = mf.transform(dataframe, col_name=smiles_col)
fp = pd.DataFrame(fp)
fp.index = dataframe.index
return fp
class TransformationDefaults(Enum):
MorganFingerprint = {'radius': 2, 'nBits': 512}
Embeddings = {}
class BaseTransformation(ABC):
def __init__(self, **kwargs):
self.name = None
self.kwargs = None
self.func = None
def transform(self, data):
return NotImplemented
def transform_many(self, data):
return list(map(self.transform, data))
def __len__(self):
return NotImplemented
class MorganFingerprint(BaseTransformation):
def __init__(self, **kwargs):
self.name = __class__.__name__.split('.')[-1]
self.kwargs = TransformationDefaults[self.name].value
self.kwargs.update(kwargs)
self.func = AllChem.GetMorganFingerprintAsBitVect
def transform(self, data, col_name='transformed_smiles'):
data = data[col_name]
fp_array = []
for mol in data:
m = Chem.MolFromSmiles(mol)
fp = self.func(m, **self.kwargs)
fp_array.append(list(fp.ToBitString()))
fp_array = np.asarray(fp_array)
return fp_array
def __len__(self):
return self.kwargs['nBits']
class Embeddings(BaseTransformation):
def __init__(self, use_gpu=True, cpu_threads=5, model_dir=None, **kwargs):
self.name = __class__.__name__.split('.')[-1]
self.kwargs = TransformationDefaults[self.name].value
self.kwargs.update(kwargs)
        if model_dir is None:
            model_dir = download_cddd_models()
self.func = InferenceModel(model_dir, use_gpu=use_gpu, cpu_threads=cpu_threads)
def transform(self, data):
data = data['transformed_smiles']
return self.func.seq_to_emb(data).squeeze()
def inverse_transform(self, embeddings):
"Embedding array -- individual compound embeddings are in rows"
embeddings = np.asarray(embeddings)
return self.func.emb_to_seq(embeddings)
def __len__(self):
return self.func.hparams.emb_size
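# Illustrative sketch (not part of the original module): computing Morgan
# fingerprints for a small in-memory dataframe. The SMILES strings are arbitrary
# examples; radius and nBits come from TransformationDefaults above.
if __name__ == '__main__':
    smiles_df = pd.DataFrame({'canonical_smiles': ['CCO', 'c1ccccc1', 'CC(=O)O']})
    fingerprints = calc_morgan_fingerprints(smiles_df)
    print(fingerprints.shape)  # expected (3, 512) with the default nBits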
| cheminformatics-master | common/cuchemcommon/fingerprint.py |
cheminformatics-master | common/cuchemcommon/__init__.py |
|
import logging
import os
from configparser import RawConfigParser
from io import StringIO
from cuchemcommon.utils.singleton import Singleton
logger = logging.getLogger(__name__)
CONFIG_FILE = '.env'
class Context(metaclass=Singleton):
def __init__(self):
self.dask_client = None
self.compute_type = 'gpu'
self.is_benchmark = False
self.benchmark_file = None
self.cache_directory = None
self.n_molecule = None
self.batch_size = 10000
self.config = {}
if os.path.exists(CONFIG_FILE):
logger.info('Reading properties from %s...', CONFIG_FILE)
self.config = self._load_properties_file(CONFIG_FILE)
else:
            logger.warning('Could not locate %s', CONFIG_FILE)
def _load_properties_file(self, properties_file):
"""
Reads a properties file using ConfigParser.
        :param properties_file: path to the properties/config file to read
"""
        with open(properties_file, 'r') as config_file:
            config_content = StringIO('[root]\n' + config_file.read())
config = RawConfigParser()
config.read_file(config_content)
return config._sections['root']
def get_config(self, config_name, default=None):
"""
Returns values from local configuration.
"""
try:
return self.config[config_name]
except KeyError:
            logger.warning('%s not found, returning default.', config_name)
return default
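# --- Illustrative usage (added example; not part of the original module) ---
# Sketch of the expected `.env` property file and how values are read back.
# The property name shown is an example (data_mount_path is the one used by
# the ChEMBL data helpers).
#
#     # .env
#     data_mount_path=/data
#
def _example_context_usage():
    context = Context()          # Singleton: every call returns the same instance
    return context.get_config('data_mount_path', default='/data')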
| cheminformatics-master | common/cuchemcommon/context.py |
# import os
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# import logging
# from abc import ABC
# from rdkit.Chem.SaltRemover import SaltRemover
# from cddd.preprocessing import remove_salt_stereo, filter_smiles
# logger = logging.getLogger(__name__)
# class BaseTransformation(ABC):
# def __init__(self):
# pass
# def transform(self, data):
# return NotImplemented
# def transform_many(self, data):
# return list(map(self.transform, data))
# #return [self.filter(x) for x in data]
# class RemoveSalt(BaseTransformation):
# def __init__(self, remover=SaltRemover()):
# self.name = __class__.__name__.split('.')[-1]
# self.remover = remover
# def transform(self, data):
# return remove_salt_stereo(data, self.remover)
# class PreprocessSmiles(BaseTransformation):
# def __init__(self):
# self.name = __class__.__name__.split('.')[-1]
# def transform(self, data):
# return filter_smiles(data)
| cheminformatics-master | common/cuchemcommon/smiles.py |
import logging
# import torch
from functools import singledispatch
from typing import List
import numpy as np
from cuchemcommon.data import GenerativeWfDao
from rdkit.Chem import PandasTools, CanonSmiles
logger = logging.getLogger(__name__)
@singledispatch
def add_jitter(embedding, radius, cnt, shape):
return NotImplemented
@add_jitter.register(np.ndarray)
def _(embedding, radius, cnt, shape):
distorteds = []
for i in range(cnt):
noise = np.random.normal(0, radius, embedding.shape)
distorted = noise + embedding
distorteds.append(distorted)
return distorteds
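# --- Illustrative usage (added example; not part of the original module) ---
# `add_jitter` dispatches on the embedding type; the NumPy overload above adds
# Gaussian noise and returns `cnt` distorted copies. (A torch.Tensor overload is
# registered by the MegaMolBART inference code.) Minimal sketch:
def _example_add_jitter():
    embedding = np.zeros((4, 8))
    distorted = add_jitter(embedding, 0.1, 3, None)
    assert len(distorted) == 3 and distorted[0].shape == embedding.shape
    return distorted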
class BaseGenerativeWorkflow:
def __init__(self, dao: GenerativeWfDao = None) -> None:
self.dao = dao
self.min_jitter_radius = None
def get_iteration(self):
        raise NotImplementedError
def smiles_to_embedding(self,
smiles: str,
padding: int):
        raise NotImplementedError
def embedding_to_smiles(self,
embedding: float,
dim: int,
pad_mask):
        raise NotImplementedError
def interpolate_smiles(self,
smiles: List,
num_points: int = 10,
scaled_radius=None,
force_unique=False):
        raise NotImplementedError
def find_similars_smiles_list(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False):
        raise NotImplementedError
def find_similars_smiles(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False):
        raise NotImplementedError
def _compute_radius(self, scaled_radius):
if scaled_radius:
return float(scaled_radius * self.min_jitter_radius)
else:
return self.min_jitter_radius
def addjitter(self,
embedding,
radius=None,
cnt=1,
shape=None):
        radius = radius if radius else self.min_jitter_radius
return add_jitter(embedding, radius, cnt, shape)
def compute_unique_smiles(self,
interp_df,
embedding_funct,
scaled_radius=None):
"""
        Identifies duplicate SMILES and distorts their embeddings. The input df
        must have columns 'SMILES' and 'Generated' at 0th and 1st position.
        The 'Generated' column must contain a boolean to classify SMILES into input
        SMILES (False) and generated SMILES (True).
This function does not make any assumptions about order of embeddings.
Instead it simply orders the df by SMILES to identify the duplicates.
"""
distance = self._compute_radius(scaled_radius)
embeddings = interp_df['embeddings']
embeddings_dim = interp_df['embeddings_dim']
for index, row in interp_df.iterrows():
smile_string = row['SMILES']
try:
canonical_smile = CanonSmiles(smile_string)
except:
# If a SMILES cannot be canonicalized, just use the original
canonical_smile = smile_string
row['SMILES'] = canonical_smile
for i in range(5):
smiles = interp_df['SMILES'].sort_values()
duplicates = set()
for idx in range(0, smiles.shape[0] - 1):
if smiles.iat[idx] == smiles.iat[idx + 1]:
duplicates.add(smiles.index[idx])
duplicates.add(smiles.index[idx + 1])
if len(duplicates) > 0:
for dup_idx in duplicates:
if interp_df.iat[dup_idx, 3]:
# add jitter to generated molecules only
                        distorted = self.addjitter(embeddings[dup_idx],
                                                   distance,
                                                   cnt=1,
                                                   shape=embeddings_dim[dup_idx])
                        embeddings[dup_idx] = distorted[0]
interp_df['SMILES'] = embedding_funct(embeddings.to_list())
interp_df['embeddings'] = embeddings
else:
break
# Ensure all generated molecules are valid.
for i in range(5):
PandasTools.AddMoleculeColumnToFrame(interp_df, 'SMILES')
invalid_mol_df = interp_df[interp_df['ROMol'].isnull()]
if not invalid_mol_df.empty:
invalid_index = invalid_mol_df.index.to_list()
for idx in invalid_index:
embeddings[idx] = self.addjitter(embeddings[idx],
distance,
cnt=1,
shape=embeddings_dim[idx])[0]
interp_df['SMILES'] = embedding_funct(embeddings.to_list())
interp_df['embeddings'] = embeddings
else:
break
# Cleanup
if 'ROMol' in interp_df.columns:
interp_df = interp_df.drop('ROMol', axis=1)
return interp_df
def interpolate_by_id(self,
ids: List,
id_type: str = 'chembleid',
num_points=10,
force_unique=False,
scaled_radius: int = 1):
smiles = None
if not self.min_jitter_radius:
            raise Exception('Property `min_jitter_radius` must be defined in model class.')
if id_type.lower() == 'chembleid':
smiles = [row[2] for row in self.dao.fetch_id_from_chembl(ids)]
if len(smiles) != len(ids):
                raise Exception('One of the ids is invalid %s' % ids)
else:
raise Exception('id type %s not supported' % id_type)
return self.interpolate_smiles(smiles,
num_points=num_points,
scaled_radius=scaled_radius,
force_unique=force_unique)
def find_similars_smiles_by_id(self,
chemble_id: str,
id_type: str = 'chembleid',
num_requested=10,
force_unique=False,
scaled_radius: int = 1):
smiles = None
if not self.min_jitter_radius:
            raise Exception('Property `min_jitter_radius` must be defined in model class.')
if id_type.lower() == 'chembleid':
smiles = [row[2] for row in self.dao.fetch_id_from_chembl(chemble_id)]
if len(smiles) != len(chemble_id):
                raise Exception('One of the ids is invalid %s' % chemble_id)
else:
raise Exception('id type %s not supported' % id_type)
return self.find_similars_smiles(smiles[0],
num_requested=num_requested,
scaled_radius=scaled_radius,
force_unique=force_unique)
| cheminformatics-master | common/cuchemcommon/workflow.py |
from cuchemcommon.utils.singleton import Singleton | cheminformatics-master | common/cuchemcommon/utils/__init__.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from datetime import datetime
from cuchemcommon.context import Context
from .sysinfo import get_machine_config, print_machine_config
BENCHMARK_FILE = '/data/benchmark.csv'
logger = logging.getLogger(__name__)
def initialize_logfile(benchmark_file=BENCHMARK_FILE):
"""Initialize benchmark file with header if needed"""
config = get_machine_config()
config_message = print_machine_config(config)
if not os.path.exists(benchmark_file):
with open(benchmark_file, 'w') as fh:
fh.write(f'# {config_message}\n')
fh.write('date,benchmark_type,step,time(hh:mm:ss.ms),n_molecules,n_workers,metric_name,metric_value\n')
return benchmark_file
class MetricsLogger(object):
def __init__(self,
task_name,
n_molecules):
self.task_name = task_name
self.n_molecules = n_molecules
self.start_time = None
self.metric_name = None
self.metric_value = None
self.metric_func = None
self.metric_func_args = None
self.metric_func_kwargs = {}
def __enter__(self):
self.start_time = datetime.now()
return self
def __exit__(self, type, value, traceback):
context = Context()
runtime = datetime.now() - self.start_time
logger.info('### Runtime {} time (hh:mm:ss.ms) {}'.format(self.task_name, runtime))
n_workers = len(context.dask_client.cluster.workers)
if self.metric_func and context.is_benchmark:
self.metric_value = self.metric_func(*self.metric_func_args,
**self.metric_func_kwargs)
if self.metric_value is None:
self.metric_name = ''
self.metric_value = ''
else:
logger.info('Calculated {} is {}'.format(self.metric_name, self.metric_value))
log_results(self.start_time, context.compute_type, self.task_name,
runtime,
n_molecules=self.n_molecules,
n_workers=n_workers,
metric_name=self.metric_name,
metric_value=self.metric_value,
benchmark_file=context.benchmark_file)
def log_results(date,
benchmark_type,
step,
time,
n_molecules,
n_workers,
metric_name='',
metric_value='',
benchmark_file=BENCHMARK_FILE):
"""Log benchmark results to a file"""
out_list = [date, benchmark_type, step, time, n_molecules, n_workers, metric_name, metric_value]
out_fmt = ','.join(['{}'] * len(out_list)) + '\n'
with open(benchmark_file, 'a') as fh:
out_string = out_fmt.format(*out_list)
fh.write(out_string)
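# --- Illustrative usage (added example; not part of the original module) ---
# Minimal sketch of wrapping a benchmarked step in MetricsLogger. It assumes a
# Dask client is already attached to the shared Context (the __exit__ hook reads
# context.dask_client.cluster.workers) and that the benchmark file was created
# with initialize_logfile().
def _example_metrics_logger():
    context = Context()
    context.benchmark_file = initialize_logfile('/tmp/benchmark.csv')
    with MetricsLogger('example_step', n_molecules=1000) as ml:
        ml.metric_name = 'dummy_metric'
        ml.metric_func = lambda: 1.0
        ml.metric_func_args = ()
    return context.benchmark_file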
| cheminformatics-master | common/cuchemcommon/utils/logger.py |
# singleton.py
import logging
"""
Metaclass for singletons.
"""
logger = logging.getLogger(__name__)
class Singleton(type):
"""
Ensures single instance of a class.
Example Usage:
class MySingleton(metaclass=Singleton)
pass
"""
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(
*args, **kwargs)
return cls._instances[cls]
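# --- Illustrative usage (added example; not part of the original module) ---
# Demonstrates the caching behaviour: every instantiation of a class that uses
# this metaclass returns the same object.
def _example_singleton():
    class _Config(metaclass=Singleton):
        def __init__(self):
            self.value = 42
    first = _Config()
    second = _Config()
    assert first is second
    return first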
| cheminformatics-master | common/cuchemcommon/utils/singleton.py |
#!/opt/conda/envs/rapids/bin/python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import psutil
import pynvml as nv
def get_machine_config():
"""Get machine config for CPU and GPU(s)"""
# CPU config
physical_cores = psutil.cpu_count(logical=False)
logical_cores = psutil.cpu_count(logical=True)
cpufreq = psutil.cpu_freq()
cpufreq_max = cpufreq.max # Mhz
cpufreq_min = cpufreq.min
cpufreq_cur = cpufreq.current
svmem = psutil.virtual_memory()
mem_total = svmem.total / (1024.0 ** 3) # GB
mem_avail = svmem.available / (1024.0 ** 3)
# GPU config
nv.nvmlInit()
driver_version = nv.nvmlSystemGetDriverVersion()
deviceCount = nv.nvmlDeviceGetCount()
gpu_devices, gpu_mems = [], []
for i in range(deviceCount):
handle = nv.nvmlDeviceGetHandleByIndex(i)
gpu_devices.append(nv.nvmlDeviceGetName(handle).decode("utf-8"))
gpu_mem = nv.nvmlDeviceGetMemoryInfo(handle).total / (1024.0 ** 3)
gpu_mems.append(gpu_mem)
return {'cpu': {'physical_cores': physical_cores, 'logical_cores': logical_cores,
'min_freq_MHz': cpufreq_min, 'max_freq_MHz': cpufreq_max, 'cur_freq_MHz': cpufreq_cur,
'total_mem_GB': mem_total, 'avail_mem_GB': mem_avail},
'gpu': {'devices': gpu_devices, 'mem_GB': gpu_mems}}
def print_machine_config(config):
"""Printable version of config"""
cpu_cores = config['cpu']['physical_cores']
cpu_freq = int(round(config['cpu']['max_freq_MHz'], 0))
ram = int(round(config['cpu']['total_mem_GB'], 0))
cpu_config_message = f'{cpu_freq} MHz CPU with {cpu_cores} cores, {ram} GB RAM'
gpu_devices = Counter([(x, int(round(y, 0))) for x, y in zip(config['gpu']['devices'], config['gpu']['mem_GB'])])
gpu_config_message = ''
for (handle, mem), count in gpu_devices.items():
gpu_config_message += f'{count} x {handle} GPU(s)'
return ', '.join([cpu_config_message, gpu_config_message])
| cheminformatics-master | common/cuchemcommon/utils/sysinfo.py |
from typing import List
class ClusterWfDAO(object):
"""
Base class for all DAO for fetching data for Clustering Workflows
"""
def meta_df(self):
"""
Returns df with dtype set for structure without any column filter.
"""
return NotImplemented
def fetch_molecular_embedding(self, n_molecules: int, cache_directory: str = None):
"""
Fetch molecular properties from database/cache into a dask array.
"""
return NotImplemented
def fetch_molecular_embedding_by_id(self, molecule_id: List):
"""
Fetch molecular properties from database for the given id. Id depends on
        the backend database. For the ChEMBL DB it should be molregno.
"""
return NotImplemented
def fetch_id_from_smile(self, new_molecules: List):
"""
Fetch molecular details for a list of molecules. The values in the list
        of molecules depends on the database/service used. For example, it could
        be a ChEMBL ID or molregno for the ChEMBL database.
"""
return NotImplemented
class GenerativeWfDao(object):
def fetch_id_from_chembl(self, id: List):
"""
Fetch molecular details for a list of molecules. The values in the list
        of molecules depends on the database/service used. For example, it could
        be a ChEMBL ID or molregno for the ChEMBL database.
"""
return NotImplemented
| cheminformatics-master | common/cuchemcommon/data/__init__.py |
import logging
from typing import List
from cuchemcommon.data.helper.chembldata import ChEmblData
from cuchemcommon.utils.singleton import Singleton
from . import GenerativeWfDao
logger = logging.getLogger(__name__)
class ChemblGenerativeWfDao(GenerativeWfDao, metaclass=Singleton):
def __init__(self, fp_type):
self.chem_data = ChEmblData(fp_type)
def fetch_id_from_chembl(self, id: List):
logger.debug('Fetch ChEMBL ID using molregno...')
return self.chem_data.fetch_id_from_chembl(id)
| cheminformatics-master | common/cuchemcommon/data/generative_wf.py |
import logging
import math
import os
from typing import List
import cudf
import dask
import dask_cudf
from cuchemcommon.context import Context
from cuchemcommon.data.helper.chembldata import BATCH_SIZE, ChEmblData
from cuchemcommon.utils.singleton import Singleton
from . import ClusterWfDAO
logger = logging.getLogger(__name__)
FINGER_PRINT_FILES = 'filter_*.h5'
class ChemblClusterWfDao(ClusterWfDAO, metaclass=Singleton):
def __init__(self, fp_type):
self.chem_data = ChEmblData(fp_type)
def meta_df(self):
        return self.chem_data._meta_df()
def fetch_molecular_embedding(self,
n_molecules: int,
cache_directory: str = None):
context = Context()
if cache_directory:
hdf_path = os.path.join(cache_directory, FINGER_PRINT_FILES)
logger.info('Reading %d rows from %s...', n_molecules, hdf_path)
mol_df = dask.dataframe.read_hdf(hdf_path, 'fingerprints')
if n_molecules > 0:
npartitions = math.ceil(n_molecules / BATCH_SIZE)
mol_df = mol_df.head(n_molecules, compute=False, npartitions=npartitions)
else:
logger.info('Reading molecules from database...')
mol_df = self.chem_data.fetch_mol_embedding(num_recs=n_molecules,
batch_size=context.batch_size)
return mol_df
def fetch_molecular_embedding_by_id(self, molecule_id: List):
context = Context()
meta = self.chem_data._meta_df()
fp_df = self.chem_data._fetch_mol_embedding(molregnos=molecule_id,
batch_size=context.batch_size) \
.astype(meta.dtypes)
fp_df = cudf.from_pandas(fp_df)
fp_df = dask_cudf.from_cudf(fp_df, npartitions=1).reset_index()
return fp_df
def fetch_id_from_chembl(self, new_molecules: List):
logger.debug('Fetch ChEMBL ID using molregno...')
return self.chem_data.fetch_id_from_chembl(new_molecules)
| cheminformatics-master | common/cuchemcommon/data/cluster_wf.py |
cheminformatics-master | common/cuchemcommon/data/helper/__init__.py |
|
import os
import warnings
import pandas
import sqlite3
import logging
from typing import List
from dask import delayed, dataframe
from contextlib import closing
from cuchemcommon.utils.singleton import Singleton
from cuchemcommon.context import Context
warnings.filterwarnings("ignore", message=r"deprecated", category=FutureWarning)
logger = logging.getLogger(__name__)
BATCH_SIZE = 100000
ADDITIONAL_FEILD = ['canonical_smiles', 'transformed_smiles']
IMP_PROPS = [
'alogp',
'aromatic_rings',
'full_mwt',
'psa',
'rtb']
IMP_PROPS_TYPE = [pandas.Series([], dtype='float64'),
pandas.Series([], dtype='int64'),
pandas.Series([], dtype='float64'),
pandas.Series([], dtype='float64'),
pandas.Series([], dtype='int64')]
ADDITIONAL_FEILD_TYPE = [pandas.Series([], dtype='object'),
pandas.Series([], dtype='object')]
SQL_MOLECULAR_PROP = """
SELECT md.molregno as molregno, md.chembl_id, cp.*, cs.*
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.molregno in (%s)
"""
# DEPRECATED. Please add code to DAO classes.
class ChEmblData(object, metaclass=Singleton):
def __init__(self, fp_type):
context = Context()
db_file = context.get_config('data_mount_path', default='/data')
db_file = os.path.join(db_file, 'db/chembl_27.db')
if not os.path.exists(db_file):
logger.error('%s not found', db_file)
raise Exception('{} not found'.format(db_file))
self.fp_type = fp_type
self.chembl_db = 'file:%s?mode=ro' % db_file
logger.info('ChEMBL database: %s...' % self.chembl_db)
def fetch_props_by_molregno(self, molregnos):
"""
        Returns compound properties and structure filtered by molregno along
with a list of columns.
"""
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = SQL_MOLECULAR_PROP % " ,".join(list(map(str, molregnos)))
cur.execute(select_stmt)
cols = list(map(lambda x: x[0], cur.description))
return cols, cur.fetchall()
def fetch_props_by_chemble(self, chemble_ids):
"""
Returns compound properties and structure filtered by ChEMBL IDs along
with a list of columns.
"""
sql_stml = """
SELECT md.molregno as molregno, md.chembl_id, cp.*, cs.*
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.chembl_id in (%s)
"""
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = sql_stml % "'%s'" % "','".join([x.strip().upper() for x in chemble_ids])
cur.execute(select_stmt)
cols = list(map(lambda x: x[0], cur.description))
return cols, cur.fetchall()
def fetch_molregno_by_chemblId(self, chemblIds):
        logger.debug('Fetch molregno using ChEMBL ID...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT md.molregno as molregno
FROM compound_properties cp,
compound_structures cs,
molecule_dictionary md
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.chembl_id in (%s)
''' % "'%s'" % "','".join(chemblIds)
cur.execute(select_stmt)
return cur.fetchall()
def fetch_id_from_chembl(self, new_molecules: List):
        logger.debug('Fetch molregno and SMILES using ChEMBL ID...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT cs.molregno as molregno, md.chembl_id as chembl_id,
cs.canonical_smiles as smiles
FROM compound_structures cs,
molecule_dictionary md
WHERE md.molregno = cs.molregno
AND md.chembl_id in (%s)
''' % "'%s'" % "','".join([x.strip().upper() for x in new_molecules])
cur.execute(select_stmt)
return cur.fetchall()
def fetch_chemblId_by_molregno(self, molregnos):
logger.debug('Fetch ChEMBL ID using molregno...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT md.chembl_id as chembl_id
FROM molecule_dictionary md
WHERE md.molregno in (%s)
''' % ", ".join(list(map(str, molregnos)))
cur.execute(select_stmt)
return cur.fetchall()
def fetch_approved_drugs(self):
"""Fetch approved drugs with phase >=3 as dataframe
Args:
chembl_db_path (string): path to chembl sqlite database
Returns:
pd.DataFrame: dataframe containing SMILES strings and molecule index
"""
logger.debug('Fetching ChEMBL approved drugs...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = """SELECT
di.molregno,
cs.canonical_smiles,
di.max_phase_for_ind
FROM
drug_indication AS di
LEFT JOIN compound_structures AS cs ON di.molregno = cs.molregno
WHERE
di.max_phase_for_ind >= 3
AND cs.canonical_smiles IS NOT NULL;"""
cur.execute(select_stmt)
return cur.fetchall()
def fetch_random_samples(self, num_samples, max_len):
"""Fetch random samples from ChEMBL as dataframe
Args:
num_samples (int): number of samples to select
chembl_db_path (string): path to chembl sqlite database
Returns:
pd.DataFrame: dataframe containing SMILES strings and molecule index
"""
logger.debug('Fetching ChEMBL random samples...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = """SELECT
cs.molregno,
cs.canonical_smiles,
LENGTH(cs.canonical_smiles) as len
FROM
compound_structures AS cs
WHERE
cs.canonical_smiles IS NOT NULL
AND
                len <= """ + f'{max_len}' + """
ORDER BY RANDOM()
LIMIT """ + f'{num_samples};'
cur.execute(select_stmt)
return cur.fetchall()
def fetch_molecule_cnt(self):
logger.debug('Finding number of molecules...')
with closing(sqlite3.connect(self.chembl_db, uri=True)) as con, con, \
closing(con.cursor()) as cur:
select_stmt = '''
SELECT count(*)
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
'''
cur.execute(select_stmt)
return cur.fetchone()[0]
def _meta_df(self, **transformation_kwargs):
transformation = self.fp_type(**transformation_kwargs)
prop_meta = {'id': pandas.Series([], dtype='int64')}
prop_meta.update(dict(zip(IMP_PROPS + ADDITIONAL_FEILD,
IMP_PROPS_TYPE + ADDITIONAL_FEILD_TYPE)))
prop_meta.update({i: pandas.Series([], dtype='float32') for i in range(len(transformation))})
return pandas.DataFrame(prop_meta)
def _fetch_mol_embedding(self,
start=0,
batch_size=BATCH_SIZE,
molregnos=None,
**transformation_kwargs):
"""
Returns compound properties and structure for the first N number of
records in a dataframe.
"""
logger.info('Fetching %d records starting %d...' % (batch_size, start))
imp_cols = ['cp.' + col for col in IMP_PROPS]
if molregnos is None:
select_stmt = '''
SELECT md.molregno, %s, cs.canonical_smiles
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
LIMIT %d, %d
''' % (', '.join(imp_cols), start, batch_size)
else:
select_stmt = '''
SELECT md.molregno, %s, cs.canonical_smiles
FROM compound_properties cp,
molecule_dictionary md,
compound_structures cs
WHERE cp.molregno = md.molregno
AND md.molregno = cs.molregno
AND md.molregno in (%s)
LIMIT %d, %d
''' % (', '.join(imp_cols), " ,".join(list(map(str, molregnos))), start, batch_size)
df = pandas.read_sql(select_stmt,
sqlite3.connect(self.chembl_db, uri=True))
# Smiles -> Smiles transformation and filtering
# TODO: Discuss internally to find use or refactor this code to remove
# model specific filtering
df['transformed_smiles'] = df['canonical_smiles']
# if smiles_transforms is not None:
# if len(smiles_transforms) > 0:
# for xf in smiles_transforms:
# df['transformed_smiles'] = df['transformed_smiles'].map(xf.transform)
# df.dropna(subset=['transformed_smiles'], axis=0, inplace=True)
# Conversion to fingerprints or embeddings
# transformed_smiles = df['transformed_smiles']
transformation = self.fp_type(**transformation_kwargs)
cache_data = transformation.transform(df)
return_df = pandas.DataFrame(cache_data)
return_df = pandas.DataFrame(
return_df,
columns=pandas.RangeIndex(start=0,
stop=len(transformation))).astype('float32')
return_df = df.merge(return_df, left_index=True, right_index=True)
return_df.rename(columns={'molregno': 'id'}, inplace=True)
return return_df
def fetch_mol_embedding(self,
num_recs=None,
batch_size=BATCH_SIZE,
molregnos=None,
**transformation_kwargs):
"""
Returns compound properties and structure for the first N number of
records in a dataframe.
"""
logger.debug('Fetching properties for all molecules...')
if num_recs is None or num_recs < 0:
num_recs = self.fetch_molecule_cnt()
logger.info('num_recs %d', num_recs)
logger.info('batch_size %d', batch_size)
meta_df = self._meta_df(**transformation_kwargs)
dls = []
for start in range(0, num_recs, batch_size):
bsize = min(num_recs - start, batch_size)
dl_data = delayed(self._fetch_mol_embedding)(start=start,
batch_size=bsize,
molregnos=molregnos,
**transformation_kwargs)
dls.append(dl_data)
return dataframe.from_delayed(dls, meta=meta_df)
def save_fingerprints(self, hdf_path='data/filter_*.h5', num_recs=None, batch_size=5000):
"""
Generates fingerprints for all ChEMBL ID's in the database
"""
logger.debug('Fetching molecules from database for fingerprints...')
mol_df = self.fetch_mol_embedding(num_recs=num_recs, batch_size=batch_size)
mol_df.to_hdf(hdf_path, 'fingerprints')
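# --- Illustrative usage (added example; not part of the original module) ---
# Minimal sketch of pulling a small batch of fingerprints lazily with Dask and
# materializing it. It assumes the ChEMBL sqlite file is available under the
# configured `data_mount_path` and uses MorganFingerprint as the fingerprint
# type, mirroring how the DAO classes construct ChEmblData.
def _example_fetch_mol_embedding():
    from cuchemcommon.fingerprint import MorganFingerprint
    chem_data = ChEmblData(MorganFingerprint)
    # Two delayed partitions of 500 records each; compute() triggers the SQL reads.
    mol_ddf = chem_data.fetch_mol_embedding(num_recs=1000, batch_size=500)
    return mol_ddf.compute()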
| cheminformatics-master | common/cuchemcommon/data/helper/chembldata.py |
#!/usr/bin/env python3
#
# Copyright (c) 2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
import shutil
import argparse
from subprocess import run
import grpc
import generativesampler_pb2_grpc
from concurrent import futures
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('megamolbart')
formatter = logging.Formatter('%(asctime)s %(name)s [%(levelname)s]: %(message)s')
from util import (DEFAULT_MAX_SEQ_LEN, DEFAULT_VOCAB_PATH, CHECKPOINTS_DIR,
DEFAULT_NUM_LAYERS, DEFAULT_D_MODEL, DEFAULT_NUM_HEADS)
class Launcher(object):
"""
    MegaMolBART gRPC service launcher. Parses command-line options, makes sure
    the model checkpoints are available, and starts the gRPC server.
"""
def __init__(self):
parser = argparse.ArgumentParser(description='MegaMolBART gRPC Service')
parser.add_argument('-p', '--port',
dest='port',
type=int,
default=50051,
help='GRPC server Port')
parser.add_argument('-l', '--max_decode_length',
dest='max_decode_length',
type=int,
default=DEFAULT_MAX_SEQ_LEN,
help='Maximum length of decoded sequence')
parser.add_argument('-d', '--debug',
dest='debug',
action='store_true',
default=False,
help='Show debug message')
args = parser.parse_args(sys.argv[1:])
if args.debug:
logger.setLevel(logging.DEBUG)
logger.info(f'Maximum decoded sequence length is set to {args.max_decode_length}')
if not os.path.exists('/models/megamolbart/checkpoints/'):
self.download_megamolbart_model()
from megamolbart.service import GenerativeSampler
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
generativesampler_pb2_grpc.add_GenerativeSamplerServicer_to_server(
GenerativeSampler(decoder_max_seq_len=args.max_decode_length), server)
server.add_insecure_port(f'[::]:{args.port}')
server.start()
server.wait_for_termination()
def download_megamolbart_model(self):
"""
Downloads MegaMolBART model from NGC.
"""
download_script = '/opt/nvidia/megamolbart/scripts/download_model.sh'
if os.path.exists(download_script):
logger.info('Triggering model download...')
            result = run(['bash', '-c', download_script],
                         capture_output=True, text=True)
            logger.info(f'Model download stdout: {result.stdout}')
            logger.info(f'Model download stderr: {result.stderr}')
if result.returncode != 0:
raise Exception('Error downloading model')
def main():
Launcher()
if __name__ == '__main__':
main()
| cheminformatics-master | megamolbart/launch.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import sys
import grpc
from locust import task, User, constant
sys.path.insert(0, "generated")
import generativesampler_pb2
import generativesampler_pb2_grpc
from tests.utils import stopwatch
class GRPCLocust(User):
host = 'http://192.167.100.2'
wait_time = constant(.1)
@task
@stopwatch('GRPC_Sample')
def client_task(self):
with grpc.insecure_channel('192.167.100.2:50051') as channel:
stub = generativesampler_pb2_grpc.GenerativeSamplerStub(channel)
spec = generativesampler_pb2.GenerativeSpec(
model=generativesampler_pb2.GenerativeModel.MegaMolBART,
smiles='CN1C=NC2=C1C(=O)N(C(=O)N2C)C',
radius=0.0001,
numRequested=10)
response = stub.FindSimilars(spec)
| cheminformatics-master | megamolbart/tests/perf_grpc.py |
import sys
import grpc
import logging
from concurrent import futures
from contextlib import contextmanager
from megamolbart.service import GenerativeSampler
import generativesampler_pb2
import generativesampler_pb2_grpc
from util import (DEFAULT_NUM_LAYERS, DEFAULT_D_MODEL, DEFAULT_NUM_HEADS, CHECKPOINTS_DIR)
logger = logging.getLogger(__name__)
@contextmanager
def similarity(add_server_method, service_cls, stub_cls):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
add_server_method(service_cls(num_layers=DEFAULT_NUM_LAYERS,
hidden_size=DEFAULT_D_MODEL,
num_attention_heads=DEFAULT_NUM_HEADS,
checkpoints_dir=CHECKPOINTS_DIR,
vocab_path='/models/megamolbart/bart_vocab.txt',),
server)
port = server.add_insecure_port('[::]:0')
server.start()
try:
with grpc.insecure_channel('localhost:%d' % port) as channel:
yield stub_cls(channel)
finally:
server.stop(None)
def test_fetch_iterations():
sys.argv = [sys.argv[0]]
with similarity(generativesampler_pb2_grpc.add_GenerativeSamplerServicer_to_server,
GenerativeSampler,
generativesampler_pb2_grpc.GenerativeSamplerStub) as stub:
result = stub.GetIteration(generativesampler_pb2.google_dot_protobuf_dot_empty__pb2.Empty())
def test_dataframe_similar():
sys.argv = [sys.argv[0]]
with similarity(generativesampler_pb2_grpc.add_GenerativeSamplerServicer_to_server,
GenerativeSampler,
generativesampler_pb2_grpc.GenerativeSamplerStub) as stub:
spec = generativesampler_pb2.GenerativeSpec(
model=generativesampler_pb2.GenerativeModel.MegaMolBART,
smiles=['CC(=O)Nc1ccc(O)cc1'],
radius=5.0,
numRequested=10)
result = stub.FindSimilars(spec)
def test_dataframe_interpolate():
sys.argv = [sys.argv[0]]
with similarity(generativesampler_pb2_grpc.add_GenerativeSamplerServicer_to_server,
GenerativeSampler,
generativesampler_pb2_grpc.GenerativeSamplerStub) as stub:
spec = generativesampler_pb2.GenerativeSpec(
model=generativesampler_pb2.GenerativeModel.MegaMolBART,
smiles=['CC(=O)Nc1ccc(O)cc1', 'CC(=O)Nc1ccc(O)'],
radius=0.0005,
numRequested=10)
result = stub.Interpolate(spec)
| cheminformatics-master | megamolbart/tests/test_grpc.py |
#!/usr/bin/env python3
import pandas as pd
import torch
from megamolbart.inference import MegaMolBART
if __name__ == '__main__':
num_interp = 3
smiles1 = 'CC(=O)Nc1ccc(O)cc1'
smiles2 = 'CC(=O)Oc1ccccc1C(=O)O'
with torch.no_grad():
wf = MegaMolBART()
# Test each of the major functions
        mols_list_1, _, _, _ = wf.interpolate_molecules(smiles1, smiles2, num_interp, wf.tokenizer)
        assert len(mols_list_1) == num_interp + 2  # includes the two input molecules
assert isinstance(mols_list_1, list)
assert isinstance(mols_list_1[0], str)
mols_list_2, _, _ = wf.find_similars_smiles_list(smiles1, num_requested=num_interp)
assert len(mols_list_2) == num_interp + 1
assert isinstance(mols_list_2, list)
assert isinstance(mols_list_2[0], str)
mols_df_1 = wf.find_similars_smiles(smiles2, num_interp)
assert len(mols_df_1) == num_interp + 1
assert isinstance(mols_df_1, pd.DataFrame)
assert isinstance(mols_df_1.loc[1, 'SMILES'], str)
        mols_df_2, _ = wf.interpolate_smiles([smiles1, smiles2], num_interp)
assert len(mols_df_2) == num_interp + 2
assert isinstance(mols_df_2, pd.DataFrame)
assert isinstance(mols_df_2.loc[1, 'SMILES'], str)
| cheminformatics-master | megamolbart/tests/test_megamolbart.py |
#!/usr/bin/env python3
import os
import sys
import math
import time
import random
import argparse
from subprocess import run
from rdkit import Chem
from pathlib import Path
from concurrent.futures import ProcessPoolExecutor
MAX_LENGTH = 150
VAL_TEST_SPLIT = 0.005
def process_line(line):
if line is None or line == "":
return None
splits = line.split("\t")
if len(splits) < 2:
return None
smi, zinc_id = splits[0], splits[1]
    try:
        mol = Chem.MolFromSmiles(smi)
        if mol is None:
            # MolFromSmiles returns None for unparsable SMILES instead of raising
            return None
        smi = Chem.MolToSmiles(mol, canonical=True)
    except Exception:
        return None
if len(smi) > MAX_LENGTH:
return None
output = f"{zinc_id},{smi}"
return output
def process_file_text(text, executor):
lines = text.split("\n")
# Filter and canonicalise molecules
futures = [executor.submit(process_line, line) for line in lines]
outputs = [future.result() for future in futures]
output_lines = [output for output in outputs if output is not None]
# Assign set
mol_sets = ["train" for _ in range(len(output_lines))]
num_idxs = math.ceil(len(output_lines) * VAL_TEST_SPLIT)
val_idxs = random.sample(range(len(output_lines)), k=num_idxs)
for idx in val_idxs:
mol_sets[idx] = "val"
rem_idxs = set(range(len(output_lines))) - set(val_idxs)
test_idxs = random.sample(list(rem_idxs), k=num_idxs)
for idx in test_idxs:
mol_sets[idx] = "test"
# Generate file output
completed_lines = [f"{line},{dataset}" for line, dataset in zip(output_lines, mol_sets)]
output_text = "\n".join(completed_lines)
output_text = f"zinc_id,smiles,set\n{output_text}"
return output_text
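# --- Illustrative example (added; not part of the original script) ---
# Sketch of the expected ZINC input format (SMILES<TAB>zinc_id per line) and the
# CSV fragment produced for a single line. The ZINC id below is made up.
def _example_process_line():
    line = "CCO\tZINC000000000001"
    # Returns "ZINC000000000001,CCO", or None for invalid/over-length SMILES.
    return process_line(line)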
def format_arr_idx(arr_idx):
return "x" + str(arr_idx).rjust(3, "0")
def process_file(zinc_dir, out_dir, arr_idx):
cpus = len(os.sched_getaffinity(0))
executor = ProcessPoolExecutor(cpus)
print(f"Using a pool of {str(cpus)} processes for execution.")
zinc_path = Path(zinc_dir)
filename = format_arr_idx(arr_idx)
file_path = zinc_path / filename
print(f"Processing file {str(file_path)}...")
text = file_path.read_text()
output_text = process_file_text(text, executor)
print("Successfully processed file.")
out_dir = Path(out_dir)
out_path = out_dir / (filename + ".csv")
out_path.write_text(output_text)
print(f"Successfully written to {str(out_path)}")
def main(args):
start_time = time.time()
if args.download_list:
# Download zinc files and perform all pre-processing steps. Following
# are preprocessing steps:
# - Merge all records
# - Shuffle records randomly
# - Spilt 100000 recs per file
start = time.time()
print('Downloading zinc database...')
download_cmd = f'parallel -j {args.threads} --gnu "wget -q --no-clobber {{}} -P {args.download_dir}" < <(cat {args.download_list})'
print(download_cmd)
process = run(['bash', '-c', download_cmd])
if process.returncode != 0:
print('ERROR downloading zinc database. Please make sure "parallel" is installed and check disk space.')
sys.exit(process.returncode)
print('Download complete. Time ', time.time() - start)
start = time.time()
shuffled_data = args.zinc_dir
split_cmd = f"mkdir -p {shuffled_data}; cd {shuffled_data}; tail -q -n +2 {args.download_dir}/** | cat | shuf | split -d -l 10000000 -a 3"
process = run(['bash', '-c', split_cmd])
if process.returncode != 0:
            print('ERROR shuffling and splitting the zinc database files. Please check disk space.')
sys.exit(process.returncode)
print('Shuffling and spliting files complete. Time ', time.time() - start)
print("Processing files...")
process_file(args.zinc_dir, args.output_dir, args.arr_idx)
print("Finished processing.")
print(f"Total time: {time.time() - start_time}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--zinc_dir", type=str)
parser.add_argument("--output_dir", type=str)
parser.add_argument("--arr_idx", type=str)
parser.add_argument("--download_list", type=str, default=None)
parser.add_argument("--download_dir", type=str, default=None)
parser.add_argument("-t", "--threads", type=int, default=8)
print("Running ZINC pre-processing script...")
args = parser.parse_args()
main(args)
print("Complete.")
| cheminformatics-master | megamolbart/scripts/process_file.py |
import logging
import torch
from generativesampler_pb2 import EmbeddingList, SmilesList, IterationVal
import generativesampler_pb2_grpc
from megamolbart.inference import MegaMolBART
from cuchemcommon.utils import Singleton
logger = logging.getLogger(__name__)
class GenerativeSampler(generativesampler_pb2_grpc.GenerativeSampler, metaclass=Singleton):
def __init__(self, *args, **kwargs):
decoder_max_seq_len = kwargs['decoder_max_seq_len'] if 'decoder_max_seq_len' in kwargs else None
self.megamolbart = MegaMolBART(decoder_max_seq_len=decoder_max_seq_len)
try:
iteration = int(self.megamolbart.iteration)
except:
iteration = 0
self.iteration = iteration
logger.info(f'Loaded iteration {self.iteration}')
# TODO update to accept batched input if similes2embedding does
# TODO how to handle length overrun for batch processing --> see also MegaMolBART.load_model in inference.py
def SmilesToEmbedding(self, spec, context):
smile_str = ''.join(spec.smiles)
embedding, pad_mask = self.megamolbart.smiles2embedding(smile_str,
pad_length=spec.padding)
dim = embedding.shape
embedding = embedding.flatten().tolist()
return EmbeddingList(embedding=embedding,
dim=dim,
pad_mask=pad_mask)
def EmbeddingToSmiles(self, embedding_spec, context):
'''
Converts input embedding to SMILES.
@param transform_spec: Input spec with embedding and mask.
'''
embedding = torch.FloatTensor(list(embedding_spec.embedding))
pad_mask = torch.BoolTensor(list(embedding_spec.pad_mask))
dim = tuple(embedding_spec.dim)
embedding = torch.reshape(embedding, dim).cuda()
pad_mask = torch.reshape(pad_mask, (dim[0], 1)).cuda()
generated_mols = self.megamolbart.inverse_transform(embedding, pad_mask)
return SmilesList(generatedSmiles=generated_mols)
def FindSimilars(self, spec, context):
smile_str = ''.join(spec.smiles)
generated_df = self.megamolbart.find_similars_smiles(
smile_str,
num_requested=spec.numRequested,
scaled_radius=spec.radius,
force_unique=False)
embeddings = []
for _, row in generated_df.iterrows():
embeddings.append(EmbeddingList(embedding=row.embeddings,
dim=row.embeddings_dim))
return SmilesList(generatedSmiles=generated_df['SMILES'],
embeddings=embeddings)
def Interpolate(self, spec, context):
_, generated_smiles = self.megamolbart.interpolate_smiles(
spec.smiles,
num_points=spec.numRequested,
scaled_radius=spec.radius,
force_unique=False)
return SmilesList(generatedSmiles=generated_smiles)
def GetIteration(self, spec, context):
return IterationVal(iteration=self.iteration)
| cheminformatics-master | megamolbart/megamolbart/service.py |
cheminformatics-master | megamolbart/megamolbart/__init__.py |
|
#!/usr/bin/env python3
import logging
from functools import partial
from pathlib import Path
from typing import List
from rdkit import Chem
import torch
import pandas as pd
from checkpointing import load_checkpoint
from cuchemcommon.workflow import BaseGenerativeWorkflow, add_jitter
from decoder import DecodeSampler
from megatron import get_args
from megatron.initialize import initialize_megatron
from megatron_bart import MegatronBART
from tokenizer import MolEncTokenizer
from util import (REGEX, DEFAULT_CHEM_TOKEN_START, DEFAULT_MAX_SEQ_LEN,
DEFAULT_VOCAB_PATH, CHECKPOINTS_DIR,
DEFAULT_NUM_LAYERS, DEFAULT_D_MODEL, DEFAULT_NUM_HEADS)
logger = logging.getLogger(__name__)
@add_jitter.register(torch.Tensor)
def _(embedding, radius, cnt, shape):
if shape is not None:
embedding = torch.reshape(embedding, (1, shape[0], shape[1])).to(embedding.device)
permuted_emb = embedding.permute(1, 0, 2)
distorteds = []
for i in range(cnt):
noise = torch.normal(0, radius, permuted_emb.shape).to(embedding.device)
distorted = (noise + permuted_emb).permute(1, 0, 2)
distorteds.append(distorted)
return distorteds
class MegaMolBART(BaseGenerativeWorkflow):
def __init__(self,
max_seq_len=DEFAULT_MAX_SEQ_LEN,
vocab_path=DEFAULT_VOCAB_PATH,
regex=REGEX,
default_chem_token_start=DEFAULT_CHEM_TOKEN_START,
checkpoints_dir=CHECKPOINTS_DIR,
num_layers=DEFAULT_NUM_LAYERS,
hidden_size=DEFAULT_D_MODEL,
num_attention_heads=DEFAULT_NUM_HEADS,
decoder_max_seq_len=None) -> None:
super().__init__()
torch.set_grad_enabled(False) # Testing this instead of `with torch.no_grad():` context since it doesn't exit
self.device = 'cuda' # Megatron arg loading seems to only work with GPU
self.min_jitter_radius = 1.0
self.max_model_position_embeddings = max_seq_len
args = {
'num_layers': num_layers,
'hidden_size': hidden_size,
'num_attention_heads': num_attention_heads,
'max_position_embeddings': self.max_model_position_embeddings,
'tokenizer_type': 'GPT2BPETokenizer',
'vocab_file': vocab_path,
'load': checkpoints_dir
}
with torch.no_grad():
initialize_megatron(args_defaults=args, ignore_unknown_args=True)
args = get_args()
self.tokenizer = self.load_tokenizer(args.vocab_file, regex, default_chem_token_start)
self.model = self.load_model(args, self.tokenizer, decoder_max_seq_len)
def _compute_radius(self, scaled_radius): # TODO REMOVE
if scaled_radius:
return float(scaled_radius * self.min_jitter_radius)
else:
return self.min_jitter_radius
def load_tokenizer(self, tokenizer_vocab_path, regex, default_chem_token_start):
"""Load tokenizer from vocab file
Params:
tokenizer_vocab_path: str, path to tokenizer vocab
Returns:
MolEncTokenizer tokenizer object
"""
tokenizer_vocab_path = Path(tokenizer_vocab_path)
tokenizer = MolEncTokenizer.from_vocab_file(
tokenizer_vocab_path,
regex,
default_chem_token_start)
return tokenizer
def load_model(self, args, tokenizer, decoder_max_seq_len=None):
"""Load saved model checkpoint
Params:
tokenizer: MolEncTokenizer tokenizer object
decoder_max_seq_len: int, maximum sequence length
args: Megatron initialized arguments
Returns:
MegaMolBART trained model
"""
vocab_size = len(tokenizer)
pad_token_idx = tokenizer.vocab[tokenizer.pad_token]
# TODO how to handle length overrun for batch processing
if not decoder_max_seq_len:
decoder_max_seq_len = args.max_position_embeddings
sampler = DecodeSampler(tokenizer, decoder_max_seq_len)
model = MegatronBART(
sampler,
pad_token_idx,
vocab_size,
args.hidden_size,
args.num_layers,
args.num_attention_heads,
args.hidden_size * 4,
args.max_position_embeddings,
dropout=0.1,
)
self.iteration = load_checkpoint(model, None, None)
model = model.cuda()
model.eval()
return model
def smiles2embedding(self, smiles, pad_length=None):
"""Calculate embedding and padding mask for smiles with optional extra padding
Params
smiles: string, input SMILES molecule
pad_length: optional extra
Returns
embedding array and boolean mask
"""
assert isinstance(smiles, str)
if pad_length:
assert pad_length >= len(smiles) + 2
tokens = self.tokenizer.tokenize([smiles], pad=True)
# Append to tokens and mask if appropriate
if pad_length:
for i in range(len(tokens['original_tokens'])):
n_pad = pad_length - len(tokens['original_tokens'][i])
tokens['original_tokens'][i] += [self.tokenizer.pad_token] * n_pad
tokens['masked_pad_masks'][i] += [1] * n_pad
token_ids = torch.tensor(self.tokenizer.convert_tokens_to_ids(tokens['original_tokens'])).cuda().T
pad_mask = torch.tensor(tokens['masked_pad_masks']).bool().cuda().T
encode_input = {"encoder_input": token_ids, "encoder_pad_mask": pad_mask}
embedding = self.model.encode(encode_input)
torch.cuda.empty_cache()
return embedding, pad_mask
def inverse_transform(self, embeddings, mem_pad_mask, k=1, sanitize=True):
mem_pad_mask = mem_pad_mask.clone()
smiles_interp_list = []
batch_size = 1 # TODO: parallelize this loop as a batch
with torch.no_grad():
for memory in embeddings:
if isinstance(memory, list):
memory = torch.FloatTensor(memory).cuda()
decode_fn = partial(self.model._decode_fn,
mem_pad_mask=mem_pad_mask.type(torch.LongTensor).cuda(),
memory=memory)
mol_strs, _ = self.model.sampler.beam_decode(decode_fn,
batch_size=batch_size,
device='cuda',
k=k)
mol_strs = sum(mol_strs, []) # flatten list
# TODO: add back sanitization and validity checking once model is trained
                logger.warning('WARNING: MOLECULE VALIDATION AND SANITIZATION CURRENTLY DISABLED')
for smiles in mol_strs:
if sanitize:
mol = Chem.MolFromSmiles(smiles, sanitize=sanitize)
if mol:
sanitized_smiles = Chem.MolToSmiles(mol)
smiles_interp_list.append(sanitized_smiles)
logger.debug(f'Sanitized SMILES {sanitized_smiles} added...')
break
smiles_interp_list.append(smiles)
return smiles_interp_list
def interpolate_molecules(self, smiles1, smiles2, num_interp, tokenizer, k=1):
"""Interpolate between two molecules in embedding space.
Params
smiles1: str, input SMILES molecule
smiles2: str, input SMILES molecule
num_interp: int, number of molecules to interpolate
tokenizer: MolEncTokenizer tokenizer object
k: number of molecules for beam search, default 1. Can increase if there are issues with validity
Returns
            list of interpolated SMILES molecules, their embeddings, the combined
            padding mask and the embedding dimensions
"""
pad_length = max(len(smiles1), len(smiles2)) + 2 # add 2 for start / stop
embedding1, pad_mask1 = self.smiles2embedding(smiles1,
pad_length=pad_length)
embedding2, pad_mask2 = self.smiles2embedding(smiles2,
pad_length=pad_length)
scale = torch.linspace(0.0, 1.0, num_interp + 2)[
1:-1] # skip first and last because they're the selected molecules
scale = scale.unsqueeze(0).unsqueeze(-1).cuda()
interpolated_emb = torch.lerp(embedding1, embedding2, scale).cuda() # dims: batch, tokens, embedding
combined_mask = (pad_mask1 & pad_mask2).bool().cuda()
embeddings = []
dims = []
for emb in interpolated_emb.permute(1, 0, 2):
dims.append(emb.shape)
embeddings.append(emb)
generated_mols = self.inverse_transform(embeddings,
combined_mask,
k=k,
sanitize=True)
generated_mols = [smiles1] + generated_mols + [smiles2]
embeddings = [embedding1] + embeddings + [embedding2]
dims = [embedding1.shape] + dims + [embedding2.shape]
return generated_mols, embeddings, combined_mask, dims
def find_similars_smiles_list(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False):
distance = self._compute_radius(scaled_radius)
logger.info(f'Computing with distance {distance}...')
embedding, pad_mask = self.smiles2embedding(smiles)
neighboring_embeddings = self.addjitter(embedding, distance, cnt=num_requested)
generated_mols = self.inverse_transform(neighboring_embeddings,
pad_mask.bool().cuda(),
k=1, sanitize=True)
if force_unique:
generated_mols = list(set(generated_mols))
generated_mols = [smiles] + generated_mols
neighboring_embeddings = [embedding] + neighboring_embeddings
return generated_mols, neighboring_embeddings, pad_mask
def find_similars_smiles(self,
smiles: str,
num_requested: int = 10,
scaled_radius=None,
force_unique=False):
generated_mols, neighboring_embeddings, pad_mask = \
self.find_similars_smiles_list(smiles,
num_requested=num_requested,
scaled_radius=scaled_radius,
force_unique=force_unique)
# Rest of the applications and libraries use RAPIDS and cuPY libraries.
# For interoperability, we need to convert the embeddings to cupy.
embeddings = []
dims = []
for neighboring_embedding in neighboring_embeddings:
dims.append(neighboring_embedding.shape)
embeddings.append(neighboring_embedding.flatten().tolist())
generated_df = pd.DataFrame({'SMILES': generated_mols,
'embeddings': embeddings,
'embeddings_dim': dims,
'Generated': [True for i in range(len(generated_mols))]})
generated_df.iat[0, 3] = False
if force_unique:
inv_transform_funct = partial(self.inverse_transform,
mem_pad_mask=pad_mask)
generated_df = self.compute_unique_smiles(generated_df,
inv_transform_funct,
scaled_radius=scaled_radius)
return generated_df
def interpolate_smiles(self,
smiles: List,
num_points: int = 10,
scaled_radius=None,
force_unique=False):
num_points = int(num_points)
if len(smiles) < 2:
raise Exception('At-least two or more smiles are expected')
k = 1
result_df = []
for idx in range(len(smiles) - 1):
interpolated_mol, interpolated_embeddings, combined_mask, dims = \
self.interpolate_molecules(smiles[idx],
smiles[idx + 1],
num_points,
self.tokenizer,
k=k)
# Rest of the applications and libraries use RAPIDS and cuPY libraries.
# For interoperability, we need to convert the embeddings to cupy.
embeddings = []
for interpolated_embedding in interpolated_embeddings:
embeddings.append(interpolated_embedding.cpu())
interp_df = pd.DataFrame({'SMILES': interpolated_mol,
'embeddings': embeddings,
'embeddings_dim': dims,
'Generated': [True for i in range(len(interpolated_mol))]})
inv_transform_funct = partial(self.inverse_transform, mem_pad_mask=combined_mask)
            # Mark the source and destinations as not generated
interp_df.iat[0, 3] = False
interp_df.iat[-1, 3] = False
if force_unique:
interp_df = self.compute_unique_smiles(interp_df,
inv_transform_funct,
scaled_radius=scaled_radius)
result_df.append(interp_df)
result_df = pd.concat(result_df)
smile_list = list(result_df['SMILES'])
return result_df, smile_list
| cheminformatics-master | megamolbart/megamolbart/inference.py |
#!/usr/bin/env python
# Generate set of projects mk files.
# Usage: python generate_mk.py PROJECTS_MK_DIR THRUST_SOURCE_DIR
# The program scans through unit tests and examples in THRUST_SOURCE_DIR
# and generates project mk for each of the tests and examples in PROJECTS_MK_DIR
# A single example or unit test source file generates its own executable
# This program is called by a top level Makefile, but can also be used stand-alone for debugging
# This program also generates testing.mk, examples.mk and dependencies.mk
from __future__ import print_function
import sys
import shutil as sh
import os
import glob
import re
test_template = """
TEST_SRC := %(TEST_SRC)s
TEST_NAME := %(TEST_NAME)s
include $(ROOTDIR)/thrust/internal/build/generic_test.mk
"""
example_template = """
EXAMPLE_SRC := %(EXAMPLE_SRC)s
EXAMPLE_NAME := %(EXAMPLE_NAME)s
include $(ROOTDIR)/thrust/internal/build/generic_example.mk
"""
def Glob(pattern, directory,exclude='\B'):
src = glob.glob(os.path.join(directory,pattern))
p = re.compile(exclude)
src = [s for s in src if not p.match(s)]
return src
def generate_test_mk(mk_path, test_path, group, TEST_DIR):
print('Generating makefiles in "'+mk_path+'" for tests in "'+test_path+'"')
src_cu = Glob("*.cu", test_path, ".*testframework.cu$")
src_cxx = Glob("*.cpp", test_path)
src_cu.sort();
src_cxx.sort();
src_all = src_cu + src_cxx;
tests_all = []
dependencies_all = []
for s in src_all:
fn = os.path.splitext(os.path.basename(s));
t = "thrust."+group+"."+fn[0]
e = fn[1]
mkfile = test_template % {"TEST_SRC" : s, "TEST_NAME" : t}
f = open(os.path.join(mk_path,t+".mk"), 'w')
f.write(mkfile)
f.close()
tests_all.append(os.path.join(mk_path,t))
dependencies_all.append(t+": testframework")
return [tests_all, dependencies_all]
def generate_example_mk(mk_path, example_path, group, EXAMPLE_DIR):
print('Generating makefiles in "'+mk_path+'" for examples in "'+example_path+'"')
src_cu = Glob("*.cu", example_path)
src_cxx = Glob("*.cpp", example_path)
src_cu.sort();
src_cxx.sort();
src_all = src_cu + src_cxx;
examples_all = []
for s in src_all:
fn = os.path.splitext(os.path.basename(s));
t = "thrust."+group+"."+fn[0]
e = fn[1]
mkfile = example_template % {"EXAMPLE_SRC" : s, "EXAMPLE_NAME" : t}
f = open(os.path.join(mk_path,t+".mk"), 'w')
f.write(mkfile)
f.close()
examples_all.append(os.path.join(mk_path,t))
return examples_all
## relpath : backported from os.relpath from python 2.6+
def relpath(path, start):
"""Return a relative version of a path"""
import posixpath
if not path:
raise ValueError("no path specified")
start_list = posixpath.abspath(start).split(posixpath.sep)
path_list = posixpath.abspath(path).split(posixpath.sep)
# Work out how much of the filepath is shared by start and path.
i = len(posixpath.commonprefix([start_list, path_list]))
rel_list = [posixpath.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return posixpath.curdir
return posixpath.join(*rel_list)
mk_path=sys.argv[1]
REL_DIR="../../"
if (len(sys.argv) > 2):
root_path=sys.argv[2];
mk_path = relpath(mk_path, root_path)
REL_DIR = relpath(root_path,mk_path)
try:
sh.rmtree(mk_path)
except:
pass
os.makedirs(mk_path)
tests_all, dependencies_all = generate_test_mk(mk_path, "testing/", "test", REL_DIR)
tests_cu, dependencies_cu = generate_test_mk(mk_path, "testing/cuda/", "test.cuda", REL_DIR)
tests_all.extend(tests_cu)
dependencies_all.extend(dependencies_cu)
testing_mk = ""
for t in tests_all:
testing_mk += "PROJECTS += "+t+"\n"
testing_mk += "PROJECTS += internal/build/testframework\n"
f = open(os.path.join(mk_path,"testing.mk"),'w')
f.write(testing_mk)
f.close()
dependencies_mk = ""
for d in dependencies_all:
dependencies_mk += d + "\n"
f = open(os.path.join(mk_path,"dependencies.mk"),'w')
f.write(dependencies_mk)
f.close()
examples_mk = ""
examples_all = generate_example_mk(mk_path, "examples/", "example", REL_DIR)
examples_cuda = generate_example_mk(mk_path, "examples/cuda/", "example.cuda", REL_DIR)
examples_all.extend(examples_cuda)
for e in examples_all:
examples_mk += "PROJECTS += "+e+"\n"
f = open(os.path.join(mk_path,"examples.mk"),'w')
f.write(examples_mk)
f.close()
| thrust-master | generate_mk.py |
#! /usr/bin/env python
# Copyright (c) 2022 NVIDIA Corporation
# Reply-To: Allison Vacanti <[email protected]>
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Released under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
help_text = """%(prog)s [--log-threshold GiB] [--fail-threshold GiB] [--log-file FILE]
This script:
1. Runs `top -bco RES`, continuously extracting the memory usage of each process.
2. If a process uses more than `log_threshold` GiB and exceeds any other recorded
entry for the process, it is stored in `entries`.
3. When this script receives SIGINT, it writes `log_file`, which contains all
   recorded max-memory-per-process entries; entries that exceed `fail_threshold`
   are marked FAIL.
"""
import argparse
import os
import re
import signal
import sys
from subprocess import Popen, PIPE, STDOUT
parser = argparse.ArgumentParser(prog='memmon.py', usage=help_text)
parser.add_argument('--log-threshold', type=float, dest='log_threshold',
default=0.5,
help='Logging threshold in GiB.')
parser.add_argument('--fail-threshold', type=float, dest='fail_threshold',
default=2,
help='Failure threshold in GiB.')
parser.add_argument('--log-file', type=str, dest='log_file', default='memmon_log',
help='Output file for log entries.')
args, unused = parser.parse_known_args()
entries = {}
def signal_handler(sig, frame):
# Sort by mem:
sortentries = sorted(entries.items(), key=lambda x: x[1], reverse=True)
lf = open(args.log_file, "w")
for com, mem in sortentries:
status = "PASS"
if mem >= args.fail_threshold:
status = "FAIL"
line = "%4s | %3.1f GiB | %s\n" % (status, mem, com)
lf.write(line)
lf.close()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Find the toprc config file and configure top's env.
# This config:
# - Hides all columns except for RES and COMMAND
# - Sorts by RES
# - Enables long command strings (-c)
script_dir = os.path.dirname(os.path.realpath(__file__))
config_dir = os.path.join(script_dir, 'memmon_config')
proc = Popen(["top", "-b", "-w", "512"],
stdin=PIPE, stdout=PIPE, stderr=STDOUT,
env={"XDG_CONFIG_HOME": config_dir})
regex = re.compile("^\\s*([0-9.]+[kmgtp]?)\\s+(.+)\\s*$")
# Convert a memory string from top into floating point GiB
def parse_mem(mem_str):
if mem_str[-1] == "k":
return float(mem_str[:-1]) / (1024 * 1024)
elif mem_str[-1] == "m":
return float(mem_str[:-1]) / (1024)
elif mem_str[-1] == "g":
return float(mem_str[:-1])
elif mem_str[-1] == "t":
return float(mem_str[:-1]) * 1024
elif mem_str[-1] == "p": # please no
return float(mem_str[:-1]) * 1024 * 1024
# bytes:
return float(mem_str) / (1024 * 1024 * 1024)
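# Illustrative sketch (hypothetical values; not executed by the monitor):
# parse_mem converts top's RES column into floating point GiB, e.g.
#
#   parse_mem("524288k")    ->  0.5    # 524288 KiB
#   parse_mem("2048m")      ->  2.0    # 2048 MiB
#   parse_mem("1.5g")       ->  1.5
#   parse_mem("1073741824") ->  1.0    # plain bytes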
for line in proc.stdout:
line = line.decode()
match = regex.match(line)
if match:
mem = parse_mem(match.group(1))
if mem < args.log_threshold and mem < args.fail_threshold:
continue
com = match.group(2)
if com in entries and entries[com] > mem:
continue
if mem >= args.fail_threshold:
# Print a notice immediately -- this helps identify the failures
# as they happen, since `com` may not provide enough info.
print("memmon.py failure: Build step exceed memory threshold:\n"
" - Threshold: %3.1f GiB\n"
" - Usage: %3.1f GiB\n"
" - Command: %s" % (args.fail_threshold, mem, com))
entries[com] = mem
| thrust-master | ci/common/memmon.py |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2012-7 Bryce Adelstein Lelbach aka wash <[email protected]>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
###############################################################################
###############################################################################
# Copyright (c) 2018 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# XXX Put code shared with `compare_benchmark_results.py` in a common place.
# XXX Relative uncertainty.
from sys import exit, stdout
from os.path import splitext
from itertools import imap # Lazy map.
from math import sqrt, log10, floor
from collections import deque
from argparse import ArgumentParser as argument_parser
from csv import DictReader as csv_dict_reader
from csv import DictWriter as csv_dict_writer
from re import compile as regex_compile
###############################################################################
def unpack_tuple(f):
"""Return a unary function that calls `f` with its argument unpacked."""
return lambda args: f(*iter(args))
def strip_dict(d):
"""Strip leading and trailing whitespace from all keys and values in `d`."""
d.update({key: value.strip() for (key, value) in d.items()})
def merge_dicts(d0, d1):
"""Create a new `dict` that is the union of `dict`s `d0` and `d1`."""
d = d0.copy()
d.update(d1)
return d
def strip_list(l):
"""Strip leading and trailing whitespace from all values in `l`."""
for i, value in enumerate(l): l[i] = value.strip()
###############################################################################
def int_or_float(x):
"""Convert `x` to either `int` or `float`, preferring `int`.
Raises:
ValueError : If `x` is not convertible to either `int` or `float`
"""
try:
return int(x)
except ValueError:
return float(x)
def try_int_or_float(x):
"""Try to convert `x` to either `int` or `float`, preferring `int`. `x` is
returned unmodified if conversion fails.
"""
try:
return int_or_float(x)
except ValueError:
return x
###############################################################################
def find_significant_digit(x):
"""Return the significant digit of the number x. The result is the number of
digits after the decimal place to round to (negative numbers indicate rounding
before the decimal place)."""
if x == 0: return 0
return -int(floor(log10(abs(x))))
def round_with_int_conversion(x, ndigits = None):
"""Rounds `x` to `ndigits` after the the decimal place. If `ndigits` is less
than 1, convert the result to `int`. If `ndigits` is `None`, the significant
digit of `x` is used."""
if ndigits is None: ndigits = find_significant_digit(x)
x_rounded = round(x, ndigits)
return int(x_rounded) if ndigits < 1 else x_rounded
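# Illustrative sketch (hypothetical values; not used by the script itself):
#
#   find_significant_digit(0.0042)        ->  3     # round to 3 decimal places
#   find_significant_digit(1234.5)        -> -3     # round to the thousands place
#   round_with_int_conversion(0.00456)    ->  0.005
#   round_with_int_conversion(1234.5, -2) ->  1200  # converted to `int`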
###############################################################################
class measured_variable(object):
"""A meta-variable representing measured data. It is composed of three raw
variables plus units meta-data.
Attributes:
quantity (`str`) :
Name of the quantity variable of this object.
uncertainty (`str`) :
Name of the uncertainty variable of this object.
sample_size (`str`) :
Name of the sample size variable of this object.
units (units class or `None`) :
The units the value is measured in.
"""
def __init__(self, quantity, uncertainty, sample_size, units = None):
self.quantity = quantity
self.uncertainty = uncertainty
self.sample_size = sample_size
self.units = units
def as_tuple(self):
return (self.quantity, self.uncertainty, self.sample_size, self.units)
def __iter__(self):
return iter(self.as_tuple())
def __str__(self):
return str(self.as_tuple())
def __repr__(self):
return str(self)
class measured_value(object):
"""An object that represents a value determined by multiple measurements.
Attributes:
quantity (scalar) :
The quantity of the value, e.g. the arithmetic mean.
uncertainty (scalar) :
The measurement uncertainty, e.g. the sample standard deviation.
sample_size (`int`) :
The number of observations contributing to the value.
units (units class or `None`) :
The units the value is measured in.
"""
def __init__(self, quantity, uncertainty, sample_size = 1, units = None):
self.quantity = quantity
self.uncertainty = uncertainty
self.sample_size = sample_size
self.units = units
def as_tuple(self):
return (self.quantity, self.uncertainty, self.sample_size, self.units)
def __iter__(self):
return iter(self.as_tuple())
def __str__(self):
return str(self.as_tuple())
def __repr__(self):
return str(self)
###############################################################################
def arithmetic_mean(X):
"""Computes the arithmetic mean of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
.. math::
u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
"""
return sum(X) / len(X)
def sample_variance(X, u = None):
"""Computes the sample variance of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
* `s` denote the sample standard deviation of `X`.
.. math::
v = \frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}
Args:
X (`Iterable`) : The sequence of values.
u (number) : The arithmetic mean of `X`.
"""
if u is None: u = arithmetic_mean(X)
return sum(imap(lambda X_i: (X_i - u) ** 2, X)) / (len(X) - 1)
def sample_standard_deviation(X, u = None, v = None):
"""Computes the sample standard deviation of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
* `v` denote the sample variance of `X`.
* `s` denote the sample standard deviation of `X`.
.. math::
s &= \sqrt{v}
&= \sqrt{\frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}}
Args:
X (`Iterable`) : The sequence of values.
u (number) : The arithmetic mean of `X`.
v (number) : The sample variance of `X`.
"""
if u is None: u = arithmetic_mean(X)
if v is None: v = sample_variance(X, u)
return sqrt(v)
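# Illustrative sketch (hypothetical sample; not used by the script itself):
#
#   X = [10.0, 11.0, 13.0, 14.0]
#   u = arithmetic_mean(X)                  # 12.0
#   v = sample_variance(X, u)               # (4 + 1 + 1 + 4) / 3 ~= 3.333
#   s = sample_standard_deviation(X, u, v)  # sqrt(10 / 3) ~= 1.826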
def combine_sample_size(As):
"""Computes the combined sample variance of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
.. math::
n = \sum_{i = 0}^{g - 1} n_i
"""
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i), As))
def combine_arithmetic_mean(As, n = None):
"""Computes the combined arithmetic mean of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
.. math::
u = \frac{\sum_{i = 0}^{g - 1} n_i u_i}{n}
"""
if n is None: n = combine_sample_size(As)
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i * u_i), As)) / n
def combine_sample_variance(As, n = None, u = None):
"""Computes the combined sample variance of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `s_i = As[i].uncertainty`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
* `v` denote the sample variance of `X`.
.. math::
v = \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
Args:
As (`Iterable` of `measured_value`s) : The sequence of values.
n (number) : The combined sample sizes of `As`.
u (number) : The combined arithmetic mean of `As`.
"""
if n is None: n = combine_sample_size(As)
if n <= 1: return 0
if u is None: u = combine_arithmetic_mean(As, n)
return sum(imap(unpack_tuple(
lambda u_i, s_i, n_i, t_i: n_i * (u_i - u) ** 2 + (s_i ** 2) * (n_i - 1)
), As)) / (n - 1)
def combine_sample_standard_deviation(As, n = None, u = None, v = None):
"""Computes the combined sample standard deviation of a group of
`measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `s_i = As[i].uncertainty`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
* `v` denote the sample variance of `X`.
* `s` denote the sample standard deviation of `X`.
.. math::
s &= \sqrt{v}
&= \sqrt{\frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}}
Args:
As (`Iterable` of `measured_value`s) : The sequence of values.
n (number) : The combined sample sizes of `As`.
u (number) : The combined arithmetic mean of `As`.
v (number) : The combined sample variance of `As`.
"""
if n is None: n = combine_sample_size(As)
if n <= 1: return 0
if u is None: u = combine_arithmetic_mean(As, n)
if v is None: v = combine_sample_variance(As, n, u)
return sqrt(v)
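# Minimal illustrative sketch (hypothetical observations; never called by the
# script): combining two measurements of the same datapoint into one.
def _example_combine_measurements():
  a = measured_value(10.0, 0.5, 5)  # mean 10.0, stdev 0.5, 5 trials
  b = measured_value(12.0, 0.7, 5)  # mean 12.0, stdev 0.7, 5 trials
  n = combine_sample_size([a, b])                       # 10
  u = combine_arithmetic_mean([a, b], n)                # 11.0
  s = combine_sample_standard_deviation([a, b], n, u)   # 1.2
  return measured_value(u, s, n)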
###############################################################################
def process_program_arguments():
ap = argument_parser(
description = (
"Aggregates the results of multiple runs of benchmark results stored in "
"CSV format."
)
)
ap.add_argument(
"-d", "--dependent-variable",
help = ("Treat the specified three variables as a dependent variable. The "
"1st variable is the measured quantity, the 2nd is the uncertainty "
"of the measurement and the 3rd is the sample size. The defaults "
"are the dependent variables of Thrust's benchmark suite. May be "
"specified multiple times."),
action = "append", type = str, dest = "dependent_variables",
metavar = "QUANTITY,UNCERTAINTY,SAMPLES"
)
ap.add_argument(
"-p", "--preserve-whitespace",
help = ("Don't trim leading and trailing whitespace from each CSV cell."),
action = "store_true", default = False
)
ap.add_argument(
"-o", "--output-file",
help = ("The file that results are written to. If `-`, results are "
"written to stdout."),
action = "store", type = str, default = "-",
metavar = "OUTPUT"
)
ap.add_argument(
"input_files",
help = ("Input CSV files. The first two rows should be a header. The 1st "
"header row specifies the name of each variable, and the 2nd "
"header row specifies the units for that variable."),
type = str, nargs = "+",
metavar = "INPUTS"
)
return ap.parse_args()
###############################################################################
def filter_comments(f, s = "#"):
"""Return an iterator to the file `f` which filters out all lines beginning
with `s`."""
return filter(lambda line: not line.startswith(s), f)
###############################################################################
class io_manager(object):
"""Manages I/O operations and represents the input data as an `Iterable`
sequence of `dict`s.
It is `Iterable` and an `Iterator`. It can be used with `with`.
Attributes:
preserve_whitespace (`bool`) :
If `False`, leading and trailing whitespace is stripped from each CSV cell.
writer (`csv_dict_writer`) :
CSV writer object that the output is written to.
output_file (`file` or `stdout`) :
The output `file` object.
readers (`list` of `csv_dict_reader`s) :
List of input files as CSV reader objects.
input_files (list of `file`s) :
List of input `file` objects.
variable_names (`list` of `str`s) :
Names of the variables, in order.
variable_units (`list` of `str`s) :
Units of the variables, in order.
"""
def __init__(self, input_files, output_file, preserve_whitespace = True):
"""Read input files and open the output file and construct a new `io_manager`
object.
If `preserve_whitespace` is `False`, leading and trailing whitespace is
stripped from each CSV cell.
Raises
AssertionError :
If `len(input_files) <= 0` or `type(preserve_whitespace) != bool`.
"""
assert len(input_files) > 0, "No input files provided."
assert type(preserve_whitespace) == bool
self.preserve_whitespace = preserve_whitespace
self.readers = deque()
self.variable_names = None
self.variable_units = None
self.input_files = deque()
for input_file in input_files:
input_file_object = open(input_file)
reader = csv_dict_reader(filter_comments(input_file_object))
if not self.preserve_whitespace:
strip_list(reader.fieldnames)
if self.variable_names is None:
self.variable_names = reader.fieldnames
else:
# Make sure all inputs have the same schema.
assert self.variable_names == reader.fieldnames, \
"Input file (`" + input_file + "`) variable schema `" + \
str(reader.fieldnames) + "` does not match the variable schema `" + \
str(self.variable_names) + "`."
# Consume the next row, which should be the second line of the header.
variable_units = reader.next()
if not self.preserve_whitespace:
strip_dict(variable_units)
if self.variable_units is None:
self.variable_units = variable_units
else:
# Make sure all inputs have the same units schema.
assert self.variable_units == variable_units, \
"Input file (`" + input_file + "`) units schema `" + \
str(variable_units) + "` does not match the units schema `" + \
str(self.variable_units) + "`."
self.readers.append(reader)
self.input_files.append(input_file_object)
if output_file == "-": # Output to stdout.
self.output_file = stdout
else: # Output to user-specified file.
self.output_file = open(output_file, "w")
self.writer = csv_dict_writer(
self.output_file, fieldnames = self.variable_names
)
def __enter__(self):
"""Called upon entering a `with` statement."""
return self
def __exit__(self, *args):
"""Called upon exiting a `with` statement."""
if self.output_file is stdout:
self.output_file = None
elif self.output_file is not None:
self.output_file.__exit__(*args)
for input_file in self.input_files:
input_file.__exit__(*args)
#############################################################################
# Input Stream.
def __iter__(self):
"""Return an iterator to the input sequence.
This is a requirement for the `Iterable` protocol.
"""
return self
def next(self):
"""Consume and return the next record (a `dict` representing a CSV row) in
the input.
This is a requirement for the `Iterator` protocol.
Raises:
StopIteration : If there is no more input.
"""
if len(self.readers) == 0:
raise StopIteration()
try:
row = self.readers[0].next()
if not self.preserve_whitespace: strip_dict(row)
return row
except StopIteration:
# The current reader is empty, so pop it, pop its input file, close the
# input file, and then call ourselves again.
self.readers.popleft()
self.input_files.popleft().close()
return self.next()
#############################################################################
# Output.
def write_header(self):
"""Write the header for the output CSV file."""
# Write the first line of the header.
self.writer.writeheader()
# Write the second line of the header.
self.writer.writerow(self.variable_units)
def write(self, d):
"""Write a record (a `dict`) to the output CSV file."""
self.writer.writerow(d)
###############################################################################
class dependent_variable_parser(object):
"""Parses a `--dependent-variable=AVG,STDEV,TRIALS` command line argument."""
#############################################################################
# Grammar
# Parse a variable_name.
variable_name_rule = r'[^,]+'
# Parse a variable classification.
dependent_variable_rule = r'(' + variable_name_rule + r')' \
+ r',' \
+ r'(' + variable_name_rule + r')' \
+ r',' \
+ r'(' + variable_name_rule + r')'
engine = regex_compile(dependent_variable_rule)
#############################################################################
def __call__(self, s):
"""Parses the string `s` with the form "AVG,STDEV,TRIALS".
Returns:
A `measured_variable`.
Raises:
AssertionError : If parsing fails.
"""
match = self.engine.match(s)
assert match is not None, \
"Dependent variable (-d) `" +s+ "` is invalid, the format is " + \
"`AVG,STDEV,TRIALS`."
return measured_variable(match.group(1), match.group(2), match.group(3))
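# Illustrative sketch (column names taken from this script's defaults; never
# called by the script itself):
#
#   parse = dependent_variable_parser()
#   mv = parse("STL Average Walltime,STL Walltime Uncertainty,STL Trials")
#   # mv.quantity    == "STL Average Walltime"
#   # mv.uncertainty == "STL Walltime Uncertainty"
#   # mv.sample_size == "STL Trials"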
###############################################################################
class record_aggregator(object):
"""Consumes and combines records and represents the result as an `Iterable`
sequence of `dict`s.
It is `Iterable` and an `Iterator`.
Attributes:
dependent_variables (`list` of `measured_variable`s) :
A list of dependent variables provided on the command line.
dataset (`dict`) :
A mapping of distinguishing (e.g. control + independent) values (`tuple`s
of variable-quantity pairs) to `list`s of dependent values (`dict`s from
variables to lists of cells).
in_order_dataset_keys :
A list of unique dataset keys (e.g. distinguishing variables) in order of
appearance.
"""
parse_dependent_variable = dependent_variable_parser()
def __init__(self, raw_dependent_variables):
"""Parse dependent variables and construct a new `record_aggregator` object.
Raises:
AssertionError : If parsing of dependent variables fails.
"""
self.dependent_variables = []
if raw_dependent_variables is not None:
for variable in raw_dependent_variables:
self.dependent_variables.append(self.parse_dependent_variable(variable))
self.dataset = {}
self.in_order_dataset_keys = deque()
#############################################################################
# Insertion.
def append(self, record):
"""Add `record` to the dataset.
Raises:
ValueError : If any `str`-to-numeric conversions fail.
"""
# The distinguishing variables are the control and independent variables.
# They form the key for each record in the dataset. Records with the same
# distinguishing variables are treated as observations of the same data
# point.
dependent_values = {}
# To allow the same sample size variable to be used for multiple dependent
# variables, we don't pop sample size variables until we're done processing
# all variables.
sample_size_variables = []
# Separate the dependent values from the distinguishing variables and
# perform `str`-to-numeric conversions.
for variable in self.dependent_variables:
quantity, uncertainty, sample_size, units = variable.as_tuple()
dependent_values[quantity] = [int_or_float(record.pop(quantity))]
dependent_values[uncertainty] = [int_or_float(record.pop(uncertainty))]
dependent_values[sample_size] = [int(record[sample_size])]
sample_size_variables.append(sample_size)
# Pop sample size variables.
for sample_size_variable in sample_size_variables:
# Allowed to fail, as we may have duplicates.
record.pop(sample_size_variable, None)
# `dict`s aren't hashable, so create a tuple of key-value pairs.
distinguishing_values = tuple(record.items())
if distinguishing_values in self.dataset:
# These distinguishing values already exist, so get the `dict` they're
# mapped to, look up each key in `dependent_values` in the `dict`, and
# add the corresponding quantity in `dependent_values` to the list in
# the `dict`.
for variable, columns in dependent_values.iteritems():
self.dataset[distinguishing_values][variable] += columns
else:
# These distinguishing values aren't in the dataset, so add them and
# record them in `in_order_dataset_keys`.
self.dataset[distinguishing_values] = dependent_values
self.in_order_dataset_keys.append(distinguishing_values)
#############################################################################
# Postprocessing.
def combine_dependent_values(self, dependent_values):
"""Takes a mapping of dependent variables to lists of cells and returns
a new mapping with the cells combined.
Raises:
AssertionError : If class invariants were violated.
"""
combined_dependent_values = dependent_values.copy()
for variable in self.dependent_variables:
quantity, uncertainty, sample_size, units = variable.as_tuple()
quantities = dependent_values[quantity]
uncertainties = dependent_values[uncertainty]
sample_sizes = dependent_values[sample_size]
if type(sample_size) is list:
# Sample size hasn't been combined yet.
assert len(quantities) == len(uncertainties) \
and len(uncertainties) == len(sample_sizes), \
"Length of quantities list `(" + str(len(quantities)) + ")`, " + \
"length of uncertainties list `(" + str(len(uncertainties)) + \
"),` and length of sample sizes list `(" + str(len(sample_sizes)) + \
")` are not the same."
else:
# Another dependent variable that uses our sample size has combined it
# already.
assert len(quantities) == len(uncertainties), \
"Length of quantities list `(" + str(len(quantities)) + ")` and " + \
"length of uncertainties list `(" + str(len(uncertainties)) + \
")` are not the same."
# Convert the three separate `list`s into one list of `measured_value`s.
measured_values = []
for i in range(len(quantities)):
mv = measured_value(
quantities[i], uncertainties[i], sample_sizes[i], units
)
measured_values.append(mv)
# Combine the `measured_value`s.
combined_sample_size = combine_sample_size(
measured_values
)
combined_arithmetic_mean = combine_arithmetic_mean(
measured_values, combined_sample_size
)
combined_sample_standard_deviation = combine_sample_standard_deviation(
measured_values, combined_sample_size, combined_arithmetic_mean
)
# Round the quantity and uncertainty to the significant digit of
# uncertainty and insert the combined values into the results.
sigdig = find_significant_digit(combined_sample_standard_deviation)
# combined_arithmetic_mean = round_with_int_conversion(
# combined_arithmetic_mean, sigdig
# )
# combined_sample_standard_deviation = round_with_int_conversion(
# combined_sample_standard_deviation, sigdig
# )
combined_dependent_values[quantity] = combined_arithmetic_mean
combined_dependent_values[uncertainty] = combined_sample_standard_deviation
combined_dependent_values[sample_size] = combined_sample_size
return combined_dependent_values
#############################################################################
# Output Stream.
def __iter__(self):
"""Return an iterator to the output sequence of separated distinguishing
variables and dependent variables (a tuple of two `dict`s).
This is a requirement for the `Iterable` protocol.
"""
return self
def records(self):
"""Return an iterator to the output sequence of CSV rows (`dict`s of
variables to values).
"""
return imap(unpack_tuple(lambda dist, dep: merge_dicts(dist, dep)), self)
def next(self):
"""Produce the components of the next output record - a tuple of two
`dict`s. The first `dict` is a mapping of distinguishing variables to
distinguishing values, the second `dict` is a mapping of dependent
variables to combined dependent values. Combining the two dicts forms a
CSV row suitable for output.
This is a requirement for the `Iterator` protocol.
Raises:
StopIteration : If there is no more output.
AssertionError : If class invariants were violated.
"""
assert len(self.dataset.keys()) == len(self.in_order_dataset_keys), \
"Number of dataset keys (`" + str(len(self.dataset.keys())) + \
"`) is not equal to the number of keys in the ordering list (`" + \
str(len(self.in_order_dataset_keys)) + "`)."
if len(self.in_order_dataset_keys) == 0:
raise StopIteration()
# Get the next set of distinguishing values and convert them to a `dict`.
raw_distinguishing_values = self.in_order_dataset_keys.popleft()
distinguishing_values = dict(raw_distinguishing_values)
dependent_values = self.dataset.pop(raw_distinguishing_values)
combined_dependent_values = self.combine_dependent_values(dependent_values)
return (distinguishing_values, combined_dependent_values)
###############################################################################
args = process_program_arguments()
if args.dependent_variables is None:
args.dependent_variables = [
"STL Average Walltime,STL Walltime Uncertainty,STL Trials",
"STL Average Throughput,STL Throughput Uncertainty,STL Trials",
"Thrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials",
"Thrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"
]
# Read input files and open the output file.
with io_manager(args.input_files,
args.output_file,
args.preserve_whitespace) as iom:
# Parse dependent variable options.
ra = record_aggregator(args.dependent_variables)
# Add all input data to the `record_aggregator`.
for record in iom:
ra.append(record)
iom.write_header()
# Write combined results out.
for record in ra.records():
iom.write(record)
| thrust-master | internal/benchmark/combine_benchmark_results.py |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2012-7 Bryce Adelstein Lelbach aka wash <[email protected]>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
###############################################################################
###############################################################################
# Copyright (c) 2018 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# XXX Put code shared with `combine_benchmark_results.py` in a common place.
# XXX Relative uncertainty.
# XXX Create uncertain value class which is quantity + uncertainty.
from sys import exit, stdout
from os.path import splitext
from itertools import imap # Lazy map.
from math import sqrt, log10, floor
from collections import deque
from argparse import ArgumentParser as argument_parser
from argparse import Action as argument_action
from csv import DictReader as csv_dict_reader
from csv import DictWriter as csv_dict_writer
from re import compile as regex_compile
###############################################################################
def unpack_tuple(f):
"""Return a unary function that calls `f` with its argument unpacked."""
return lambda args: f(*iter(args))
def strip_dict(d):
"""Strip leading and trailing whitespace from all keys and values in `d`.
Returns:
The modified dict `d`.
"""
d.update({key: value.strip() for (key, value) in d.items()})
return d
def merge_dicts(d0, d1):
"""Create a new `dict` that is the union of `dict`s `d0` and `d1`."""
d = d0.copy()
d.update(d1)
return d
def change_key_in_dict(d, old_key, new_key):
"""Change the key of the entry in `d` with key `old_key` to `new_key`. If
there is an existing entry with key `new_key`, it is overwritten.
Returns:
The modified dict `d`.
Raises:
KeyError : If `old_key` is not in `d`.
"""
d[new_key] = d.pop(old_key)
return d
def key_from_dict(d):
"""Create a hashable key from a `dict` by converting the `dict` to a tuple."""
return tuple(sorted(d.items()))
def strip_list(l):
"""Strip leading and trailing whitespace from all values in `l`."""
for i, value in enumerate(l): l[i] = value.strip()
return l
def remove_from_list(l, item):
"""Remove the first occurence of `item` from list `l` and return a tuple of
the index that was removed and the element that was removed.
Raises:
ValueError : If `item` is not in `l`.
"""
idx = l.index(item)
item = l.pop(idx)
return (idx, item)
###############################################################################
def int_or_float(x):
"""Convert `x` to either `int` or `float`, preferring `int`.
Raises:
ValueError : If `x` is not convertible to either `int` or `float`
"""
try:
return int(x)
except ValueError:
return float(x)
def try_int_or_float(x):
"""Try to convert `x` to either `int` or `float`, preferring `int`. `x` is
returned unmodified if conversion fails.
"""
try:
return int_or_float(x)
except ValueError:
return x
###############################################################################
def ranges_overlap(x1, x2, y1, y2):
"""Returns true if the ranges `[x1, x2]` and `[y1, y2]` overlap,
where `x1 <= x2` and `y1 <= y2`.
Raises:
AssertionError : If `x1 > x2` or `y1 > y2`.
"""
assert x1 <= x2
assert y1 <= y2
return x1 <= y2 and y1 <= x2
def ranges_overlap_uncertainty(x, x_unc, y, y_unc):
"""Returns true if the ranges `[x - x_unc, x + x_unc]` and
`[y - y_unc, y + y_unc]` overlap, where `x_unc >= 0` and `y_unc >= 0`.
Raises:
AssertionError : If `x_unc < 0` or `y_unc < 0`.
"""
assert x_unc >= 0
assert y_unc >= 0
return ranges_overlap(x - x_unc, x + x_unc, y - y_unc, y + y_unc)
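# Illustrative sketch (hypothetical values; not used by the script itself):
#
#   ranges_overlap(0, 2, 1, 3)                        -> True    # [0, 2] and [1, 3]
#   ranges_overlap(0, 1, 2, 3)                        -> False
#   ranges_overlap_uncertainty(10.0, 0.5, 10.8, 0.4)  -> True    # [9.5, 10.5] and [10.4, 11.2]
#   ranges_overlap_uncertainty(10.0, 0.1, 10.8, 0.1)  -> False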
###############################################################################
# Formulas for propagation of uncertainty from:
#
# https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulas
#
# Even though it's Wikipedia, I trust it as I helped write that table.
#
# XXX Replace with a proper reference.
def uncertainty_multiplicative(f, A, A_abs_unc, B, B_abs_unc):
"""Compute the propagated uncertainty from the multiplication of two
uncertain values, `A +/- A_abs_unc` and `B +/- B_abs_unc`. Given `f = AB` or
`f = A/B`, where `A != 0` and `B != 0`, the uncertainty in `f` is
approximately:
.. math::
\sigma_f = |f| \sqrt{\frac{\sigma_A}{A} ^ 2 + \frac{\sigma_B}{B} ^ 2}
Raises:
ZeroDivisionError : If `A == 0` or `B == 0`.
"""
return abs(f) * sqrt((A_abs_unc / A) ** 2 + (B_abs_unc / B) ** 2);
def uncertainty_additive(c, A_abs_unc, d, B_abs_unc):
"""Compute the propagated uncertainty from addition of two uncertain values,
`A +/- A_abs_unc` and `B +/- B_abs_unc`. Given `f = cA + dB`, where `c` and
`d` are certain constants, the uncertainty in `f` is approximately:
.. math::
\sigma_f = \sqrt{c^2 \sigma_A^2 + d^2 \sigma_B^2}
"""
return sqrt(((c ** 2) * (A_abs_unc ** 2)) + ((d ** 2) * (B_abs_unc ** 2)))
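# Minimal illustrative sketch (hypothetical measurements; never called by the
# script): propagating uncertainty through f = A / B and g = A - B.
def _example_uncertainty_propagation():
  A, A_unc = 10.0, 0.3  # e.g. an observed walltime and its uncertainty
  B, B_unc = 2.0, 0.1   # e.g. a baseline walltime and its uncertainty
  f = A / B
  f_unc = uncertainty_multiplicative(f, A, A_unc, B, B_unc)  # ~0.29
  g = A - B
  g_unc = uncertainty_additive(1.0, A_unc, -1.0, B_unc)      # ~0.32
  return (f, f_unc, g, g_unc)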
###############################################################################
# XXX Create change class.
def absolute_change(old, new):
"""Computes the absolute change from old to new:
.. math::
absolute_change = new - old
"""
return new - old
def absolute_change_uncertainty(old, old_unc, new, new_unc):
"""Computes the uncertainty in the absolute change from old to new and returns
a tuple of the absolute change and the absolute change uncertainty.
"""
absolute_change = new - old
absolute_change_unc = uncertainty_additive(1.0, new_unc, -1.0, old_unc)
return (absolute_change, absolute_change_unc)
def percent_change(old, new):
"""Computes the percent change from old to new:
.. math::
percent_change = 100 \frac{new - old}{abs(old)}
"""
return float(new - old) / abs(old)
def percent_change_uncertainty(old, old_unc, new, new_unc):
"""Computes the uncertainty in the percent change from old to new and returns
a tuple of the absolute change, the absolute change uncertainty, the percent
change and the percent change uncertainty.
"""
# Let's break this down into a few sub-operations:
#
# absolute_change = new - old <- Additive propagation.
# relative_change = change / abs(old) <- Multiplicative propagation.
# percent_change = 100 * relative_change <- Multiplicative propagation.
if old == 0:
# We can't compute relative change because the old value is 0.
return (float("nan"), float("nan"), float("nan"), float("nan"))
(absolute_change, absolute_change_unc) = absolute_change_uncertainty(
old, old_unc, new, new_unc
)
if absolute_change == 0:
# We can't compute relative change uncertainty because the relative
# uncertainty of a value of 0 is undefined.
return (absolute_change, absolute_change_unc, float("nan"), float("nan"))
relative_change = float(absolute_change) / abs(old)
relative_change_unc = uncertainty_multiplicative(
relative_change, absolute_change, absolute_change_unc, old, old_unc
)
percent_change = 100.0 * relative_change
percent_change_unc = uncertainty_multiplicative(
percent_change, 100.0, 0.0, relative_change, relative_change_unc
)
return (
absolute_change, absolute_change_unc, percent_change, percent_change_unc
)
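# Illustrative sketch (hypothetical baseline/observed values; never called by
# the script). The result tuple is (absolute change, its uncertainty,
# percent change, its uncertainty):
#
#   percent_change_uncertainty(100.0, 2.0, 90.0, 3.0)
#   # -> (-10.0, ~3.61, -10.0, ~3.61)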
###############################################################################
def find_significant_digit(x):
"""Return the significant digit of the number x. The result is the number of
digits after the decimal place to round to (negative numbers indicate rounding
before the decimal place)."""
if x == 0: return 0
return -int(floor(log10(abs(x))))
def round_with_int_conversion(x, ndigits = None):
"""Rounds `x` to `ndigits` after the the decimal place. If `ndigits` is less
than 1, convert the result to `int`. If `ndigits` is `None`, the significant
digit of `x` is used."""
if ndigits is None: ndigits = find_significant_digit(x)
x_rounded = round(x, ndigits)
return int(x_rounded) if ndigits < 1 else x_rounded
###############################################################################
class measured_variable(object):
"""A meta-variable representing measured data. It is composed of three raw
variables plus units meta-data.
Attributes:
quantity (`str`) :
Name of the quantity variable of this object.
uncertainty (`str`) :
Name of the uncertainty variable of this object.
sample_size (`str`) :
Name of the sample size variable of this object.
units (units class or `None`) :
The units the value is measured in.
"""
def __init__(self, quantity, uncertainty, sample_size, units = None):
self.quantity = quantity
self.uncertainty = uncertainty
self.sample_size = sample_size
self.units = units
def as_tuple(self):
return (self.quantity, self.uncertainty, self.sample_size, self.units)
def __iter__(self):
return iter(self.as_tuple())
def __str__(self):
return str(self.as_tuple())
def __repr__(self):
return str(self)
class measured_value(object):
"""An object that represents a value determined by multiple measurements.
Attributes:
quantity (scalar) :
The quantity of the value, e.g. the arithmetic mean.
uncertainty (scalar) :
The measurement uncertainty, e.g. the sample standard deviation.
sample_size (`int`) :
The number of observations contributing to the value.
units (units class or `None`) :
The units the value is measured in.
"""
def __init__(self, quantity, uncertainty, sample_size = 1, units = None):
self.quantity = quantity
self.uncertainty = uncertainty
self.sample_size = sample_size
self.units = units
def as_tuple(self):
return (self.quantity, self.uncertainty, self.sample_size, self.units)
def __iter__(self):
return iter(self.as_tuple())
def __str__(self):
return str(self.as_tuple())
def __repr__(self):
return str(self)
###############################################################################
def arithmetic_mean(X):
"""Computes the arithmetic mean of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
.. math::
u = \frac{\sum_{i = 0}^{n - 1} X_i}{n}
"""
return sum(X) / len(X)
def sample_variance(X, u = None):
"""Computes the sample variance of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
* `s` denote the sample standard deviation of `X`.
.. math::
v = \frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}
Args:
X (`Iterable`) : The sequence of values.
u (number) : The arithmetic mean of `X`.
"""
if u is None: u = arithmetic_mean(X)
return sum(imap(lambda X_i: (X_i - u) ** 2, X)) / (len(X) - 1)
def sample_standard_deviation(X, u = None, v = None):
"""Computes the sample standard deviation of the sequence `X`.
Let:
* `n = len(X)`.
* `u` denote the arithmetic mean of `X`.
* `v` denote the sample variance of `X`.
* `s` denote the sample standard deviation of `X`.
.. math::
s &= \sqrt{v}
&= \sqrt{\frac{\sum_{i = 0}^{n - 1} (X_i - u)^2}{n - 1}}
Args:
X (`Iterable`) : The sequence of values.
u (number) : The arithmetic mean of `X`.
v (number) : The sample variance of `X`.
"""
if u is None: u = arithmetic_mean(X)
if v is None: v = sample_variance(X, u)
return sqrt(v)
def combine_sample_size(As):
"""Computes the combined sample variance of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
.. math::
n = \sum_{i = 0}^{g - 1} n_i
"""
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i), As))
def combine_arithmetic_mean(As, n = None):
"""Computes the combined arithmetic mean of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
.. math::
u = \frac{\sum_{i = 0}^{g - 1} n_i u_i}{n}
"""
if n is None: n = combine_sample_size(As)
return sum(imap(unpack_tuple(lambda u_i, s_i, n_i, t_i: n_i * u_i), As)) / n
def combine_sample_variance(As, n = None, u = None):
"""Computes the combined sample variance of a group of `measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `s_i = As[i].uncertainty`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
* `v` denote the sample variance of `X`.
.. math::
v = \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
Args:
As (`Iterable` of `measured_value`s) : The sequence of values.
n (number) : The combined sample sizes of `As`.
u (number) : The combined arithmetic mean of `As`.
"""
if n is None: n = combine_sample_size(As)
if n <= 1: return 0
if u is None: u = combine_arithmetic_mean(As, n)
return sum(imap(unpack_tuple(
lambda u_i, s_i, n_i, t_i: n_i * (u_i - u) ** 2 + (s_i ** 2) * (n_i - 1)
), As)) / (n - 1)
def combine_sample_standard_deviation(As, n = None, u = None, v = None):
"""Computes the combined sample standard deviation of a group of
`measured_value`s.
Let:
* `g = len(As)`.
* `u_i = As[i].quantity`.
* `s_i = As[i].uncertainty`.
* `n_i = As[i].samples`.
* `n` denote the combined sample size of `As`.
* `u` denote the arithmetic mean of the quantities of `As`.
* `v` denote the sample variance of `X`.
* `s` denote the sample standard deviation of `X`.
.. math::
v &= \frac{(\sum_{i = 0}^{g - 1} n_i (u_i - u)^2 + s_i^2 (n_i - 1))}{n - 1}
s &= \sqrt{v}
Args:
As (`Iterable` of `measured_value`s) : The sequence of values.
n (number) : The combined sample sizes of `As`.
u (number) : The combined arithmetic mean of `As`.
v (number) : The combined sample variance of `As`.
"""
if n is None: n = combine_sample_size(As)
if n <= 1: return 0
if u is None: u = combine_arithmetic_mean(As, n)
if v is None: v = combine_sample_variance(As, n, u)
return sqrt(v)
###############################################################################
def store_const_multiple(const, *destinations):
"""Returns an `argument_action` class that sets multiple argument
destinations (`destinations`) to `const`."""
class store_const_multiple_action(argument_action):
def __init__(self, *args, **kwargs):
super(store_const_multiple_action, self).__init__(
metavar = None, nargs = 0, const = const, *args, **kwargs
)
def __call__(self, parser, namespace, values, option_string = None):
for destination in destinations:
setattr(namespace, destination, const)
return store_const_multiple_action
def store_true_multiple(*destinations):
"""Returns an `argument_action` class that sets multiple argument
destinations (`destinations`) to `True`."""
return store_const_multiple(True, *destinations)
def store_false_multiple(*destinations):
"""Returns an `argument_action` class that sets multiple argument
destinations (`destinations`) to `False`."""
return store_const_multiple(False, *destinations)
###############################################################################
def process_program_arguments():
ap = argument_parser(
description = (
"Compares two sets of combined performance results and identifies "
"statistically significant changes."
)
)
ap.add_argument(
"baseline_input_file",
help = ("CSV file containing the baseline performance results. The first "
"two rows should be a header. The 1st header row specifies the "
"name of each variable, and the 2nd header row specifies the units "
"for that variable. The baseline results may be a superset of the "
"observed performance results, but the reverse is not true. The "
"baseline results must contain data for every datapoint in the "
"observed performance results."),
type = str
)
ap.add_argument(
"observed_input_file",
help = ("CSV file containing the observed performance results. The first "
"two rows should be a header. The 1st header row specifies the name "
"of header row specifies the units for that variable."),
type = str
)
ap.add_argument(
"-o", "--output-file",
help = ("The file that results are written to. If `-`, results are "
"written to stdout."),
action = "store", type = str, default = "-",
metavar = "OUTPUT"
)
ap.add_argument(
"-c", "--control-variable",
help = ("Treat the specified variable as a control variable. This means "
"it will be filtered out when forming dataset keys. For example, "
"this could be used to ignore a timestamp variable that is "
"different in the baseline and observed results. May be specified "
"multiple times."),
action = "append", type = str, dest = "control_variables", default = [],
metavar = "QUANTITY"
)
ap.add_argument(
"-d", "--dependent-variable",
help = ("Treat the specified three variables as a dependent variable. The "
"1st variable is the measured quantity, the 2nd is the uncertainty "
"of the measurement and the 3rd is the sample size. The defaults "
"are the dependent variables of Thrust's benchmark suite. May be "
"specified multiple times."),
action = "append", type = str, dest = "dependent_variables", default = [],
metavar = "QUANTITY,UNCERTAINTY,SAMPLES"
)
ap.add_argument(
"-t", "--change-threshold",
help = ("Treat relative changes less than this amount (a percentage) as "
"statistically insignificant. The default is 5%%."),
action = "store", type = float, default = 5,
metavar = "PERCENTAGE"
)
ap.add_argument(
"-p", "--preserve-whitespace",
help = ("Don't trim leading and trailing whitespace from each CSV cell."),
action = "store_true", default = False
)
ap.add_argument(
"--output-all-variables",
help = ("Don't omit original absolute values in output."),
action = "store_true", default = False
)
ap.add_argument(
"--output-all-datapoints",
help = ("Don't omit datapoints that are statistically indistinguishable "
"in output."),
action = "store_true", default = False
)
ap.add_argument(
"-a", "--output-all",
help = ("Equivalent to `--output-all-variables --output-all-datapoints`."),
action = store_true_multiple("output_all_variables", "output_all_datapoints")
)
return ap.parse_args()
###############################################################################
def filter_comments(f, s = "#"):
"""Return an iterator to the file `f` which filters out all lines beginning
with `s`."""
return filter(lambda line: not line.startswith(s), f)
###############################################################################
class io_manager(object):
"""Manages I/O operations and represents the input data as an `Iterable`
sequence of `dict`s.
It is `Iterable` and an `Iterator`. It can be used with `with`.
Attributes:
preserve_whitespace (`bool`) :
If `False`, leading and trailing whitespace is stripped from each CSV cell.
writer (`csv_dict_writer`) :
CSV writer object that the output is written to.
output_file (`file` or `stdout`) :
The output `file` object.
baseline_reader (`csv_dict_reader`) :
CSV reader object for the baseline results.
observed_reader (`csv_dict_reader`) :
CSV reader object for the observed results.
baseline_input_file (`file`) :
`file` object for the baseline results.
observed_input_file (`file`) :
`file` object for the observed results.
variable_names (`list` of `str`s) :
Names of the variables, in order.
variable_units (`list` of `str`s) :
Units of the variables, in order.
"""
def __init__(self,
baseline_input_file, observed_input_file,
output_file,
preserve_whitespace = False):
"""Read input files and open the output file and construct a new `io_manager`
object.
If `preserve_whitespace` is `False`, leading and trailing whitespace is
stripped from each CSV cell.
Raises
AssertionError :
If `type(preserve_whitespace) != bool`.
"""
assert type(preserve_whitespace) == bool
self.preserve_whitespace = preserve_whitespace
# Open baseline results.
self.baseline_input_file = open(baseline_input_file)
self.baseline_reader = csv_dict_reader(
filter_comments(self.baseline_input_file)
)
if not self.preserve_whitespace:
strip_list(self.baseline_reader.fieldnames)
self.variable_names = list(self.baseline_reader.fieldnames) # Copy.
self.variable_units = self.baseline_reader.next()
if not self.preserve_whitespace:
strip_dict(self.variable_units)
# Open observed results.
self.observed_input_file = open(observed_input_file)
self.observed_reader = csv_dict_reader(
filter_comments(self.observed_input_file)
)
if not self.preserve_whitespace:
strip_list(self.observed_reader.fieldnames)
# Make sure all inputs have the same variables schema.
assert self.variable_names == self.observed_reader.fieldnames, \
"Observed results input file (`" + observed_input_file + "`) " + \
"variable schema `" + str(self.observed_reader.fieldnames) + "` does " + \
"not match the baseline results input file (`" + baseline_input_file + \
"`) variable schema `" + str(self.variable_names) + "`."
# Consume the next row, which should be the second line of the header.
observed_variable_units = self.observed_reader.next()
if not self.preserve_whitespace:
strip_dict(observed_variable_units)
# Make sure all inputs have the same units schema.
assert self.variable_units == observed_variable_units, \
"Observed results input file (`" + observed_input_file + "`) " + \
"units schema `" + str(observed_variable_units) + "` does not " + \
"match the baseline results input file (`" + baseline_input_file + \
"`) units schema `" + str(self.variable_units) + "`."
if output_file == "-": # Output to stdout.
self.output_file = stdout
else: # Output to user-specified file.
self.output_file = open(output_file, "w")
self.writer = csv_dict_writer(
self.output_file, fieldnames = self.variable_names
)
def __enter__(self):
"""Called upon entering a `with` statement."""
return self
def __exit__(self, *args):
"""Called upon exiting a `with` statement."""
if self.output_file is stdout:
self.output_file = None
elif self.output_file is not None:
self.output_file.__exit__(*args)
self.baseline_input_file.__exit__(*args)
self.observed_input_file.__exit__(*args)
def append_variable(self, name, units):
"""Add a new variable to the output schema."""
self.variable_names.append(name)
self.variable_units.update({name : units})
# Update CSV writer field names.
self.writer.fieldnames = self.variable_names
def insert_variable(self, idx, name, units):
"""Insert a new variable into the output schema at index `idx`."""
self.variable_names.insert(idx, name)
self.variable_units.update({name : units})
# Update CSV writer field names.
self.writer.fieldnames = self.variable_names
def remove_variable(self, name):
"""Remove variable from the output schema and return a tuple of the variable
index and the variable units.
Raises:
ValueError : If `name` is not in the output schema.
"""
# Remove the variable and get its index, which we'll need to remove the
# corresponding units entry.
(idx, item) = remove_from_list(self.variable_names, name)
# Remove the units entry.
units = self.variable_units.pop(item)
# Update CSV writer field names.
self.writer.fieldnames = self.variable_names
return (idx, units)
#############################################################################
# Input Stream.
def baseline(self):
"""Return an iterator to the baseline results input sequence."""
return imap(lambda row: strip_dict(row), self.baseline_reader)
def observed(self):
"""Return an iterator to the observed results input sequence."""
return imap(lambda row: strip_dict(row), self.observed_reader)
#############################################################################
# Output.
def write_header(self):
"""Write the header for the output CSV file."""
# Write the first line of the header.
self.writer.writeheader()
# Write the second line of the header.
self.writer.writerow(self.variable_units)
def write(self, d):
"""Write a record (a `dict`) to the output CSV file."""
self.writer.writerow(d)
###############################################################################
class dependent_variable_parser(object):
"""Parses a `--dependent-variable=AVG,STDEV,TRIALS` command line argument."""
#############################################################################
# Grammar
# Parse a variable_name.
variable_name_rule = r'[^,]+'
# Parse a variable classification.
dependent_variable_rule = r'(' + variable_name_rule + r')' \
+ r',' \
+ r'(' + variable_name_rule + r')' \
+ r',' \
+ r'(' + variable_name_rule + r')'
engine = regex_compile(dependent_variable_rule)
#############################################################################
def __call__(self, s):
"""Parses the string `s` with the form "AVG,STDEV,TRIALS".
Returns:
A `measured_variable`.
Raises:
AssertionError : If parsing fails.
"""
match = self.engine.match(s)
assert match is not None, \
"Dependent variable (-d) `" +s+ "` is invalid, the format is " + \
"`AVG,STDEV,TRIALS`."
return measured_variable(match.group(1), match.group(2), match.group(3))
###############################################################################
class record_aggregator(object):
"""Consumes and combines records and represents the result as an `Iterable`
sequence of `dict`s.
It is `Iterable` and an `Iterator`.
Attributes:
dependent_variables (`list` of `measured_variable`s) :
A list of dependent variables provided on the command line.
control_variables (`list` of `str`s) :
A list of control variables provided on the command line.
dataset (`dict`) :
A mapping of distinguishing (e.g. control + independent) values (`tuple`s
of variable-quantity pairs) to `list`s of dependent values (`dict`s from
variables to lists of cells).
in_order_dataset_keys :
A list of unique dataset keys (e.g. distinguishing variables) in order of
appearance.
"""
def __init__(self, dependent_variables, control_variables):
"""Construct a new `record_aggregator` object.
Raises:
AssertionError : If parsing of dependent variables fails.
"""
self.dependent_variables = dependent_variables
self.control_variables = control_variables
self.dataset = {}
self.in_order_dataset_keys = deque()
#############################################################################
# Insertion.
def key_from_dict(self, d):
"""Create a hashable key from a `dict` by filtering out control variables
and then converting the `dict` to a tuple.
Raises:
AssertionError : If any control variable was not found in `d`.
"""
distinguishing_values = d.copy()
# Filter out control variables.
for var in self.control_variables:
distinguishing_values.pop(var, None)
return key_from_dict(distinguishing_values)
def append(self, record):
"""Add `record` to the dataset.
Raises:
ValueError : If any `str`-to-numeric conversions fail.
"""
# The distinguishing variables are the control and independent variables.
# They form the key for each record in the dataset. Records with the same
# distinguishing variables are treated as observations of the same
# datapoint.
dependent_values = {}
# To allow the same sample size variable to be used for multiple dependent
# variables, we don't pop sample size variables until we're done processing
# all variables.
sample_size_variables = []
# Separate the dependent values from the distinguishing variables and
# perform `str`-to-numeric conversions.
for var in self.dependent_variables:
quantity, uncertainty, sample_size, units = var.as_tuple()
dependent_values[quantity] = [int_or_float(record.pop(quantity))]
dependent_values[uncertainty] = [int_or_float(record.pop(uncertainty))]
dependent_values[sample_size] = [int(record[sample_size])]
sample_size_variables.append(sample_size)
# Pop sample size variables.
for var in sample_size_variables:
# Allowed to fail, as we may have duplicates.
record.pop(var, None)
distinguishing_values = self.key_from_dict(record)
if distinguishing_values in self.dataset:
# These distinguishing values already exist, so get the `dict` they're
# mapped to, look up each key in `dependent_values` in the `dict`, and
# add the corresponding quantity in `dependent_values` to the list in
# the `dict`.
for var, columns in dependent_values.iteritems():
self.dataset[distinguishing_values][var] += columns
else:
# These distinguishing values aren't in the dataset, so add them and
# record them in `in_order_dataset_keys`.
self.dataset[distinguishing_values] = dependent_values
self.in_order_dataset_keys.append(distinguishing_values)
#############################################################################
# Postprocessing.
def combine_dependent_values(self, dependent_values):
"""Takes a mapping of dependent variables to lists of cells and returns
a new mapping with the cells combined.
Raises:
AssertionError : If class invariants were violated.
"""
combined_dependent_values = dependent_values.copy()
for var in self.dependent_variables:
quantity, uncertainty, sample_size, units = var.as_tuple()
quantities = dependent_values[quantity]
uncertainties = dependent_values[uncertainty]
sample_sizes = dependent_values[sample_size]
if type(sample_size) is list:
# Sample size hasn't been combined yet.
assert len(quantities) == len(uncertainties) \
and len(uncertainties) == len(sample_sizes), \
"Length of quantities list `(" + str(len(quantities)) + ")`, " + \
"length of uncertainties list `(" + str(len(uncertainties)) + \
"),` and length of sample sizes list `(" + str(len(sample_sizes)) + \
")` are not the same."
else:
# Another dependent variable that uses our sample size has combined it
# already.
assert len(quantities) == len(uncertainties), \
"Length of quantities list `(" + str(len(quantities)) + ")` and " + \
"length of uncertainties list `(" + str(len(uncertainties)) + \
")` are not the same."
# Convert the three separate `list`s into one list of `measured_value`s.
measured_values = []
for i in range(len(quantities)):
mv = measured_value(
quantities[i], uncertainties[i], sample_sizes[i], units
)
measured_values.append(mv)
# Combine the `measured_value`s.
combined_sample_size = combine_sample_size(
measured_values
)
combined_arithmetic_mean = combine_arithmetic_mean(
measured_values, combined_sample_size
)
combined_sample_standard_deviation = combine_sample_standard_deviation(
measured_values, combined_sample_size, combined_arithmetic_mean
)
# Round the quantity and uncertainty to the significant digit of
# uncertainty and insert the combined values into the results.
sigdig = find_significant_digit(combined_sample_standard_deviation)
# combined_arithmetic_mean = round_with_int_conversion(
# combined_arithmetic_mean, sigdig
# )
# combined_sample_standard_deviation = round_with_int_conversion(
# combined_sample_standard_deviation, sigdig
# )
combined_dependent_values[quantity] = combined_arithmetic_mean
combined_dependent_values[uncertainty] = combined_sample_standard_deviation
combined_dependent_values[sample_size] = combined_sample_size
return combined_dependent_values
#############################################################################
# Output Stream.
def __iter__(self):
"""Return an iterator to the output sequence of separated distinguishing
variables and dependent variables (a tuple of two `dict`s).
This is a requirement for the `Iterable` protocol.
"""
return self
def records(self):
"""Return an iterator to the output sequence of CSV rows (`dict`s of
variables to values).
"""
return imap(unpack_tuple(lambda dist, dep: merge_dicts(dist, dep)), self)
def next(self):
"""Produce the components of the next output record - a tuple of two
`dict`s. The first `dict` is a mapping of distinguishing variables to
distinguishing values, the second `dict` is a mapping of dependent
variables to combined dependent values. Combining the two dicts forms a
CSV row suitable for output.
This is a requirement for the `Iterator` protocol.
Raises:
StopIteration : If there is no more output.
AssertionError : If class invariants were violated.
"""
assert len(self.dataset.keys()) == len(self.in_order_dataset_keys), \
"Number of dataset keys (`" + str(len(self.dataset.keys())) + \
"`) is not equal to the number of keys in the ordering list (`" + \
str(len(self.in_order_dataset_keys)) + "`)."
if len(self.in_order_dataset_keys) == 0:
raise StopIteration()
# Get the next set of distinguishing values and convert them to a `dict`.
raw_distinguishing_values = self.in_order_dataset_keys.popleft()
distinguishing_values = dict(raw_distinguishing_values)
dependent_values = self.dataset.pop(raw_distinguishing_values)
combined_dependent_values = self.combine_dependent_values(dependent_values)
return (distinguishing_values, combined_dependent_values)
def __getitem__(self, distinguishing_values):
"""Produce the dependent component, a `dict` mapping dependent variables to
combined dependent values, associated with `distinguishing_values`.
Args:
distinguishing_values (`dict`) :
A `dict` mapping distinguishing variables to distinguishing values.
Raises:
KeyError : If `distinguishing_values` is not in the dataset.
"""
raw_distinguishing_values = self.key_from_dict(distinguishing_values)
dependent_values = self.dataset[raw_distinguishing_values]
combined_dependent_values = self.combine_dependent_values(dependent_values)
return combined_dependent_values
###############################################################################
args = process_program_arguments()
if len(args.dependent_variables) == 0:
args.dependent_variables = [
"STL Average Walltime,STL Walltime Uncertainty,STL Trials",
"STL Average Throughput,STL Throughput Uncertainty,STL Trials",
"Thrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials",
"Thrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"
]
# Parse dependent variable options.
dependent_variables = []
parse_dependent_variable = dependent_variable_parser()
#if args.dependent_variables is not None:
for var in args.dependent_variables:
dependent_variables.append(parse_dependent_variable(var))
# Read input files and open the output file.
with io_manager(args.baseline_input_file,
args.observed_input_file,
args.output_file,
args.preserve_whitespace) as iom:
# Create record aggregators.
baseline_ra = record_aggregator(dependent_variables, args.control_variables)
observed_ra = record_aggregator(dependent_variables, args.control_variables)
# Duplicate dependent variables: one for baseline results, one for observed
# results.
baseline_suffix = " - `{0}`".format(
args.baseline_input_file
)
observed_suffix = " - `{0}`".format(
args.observed_input_file
)
for var in dependent_variables:
# Remove the existing quantity variable:
#
# [ ..., a, b, c, ... ]
# ^- remove b at index i
#
(quantity_idx, quantity_units) = iom.remove_variable(var.quantity)
# If the `--output-all-variables` option was specified, add the new baseline
# and observed quantity variables. Note that we insert in the reverse of
# the order we desire (which is baseline then observed):
#
# [ ..., a, b_1, c, ... ]
# ^- insert b_1 at index i
#
# [ ..., a, b_0, b_1, c, ... ]
# ^- insert b_0 at index i
#
if args.output_all_variables:
iom.insert_variable(
quantity_idx, var.quantity + observed_suffix, quantity_units
)
iom.insert_variable(
quantity_idx, var.quantity + baseline_suffix, quantity_units
)
# Remove the existing uncertainty variable.
(uncertainty_idx, uncertainty_units) = iom.remove_variable(var.uncertainty)
# If the `--output-all-variables` option was specified, add the new baseline
# and observed uncertainty variables.
if args.output_all_variables:
iom.insert_variable(
uncertainty_idx, var.uncertainty + observed_suffix, uncertainty_units
)
iom.insert_variable(
uncertainty_idx, var.uncertainty + baseline_suffix, uncertainty_units
)
try:
# Remove the existing sample size variable.
(sample_size_idx, sample_size_units) = iom.remove_variable(var.sample_size)
# If the `--output-all-variables` option was specified, add the new
# baseline and observed sample size variables.
if args.output_all_variables:
iom.insert_variable(
sample_size_idx, var.sample_size + observed_suffix, sample_size_units
)
iom.insert_variable(
sample_size_idx, var.sample_size + baseline_suffix, sample_size_units
)
except ValueError:
# This is alright, because dependent variables may share the same sample
# size variable.
pass
for var in args.control_variables:
iom.remove_variable(var)
# Add change variables.
absolute_change_suffix = " - Change (`{0}` - `{1}`)".format(
args.observed_input_file, args.baseline_input_file
)
percent_change_suffix = " - % Change (`{0}` to `{1}`)".format(
args.observed_input_file, args.baseline_input_file
)
for var in dependent_variables:
iom.append_variable(var.quantity + absolute_change_suffix, var.units)
iom.append_variable(var.uncertainty + absolute_change_suffix, var.units)
iom.append_variable(var.quantity + percent_change_suffix, "")
iom.append_variable(var.uncertainty + percent_change_suffix, "")
# Add all baseline input data to the `record_aggregator`.
for record in iom.baseline():
baseline_ra.append(record)
for record in iom.observed():
observed_ra.append(record)
iom.write_header()
# Compare and output results.
for distinguishing_values, observed_dependent_values in observed_ra:
try:
baseline_dependent_values = baseline_ra[distinguishing_values]
except KeyError:
assert False, \
"Distinguishing value `" + \
str(baseline_ra.key_from_dict(distinguishing_values)) + \
"` was not found in the baseline results."
statistically_significant_change = False
record = distinguishing_values.copy()
# Compute changes, add the values and changes to the record, and identify
# changes that are statistically significant.
for var in dependent_variables:
# Compute changes.
baseline_quantity = baseline_dependent_values[var.quantity]
baseline_uncertainty = baseline_dependent_values[var.uncertainty]
baseline_sample_size = baseline_dependent_values[var.sample_size]
observed_quantity = observed_dependent_values[var.quantity]
observed_uncertainty = observed_dependent_values[var.uncertainty]
observed_sample_size = observed_dependent_values[var.sample_size]
(abs_change, abs_change_unc, per_change, per_change_unc) = \
percent_change_uncertainty(
baseline_quantity, baseline_uncertainty,
observed_quantity, observed_uncertainty
)
# Round the change quantities and uncertainties to the significant digit
# of uncertainty.
try:
abs_change_sigdig = max(
find_significant_digit(abs_change),
find_significant_digit(abs_change_unc),
)
# abs_change = round_with_int_conversion(
# abs_change, abs_change_sigdig
# )
# abs_change_unc = round_with_int_conversion(
# abs_change_unc, abs_change_sigdig
# )
except:
# Any value errors should be due to NaNs returned by
# `percent_change_uncertainty` because quantities or change in
# quantities was 0. We can ignore these.
pass
try:
per_change_sigdig = max(
find_significant_digit(per_change),
find_significant_digit(per_change_unc)
)
# per_change = round_with_int_conversion(
# per_change, per_change_sigdig
# )
# per_change_unc = round_with_int_conversion(
# per_change_unc, per_change_sigdig
# )
except:
# Any value errors should be due to NaNs returned by
# `percent_change_uncertainty` because quantities or change in
# quantities was 0. We can ignore these.
pass
# Add the values (if the `--output-all-variables` option was specified)
# and the changes to the record. Note that the record's schema is
# different from the original schema. If multiple dependent variables
# share the same sample size variable, it's fine - they will overwrite
# each other, but with the same value.
if args.output_all_variables:
record[var.quantity + baseline_suffix] = baseline_quantity
record[var.uncertainty + baseline_suffix] = baseline_uncertainty
record[var.sample_size + baseline_suffix] = baseline_sample_size
record[var.quantity + observed_suffix] = observed_quantity
record[var.uncertainty + observed_suffix] = observed_uncertainty
record[var.sample_size + observed_suffix] = observed_sample_size
record[var.quantity + absolute_change_suffix] = abs_change
record[var.uncertainty + absolute_change_suffix] = abs_change_unc
record[var.quantity + percent_change_suffix] = per_change
record[var.uncertainty + percent_change_suffix] = per_change_unc
# If the ranges of uncertainty don't overlap and the percentage
# change is greater than the change threshold, then the change is
# statistically significant.
overlap = ranges_overlap_uncertainty(
baseline_quantity, baseline_uncertainty,
observed_quantity, observed_uncertainty
)
if not overlap and per_change >= args.change_threshold:
statistically_significant_change = True
# Print the record if a statistically significant change was found or if the
# `--output-all-datapoints` option was specified.
if args.output_all_datapoints or statistically_significant_change:
iom.write(record)
| thrust-master | internal/benchmark/compare_benchmark_results.py |
'''
Convert Google Code .wiki files into .tex formatted files.
Output is designed to be included within a larger TeX project, it is
not standalone.
'''
import sys
import re
import codecs
print(sys.argv)
'''
A "rule" is a begin tag, an end tag, and how to reformat the inner text
(function)
'''
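# For example (illustrative), the rule ['*', '*', encase(r'\emph{', r'}')]
# defined below makes apply_rules() rewrite "*important*" as "\emph{important}".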
def encase(pre, post, strip=False):
"""Return a function that prepends pre and postpends post"""
def f(txt):
if strip:
return pre + txt.strip() + post
else:
return pre + txt + post
return f
def constant(text):
def f(txt):
return text
return f
def encase_with_rules(pre, post, rules, strip=False):
def f(txt):
if strip:
return pre + apply_rules(txt, rules).strip() + post
else:
return pre + apply_rules(txt, rules) + post
return f
def encase_escape_underscore(pre, post):
def f(txt):
txt = sub(r'_', r'\_', txt)
return pre + txt + post
return f
def sub(pat, repl, txt):
"""Substitute in repl for pat in txt, txt can be multiple lines"""
return re.compile(pat, re.MULTILINE).sub(repl, txt)
def process_list(rules):
def f(txt):
txt = ' *' + txt  # re-add the ' *' that was consumed as the list's begin tag
res = '\\begin{itemize}\n'
for ln in txt.split('\n'):
# Convert " *" to "\item "
ln = sub(r'^ \*', r'\\item ', ln)
res += apply_rules(ln, rules) + '\n'
res += '\\end{itemize}\n'
return res
return f
def process_link(rules):
def f(txt):
lst = txt.split(' ')
lnk = lst[0]
desc = apply_rules(' '.join(lst[1:]), rules)
if lnk[:7] == 'http://':
return r'\href{' + lnk + r'}{' + desc + r'}'
if len(lst) > 1:
return r'\href{}{' + desc + r'}'
return r'\href{}{' + lnk + r'}'
return f
# Some rules can be used inside some other rules (backticks in section names)
link_rules = [
['_', '', constant(r'\_')],
]
section_rules = [
['`', '`', encase_escape_underscore(r'\texttt{', r'}')],
]
item_rules = [
['`', '`', encase(r'\verb|', r'|')],
['[', ']', process_link(link_rules)],
]
# Main rules for Latex formatting
rules = [
['{{{', '}}}', encase(r'\begin{lstlisting}[language=c++]', r'\end{lstlisting}')],
['[', ']', process_link(link_rules)],
[' *', '\n\n', process_list(item_rules)],
['"', '"', encase("``", "''")],
['`', '`', encase(r'\verb|', r'|')],
['*', '*', encase(r'\emph{', r'}')],
['_', '_', encase(r'\emph{', r'}')],
['==', '==', encase_with_rules(r'\section{', r'}', section_rules, True)],
['=', '=', encase_with_rules(r'\chapter{', r'}', section_rules, True)],
['(e.g. f(x) -> y and f(x,y) -> ', 'z)', constant(r'(e.g. $f(x)\to y$ and $f(x,y)\to z$)')],
]
def match_rules(txt, rules):
"""Find rule that first matches in txt"""
# Find first begin tag
first_begin_loc = 10e100
matching_rule = None
for rule in rules:
begin_tag, end_tag, func = rule
loc = txt.find(begin_tag)
if loc > -1 and loc < first_begin_loc:
first_begin_loc = loc
matching_rule = rule
return (matching_rule, first_begin_loc)
def apply_rules(txt, rules):
"""Apply set of rules to give txt, return transformed version of txt"""
matching_rule, first_begin_loc = match_rules(txt, rules)
if matching_rule is None:
return txt
begin_tag, end_tag, func = matching_rule
end_loc = txt.find(end_tag, first_begin_loc + 1)
if end_loc == -1:
sys.exit('Could not find end tag {0} after position {1}'.format(end_tag, first_begin_loc + 1))
inner_txt = txt[first_begin_loc + len(begin_tag) : end_loc]
# Copy characters up until begin tag
# Then have output of rule function on inner text
new_txt_start = txt[:first_begin_loc] + func(inner_txt)
# Follow with the remaining processed text
remaining_txt = txt[end_loc + len(end_tag):]
return new_txt_start + apply_rules(remaining_txt, rules)
def split_sections(contents):
"""Given one string of all file contents, return list of sections
Return format is list of pairs, each pair has section title
and list of lines. Result is ordered as the original input.
"""
res = []
cur_section = ''
section = []
for ln in contents.split('\n'):
if len(ln) > 0 and ln[0] == '=':
# remove = formatting from line
section_title = sub(r'^\=+ (.*) \=+', r'\1', ln)
res.append((cur_section, section))
cur_section = section_title
section = [ln]
else:
section.append(ln)
res.append((cur_section, section))
return res
def filter_sections(splitinput, removelst):
"""Take split input and remove sections in removelst"""
res = []
for sectname, sectcontents in splitinput:
if sectname in removelst:
pass
else:
res.extend(sectcontents)
# convert to single string for output
return '\n'.join(res)
def main():
infile = codecs.open(sys.argv[1], encoding='utf-8')
outfile = codecs.open(sys.argv[2], mode='w', encoding='utf-8')
contents = infile.read()
# Remove first three lines
contents = '\n'.join(contents.split('\n')[3:])
# Split sections and filter out some of them
sections = split_sections(contents)
contents = filter_sections(sections, ['Introduction', 'Prerequisites', 'Simple Example'])
# Convert to latex format
contents = apply_rules(contents, rules)
infile.close()
outfile.write(contents)
outfile.close()
return 0
if __name__ == '__main__':
sys.exit(main())
| thrust-master | internal/scripts/wiki2tex.py |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2018 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from sys import exit
from os.path import join, dirname, basename, realpath
from csv import DictReader as csv_dict_reader
from subprocess import Popen
from argparse import ArgumentParser as argument_parser
###############################################################################
def printable_cmd(c):
"""Converts a `list` of `str`s representing a shell command to a printable
`str`."""
return " ".join(map(lambda e: '"' + str(e) + '"', c))
###############################################################################
def print_file(p):
"""Open the path `p` and print its contents to `stdout`."""
print "********************************************************************************"
with open(p) as f:
for line in f:
print line,
print "********************************************************************************"
###############################################################################
ap = argument_parser(
description = (
"CUDA Eris driver script: runs a benchmark suite multiple times, combines "
"the results, and outputs them in the CUDA Eris performance result format."
)
)
ap.add_argument(
"-b", "--benchmark",
help = ("The location of the benchmark suite executable to run."),
type = str,
default = join(dirname(realpath(__file__)), "bench"),
metavar = "R"
)
ap.add_argument(
"-p", "--postprocess",
help = ("The location of the postprocessing script to run to combine the "
"results."),
type = str,
default = join(dirname(realpath(__file__)), "combine_benchmark_results.py"),
metavar = "R"
)
ap.add_argument(
"-r", "--runs",
help = ("Run the benchmark suite `R` times.a),"),
type = int, default = 5,
metavar = "R"
)
args = ap.parse_args()
if args.runs <= 0:
print "ERROR: `--runs` must be greater than `0`."
ap.print_help()
exit(1)
BENCHMARK_EXE = args.benchmark
BENCHMARK_NAME = basename(BENCHMARK_EXE)
POSTPROCESS_EXE = args.postprocess
OUTPUT_FILE_NAME = lambda i: BENCHMARK_NAME + "_" + str(i) + ".csv"
COMBINED_OUTPUT_FILE_NAME = BENCHMARK_NAME + "_combined.csv"
###############################################################################
print '&&&& RUNNING {0}'.format(BENCHMARK_NAME)
print '#### RUNS {0}'.format(args.runs)
###############################################################################
print '#### CMD {0}'.format(BENCHMARK_EXE)
for i in xrange(args.runs):
with open(OUTPUT_FILE_NAME(i), "w") as output_file:
print '#### RUN {0} OUTPUT -> {1}'.format(i, OUTPUT_FILE_NAME(i))
p = None
try:
p = Popen(BENCHMARK_EXE, stdout = output_file, stderr = output_file)
p.communicate()
except OSError as ex:
print_file(OUTPUT_FILE_NAME(i))
print '#### ERROR Caught OSError `{0}`.'.format(ex)
print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
exit(-1)
print_file(OUTPUT_FILE_NAME(i))
if p.returncode != 0:
print '#### ERROR Process exited with code {0}.'.format(p.returncode)
print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
exit(p.returncode)
###############################################################################
post_cmd = [POSTPROCESS_EXE]
# Add dependent variable options.
post_cmd += ["-dSTL Average Walltime,STL Walltime Uncertainty,STL Trials"]
post_cmd += ["-dSTL Average Throughput,STL Throughput Uncertainty,STL Trials"]
post_cmd += ["-dThrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials"]
post_cmd += ["-dThrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"]
post_cmd += [OUTPUT_FILE_NAME(i) for i in range(args.runs)]
print '#### CMD {0}'.format(printable_cmd(post_cmd))
with open(COMBINED_OUTPUT_FILE_NAME, "w") as output_file:
p = None
try:
p = Popen(post_cmd, stdout = output_file, stderr = output_file)
p.communicate()
except OSError as ex:
print_file(COMBINED_OUTPUT_FILE_NAME)
print '#### ERROR Caught OSError `{0}`.'.format(ex)
print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
exit(-1)
print_file(COMBINED_OUTPUT_FILE_NAME)
if p.returncode != 0:
print '#### ERROR Process exited with code {0}.'.format(p.returncode)
print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
exit(p.returncode)
with open(COMBINED_OUTPUT_FILE_NAME) as input_file:
reader = csv_dict_reader(input_file)
variable_units = reader.next() # Get units header row.
distinguishing_variables = reader.fieldnames
measured_variables = [
("STL Average Throughput", "+"),
("Thrust Average Throughput", "+")
]
for record in reader:
for variable, directionality in measured_variables:
# Don't monitor regressions for STL implementations, nvbug 28980890:
if "STL" in variable:
continue
print "&&&& PERF {0}_{1}_{2}bit_{3}mib_{4} {5} {6}{7}".format(
record["Algorithm"],
record["Element Type"],
record["Element Size"],
record["Total Input Size"],
variable.replace(" ", "_").lower(),
record[variable],
directionality,
variable_units[variable]
)
###############################################################################
print '&&&& PASSED {0}'.format(BENCHMARK_NAME)
| thrust-master | internal/scripts/eris_perf.py |
'''
Helper script for creating a header file that includes all of Thrust's
public headers. This is useful for instance, to quickly check that
all the thrust headers obey proper syntax or are warning free.
This script simply outputs a list of C-style #include's to the standard
output--this should be redirected to a header file by the caller.
'''
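# Typical invocation (paths are illustrative); the single argument is the
# directory that contains the thrust/ header tree, and stdout is redirected
# to the generated header:
#
#   python warningstester_create_uber_header.py /path/to/source-root > uber_header.h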
import sys
import os
import re
from stat import *
thrustdir = sys.argv[1]
def find_headers(base_dir, rel_dir, exclude = ['\B']):
'''
Recursively find all *.h files inside base_dir/rel_dir,
except any that match the exclude regexp list
'''
assert(type(exclude) == list)
full_dir = base_dir + '/' + rel_dir
result = []
for f in os.listdir(full_dir):
rel_file = rel_dir + '/' + f
for e in exclude:
if re.match(e, rel_file):
break
else:
if f.endswith('.h'):
result.append(rel_file)
elif S_ISDIR(os.stat(full_dir + '/' + f).st_mode):
result.extend(find_headers(base_dir, rel_file, exclude))
return result
print('/* File is generated by ' + sys.argv[0] + ' */')
exclude_re = ['.*/detail$',
'thrust/iterator',
'thrust/random',
'thrust/system/tbb']
headers = find_headers(thrustdir, 'thrust', exclude_re)
if len(headers) == 0:
print('#error no include files found\n')
print('#define THRUST_CPP11_REQUIRED_NO_ERROR')
print('#define THRUST_CPP14_REQUIRED_NO_ERROR')
print('#define THRUST_MODERN_GCC_REQUIRED_NO_ERROR')
for h in headers:
print('#include <' + h + '>')
exit()
| thrust-master | internal/build/warningstester_create_uber_header.py |
import gdb
import sys
if sys.version_info[0] > 2:
Iterator = object
else:
# "Polyfill" for Python2 Iterator interface
class Iterator:
def next(self):
return self.__next__()
class ThrustVectorPrinter(gdb.printing.PrettyPrinter):
"Print a thrust::*_vector"
class _host_accessible_iterator(Iterator):
def __init__(self, start, size):
self.item = start
self.size = size
self.count = 0
def __iter__(self):
return self
def __next__(self):
if self.count >= self.size:
raise StopIteration
elt = self.item.dereference()
count = self.count
self.item = self.item + 1
self.count = self.count + 1
return ('[%d]' % count, elt)
class _device_iterator(Iterator):
def __init__(self, start, size):
self.item = start
self.size = size
self.count = 0
self.buffer = None
self.sizeof = self.item.dereference().type.sizeof
self.buffer_start = 0
# Buffer at most 1 MiB worth of elements (capped at size, and at least 1)
self.buffer_size = min(size, max(1, 2 ** 20 // self.sizeof))
self.buffer = gdb.parse_and_eval(
'(void*)malloc(%s)' % (self.buffer_size * self.sizeof))
self.buffer.fetch_lazy()
self.buffer_count = self.buffer_size
self.update_buffer()
def update_buffer(self):
if self.buffer_count >= self.buffer_size:
self.buffer_item = gdb.parse_and_eval(
hex(self.buffer)).cast(self.item.type)
self.buffer_count = 0
self.buffer_start = self.count
device_addr = hex(self.item.dereference().address)
buffer_addr = hex(self.buffer)
size = min(self.buffer_size, self.size -
self.buffer_start) * self.sizeof
status = gdb.parse_and_eval(
'(cudaError)cudaMemcpy(%s, %s, %d, cudaMemcpyDeviceToHost)' % (buffer_addr, device_addr, size))
if status != 0:
raise gdb.MemoryError(
'memcpy from device failed: %s' % status)
def __del__(self):
gdb.parse_and_eval('(void)free(%s)' %
hex(self.buffer)).fetch_lazy()
def __iter__(self):
return self
def __next__(self):
if self.count >= self.size:
raise StopIteration
self.update_buffer()
elt = self.buffer_item.dereference()
self.buffer_item = self.buffer_item + 1
self.buffer_count = self.buffer_count + 1
count = self.count
self.item = self.item + 1
self.count = self.count + 1
return ('[%d]' % count, elt)
def __init__(self, val):
self.val = val
self.pointer = val['m_storage']['m_begin']['m_iterator']
self.size = int(val['m_size'])
self.capacity = int(val['m_storage']['m_size'])
self.is_device = False
if str(self.pointer.type).startswith("thrust::device_ptr"):
self.pointer = self.pointer['m_iterator']
self.is_device = True
def children(self):
if self.is_device:
return self._device_iterator(self.pointer, self.size)
else:
return self._host_accessible_iterator(self.pointer, self.size)
def to_string(self):
typename = str(self.val.type)
return ('%s of length %d, capacity %d' % (typename, self.size, self.capacity))
def display_hint(self):
return 'array'
class ThrustReferencePrinter(gdb.printing.PrettyPrinter):
"Print a thrust::device_reference"
def __init__(self, val):
self.val = val
self.pointer = val['ptr']['m_iterator']
self.type = self.pointer.dereference().type
sizeof = self.type.sizeof
self.buffer = gdb.parse_and_eval('(void*)malloc(%s)' % sizeof)
device_addr = hex(self.pointer)
buffer_addr = hex(self.buffer)
status = gdb.parse_and_eval('(cudaError)cudaMemcpy(%s, %s, %d, cudaMemcpyDeviceToHost)' % (
buffer_addr, device_addr, sizeof))
if status != 0:
raise gdb.MemoryError('memcpy from device failed: %s' % status)
self.buffer_val = gdb.parse_and_eval(
hex(self.buffer)).cast(self.pointer.type).dereference()
def __del__(self):
gdb.parse_and_eval('(void)free(%s)' % hex(self.buffer)).fetch_lazy()
def children(self):
return []
def to_string(self):
typename = str(self.val.type)
return ('(%s) @%s: %s' % (typename, self.pointer, self.buffer_val))
def display_hint(self):
return None
def lookup_thrust_type(val):
if not str(val.type.unqualified()).startswith('thrust::'):
return None
suffix = str(val.type.unqualified())[8:]
if suffix.startswith('host_vector') or suffix.startswith('device_vector'):
return ThrustVectorPrinter(val)
elif int(gdb.VERSION.split(".")[0]) >= 10 and suffix.startswith('device_reference'):
return ThrustReferencePrinter(val)
return None
gdb.pretty_printers.append(lookup_thrust_type)
| thrust-master | scripts/gdb-pretty-printers.py |
#!/usr/bin/env python3
# Copyright (C) 2013-2022 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Usage:
# make-target-delegates.py
import re
import gdbcopyright
# The line we search for in target.h that marks where we should start
# looking for methods.
TRIGGER = re.compile(r"^struct target_ops$")
# The end of the methods part.
ENDER = re.compile(r"^\s*};$")
# Match a C symbol.
SYMBOL = "[a-zA-Z_][a-zA-Z0-9_]*"
# Match the name part of a method in struct target_ops.
NAME_PART = r"(?P<name>" + SYMBOL + ")\s"
# Match the arguments to a method.
ARGS_PART = r"(?P<args>\(.*\))"
# We strip the indentation so here we only need the caret.
INTRO_PART = r"^"
POINTER_PART = r"\s*(\*)?\s*"
# Match a C++ symbol, including scope operators and template
# parameters. E.g., 'std::vector<something>'.
CP_SYMBOL = r"[a-zA-Z_][a-zA-Z0-9_<>:]*"
# Match the return type when it is "ordinary".
SIMPLE_RETURN_PART = r"((struct|class|enum|union)\s+)?" + CP_SYMBOL
# Match a return type.
RETURN_PART = r"((const|volatile)\s+)?(" + SIMPLE_RETURN_PART + ")" + POINTER_PART
# Match "virtual".
VIRTUAL_PART = r"virtual\s"
# Match the TARGET_DEFAULT_* attribute for a method.
TARGET_DEFAULT_PART = r"TARGET_DEFAULT_(?P<style>[A-Z_]+)\s*\((?P<default_arg>.*)\)"
# Match the arguments and trailing attribute of a method definition.
# Note we don't match the trailing ";".
METHOD_TRAILER = r"\s*" + TARGET_DEFAULT_PART + "$"
# Match an entire method definition.
METHOD = re.compile(
INTRO_PART
+ VIRTUAL_PART
+ "(?P<return_type>"
+ RETURN_PART
+ ")"
+ NAME_PART
+ ARGS_PART
+ METHOD_TRAILER
)
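# A declaration this matches (after the whitespace normalization done in
# scan_target_h; quoted from memory, so treat it as illustrative) is:
#
#   virtual int insert_breakpoint (struct gdbarch *, struct bp_target_info *) TARGET_DEFAULT_NORETURN (noprocess ())
#
# which captures name="insert_breakpoint", the parenthesized args, and
# style="NORETURN" with default_arg="noprocess ()".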
# Regular expression used to dissect argument types.
ARGTYPES = re.compile(
"^("
+ r"(?P<E>enum\s+"
+ SYMBOL
+ r"\s*)("
+ SYMBOL
+ ")?"
+ r"|(?P<T>.*(enum\s+)?"
+ SYMBOL
+ r".*(\s|\*|&))"
+ SYMBOL
+ ")$"
)
# Match TARGET_DEBUG_PRINTER in an argument type.
# This must match the whole "sub-expression" including the parens.
TARGET_DEBUG_PRINTER = r"\s*TARGET_DEBUG_PRINTER\s*\((?P<arg>[^)]*)\)\s*"
def scan_target_h():
found_trigger = False
all_the_text = ""
with open("target.h", "r") as target_h:
for line in target_h:
line = line.strip()
if not found_trigger:
if TRIGGER.match(line):
found_trigger = True
elif "{" in line:
# Skip the open brace.
pass
elif ENDER.match(line):
break
else:
# Strip // comments.
line = re.split("//", line)[0]
all_the_text = all_the_text + " " + line
if not found_trigger:
raise "Could not find trigger line"
# Now strip out the C comments.
all_the_text = re.sub(r"/\*(.*?)\*/", "", all_the_text)
# Replace sequences whitespace with a single space character.
# We need the space because the method may have been split
# between multiple lines, like e.g.:
#
# virtual std::vector<long_type_name>
# my_long_method_name ()
# TARGET_DEFAULT_IGNORE ();
#
# If we didn't preserve the space, then we'd end up with:
#
# virtual std::vector<long_type_name>my_long_method_name ()TARGET_DEFAULT_IGNORE ()
#
# ... which wouldn't later be parsed correctly.
all_the_text = re.sub(r"\s+", " ", all_the_text)
return all_the_text.split(";")
# Parse arguments into a list.
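# For example (traced by hand, illustrative):
#   parse_argtypes("(struct regcache *regcache, int cookednum)")
# returns ['struct regcache *', 'int'].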
def parse_argtypes(typestr):
# Remove the outer parens.
typestr = re.sub(r"^\((.*)\)$", r"\1", typestr)
result = []
for item in re.split(r",\s*", typestr):
if item == "void" or item == "":
continue
m = ARGTYPES.match(item)
if m:
if m.group("E"):
onetype = m.group("E")
else:
onetype = m.group("T")
else:
onetype = item
result.append(onetype.strip())
return result
# Write function header given name, return type, and argtypes.
# Returns a list of actual argument names.
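# For example (illustrative): called with decl=False, name
# "target_ops::insert_breakpoint", return_type "int" and argtypes
# ["struct gdbarch *", "struct bp_target_info *"], it writes
#
#   int
#   target_ops::insert_breakpoint (struct gdbarch *arg0, struct bp_target_info *arg1)
#   {
#
# and returns ["arg0", "arg1"].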
def write_function_header(f, decl, name, return_type, argtypes):
print(return_type, file=f, end="")
if decl:
if not return_type.endswith("*"):
print(" ", file=f, end="")
else:
print("", file=f)
print(name + " (", file=f, end="")
argdecls = []
actuals = []
for i in range(len(argtypes)):
val = re.sub(TARGET_DEBUG_PRINTER, "", argtypes[i])
if not val.endswith("*") and not val.endswith("&"):
val = val + " "
vname = "arg" + str(i)
val = val + vname
argdecls.append(val)
actuals.append(vname)
print(", ".join(argdecls) + ")", file=f, end="")
if decl:
print(" override;", file=f)
else:
print("\n{", file=f)
return actuals
# Write out a declaration.
def write_declaration(f, name, return_type, argtypes):
write_function_header(f, True, name, return_type, argtypes)
# Write out a delegation function.
def write_delegator(f, name, return_type, argtypes):
names = write_function_header(
f, False, "target_ops::" + name, return_type, argtypes
)
print(" ", file=f, end="")
if return_type != "void":
print("return ", file=f, end="")
print("this->beneath ()->" + name + " (", file=f, end="")
print(", ".join(names), file=f, end="")
print(");", file=f)
print("}\n", file=f)
# Write out a default function.
def write_tdefault(f, content, style, name, return_type, argtypes):
name = "dummy_target::" + name
names = write_function_header(f, False, name, return_type, argtypes)
if style == "FUNC":
print(" ", file=f, end="")
if return_type != "void":
print("return ", file=f, end="")
print(content + " (", file=f, end="")
names.insert(0, "this")
print(", ".join(names) + ");", file=f)
elif style == "RETURN":
print(" return " + content + ";", file=f)
elif style == "NORETURN":
print(" " + content + ";", file=f)
elif style == "IGNORE":
# Nothing.
pass
else:
raise "unrecognized style: " + style
print("}\n", file=f)
def munge_type(typename):
m = re.search(TARGET_DEBUG_PRINTER, typename)
if m:
return m.group("arg")
typename = typename.rstrip()
typename = re.sub("[ ()<>:]", "_", typename)
typename = re.sub("[*]", "p", typename)
typename = re.sub("&", "r", typename)
# Identifiers with double underscores are reserved to the C++
# implementation.
typename = re.sub("_+", "_", typename)
# Avoid ending the function name with underscore, for
# cosmetics. Trailing underscores appear after munging types
# with template parameters, like e.g. "foo<int>".
typename = re.sub("_+$", "", typename)
return "target_debug_print_" + typename
# Write out a debug method.
def write_debugmethod(f, content, name, return_type, argtypes):
debugname = "debug_target::" + name
names = write_function_header(f, False, debugname, return_type, argtypes)
if return_type != "void":
print(" " + return_type + " result;", file=f)
print(
' gdb_printf (gdb_stdlog, "-> %s->'
+ name
+ ' (...)\\n", this->beneath ()->shortname ());',
file=f,
)
# Delegate to the beneath target.
print(" ", file=f, end="")
if return_type != "void":
print("result = ", file=f, end="")
print("this->beneath ()->" + name + " (", file=f, end="")
print(", ".join(names), file=f, end="")
print(");", file=f)
# Now print the arguments.
print(
' gdb_printf (gdb_stdlog, "<- %s->'
+ name
+ ' (", this->beneath ()->shortname ());',
file=f,
)
for i in range(len(argtypes)):
if i > 0:
print(' gdb_puts (", ", gdb_stdlog);', file=f)
printer = munge_type(argtypes[i])
print(" " + printer + " (" + names[i] + ");", file=f)
if return_type != "void":
print(' gdb_puts (") = ", gdb_stdlog);', file=f)
printer = munge_type(return_type)
print(" " + printer + " (result);", file=f)
print(' gdb_puts ("\\n", gdb_stdlog);', file=f)
else:
print(' gdb_puts (")\\n", gdb_stdlog);', file=f)
if return_type != "void":
print(" return result;", file=f)
print("}\n", file=f)
def print_class(f, class_name, delegators, entries):
print("struct " + class_name + " : public target_ops", file=f)
print("{", file=f)
print(" const target_info &info () const override;", file=f)
print("", file=f)
print(" strata stratum () const override;", file=f)
print("", file=f)
for name in delegators:
return_type = entries[name]["return_type"]
argtypes = entries[name]["argtypes"]
print(" ", file=f, end="")
write_declaration(f, name, return_type, argtypes)
print("};\n", file=f)
delegators = []
entries = {}
for current_line in scan_target_h():
# See comments in scan_target_h. Here we strip away the leading
# and trailing whitespace.
current_line = current_line.strip()
m = METHOD.match(current_line)
if not m:
continue
data = m.groupdict()
data["argtypes"] = parse_argtypes(data["args"])
data["return_type"] = data["return_type"].strip()
entries[data["name"]] = data
delegators.append(data["name"])
with open("target-delegates.c", "w") as f:
print(
gdbcopyright.copyright(
"make-target-delegates.py", "Boilerplate target methods for GDB"
),
file=f,
)
print_class(f, "dummy_target", delegators, entries)
print_class(f, "debug_target", delegators, entries)
for name in delegators:
tdefault = entries[name]["default_arg"]
return_type = entries[name]["return_type"]
style = entries[name]["style"]
argtypes = entries[name]["argtypes"]
write_delegator(f, name, return_type, argtypes)
write_tdefault(f, tdefault, style, name, return_type, argtypes)
write_debugmethod(f, tdefault, name, return_type, argtypes)
| cuda-gdb-master | gdb/make-target-delegates.py |
#! /usr/bin/env python3
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""copyright.py
This script updates the list of years in the copyright notices in
most files maintained by the GDB project.
Usage: cd src/gdb && ./copyright.py
Always review the output of this script before committing it!
A useful command to review the output is:
% filterdiff -x \*.c -x \*.cc -x \*.h -x \*.exp updates.diff
This removes the bulk of the changes which are most likely to be correct.
"""
import argparse
import datetime
import locale
import os
import os.path
import subprocess
import sys
from typing import List, Optional
def get_update_list():
"""Return the list of files to update.
Assumes that the current working directory when called is the root
of the GDB source tree (NOT the gdb/ subdirectory!). The names of
the files are relative to that root directory.
"""
result = []
for gdb_dir in (
"gdb",
"gdbserver",
"gdbsupport",
"gnulib",
"sim",
"include/gdb",
):
for root, dirs, files in os.walk(gdb_dir, topdown=True):
for dirname in dirs:
reldirname = "%s/%s" % (root, dirname)
if (
dirname in EXCLUDE_ALL_LIST
or reldirname in EXCLUDE_LIST
or reldirname in NOT_FSF_LIST
or reldirname in BY_HAND
):
# Prune this directory from our search list.
dirs.remove(dirname)
for filename in files:
relpath = "%s/%s" % (root, filename)
if (
filename in EXCLUDE_ALL_LIST
or relpath in EXCLUDE_LIST
or relpath in NOT_FSF_LIST
or relpath in BY_HAND
):
# Ignore this file.
pass
else:
result.append(relpath)
return result
def update_files(update_list):
"""Update the copyright header of the files in the given list.
We use gnulib's update-copyright script for that.
"""
# We want to use year intervals in the copyright notices, and
# all years should be collapsed to one single year interval,
# even if there are "holes" in the list of years found in the
# original copyright notice (OK'ed by the FSF, case [gnu.org #719834]).
os.environ["UPDATE_COPYRIGHT_USE_INTERVALS"] = "2"
# Perform the update, and save the output in a string.
update_cmd = ["bash", "gnulib/import/extra/update-copyright"]
update_cmd += update_list
p = subprocess.Popen(
update_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding=locale.getpreferredencoding(),
)
update_out = p.communicate()[0]
# Process the output. Typically, a lot of files do not have
# a copyright notice :-(. The update-copyright script prints
# a well defined warning when it did not find the copyright notice.
# For each of those, do a sanity check and see if they may in fact
# have one. For the files that are found not to have one, we filter
# the line out from the output, since there is nothing more to do,
# short of looking at each file and seeing which notice is appropriate.
# Too much work! (~4,000 files listed as of 2012-01-03).
update_out = update_out.splitlines(keepends=False)
warning_string = ": warning: copyright statement not found"
warning_len = len(warning_string)
for line in update_out:
if line.endswith(warning_string):
filename = line[:-warning_len]
if may_have_copyright_notice(filename):
print(line)
else:
# Unrecognized file format. !?!
print("*** " + line)
def may_have_copyright_notice(filename):
"""Check that the given file does not seem to have a copyright notice.
The filename is relative to the root directory.
This function assumes that the current working directory is that root
directory.
The algorithm is fairly crude, meaning that it might return
some false positives. I do not think it will return any false
negatives... We might improve this function to handle more
complex cases later...
"""
# For now, it may have a copyright notice if we find the word
# "Copyright" at the (reasonable) start of the given file, say
# 50 lines...
MAX_LINES = 50
# We don't really know what encoding each file might be following,
# so just open the file as a byte stream. We only need to search
# for a pattern that should be the same regardless of encoding,
# so that should be good enough.
with open(filename, "rb") as fd:
for lineno, line in enumerate(fd, start=1):
if b"Copyright" in line:
return True
if lineno > MAX_LINES:
break
return False
def get_parser() -> argparse.ArgumentParser:
"""Get a command line parser."""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
return parser
def main(argv: List[str]) -> Optional[int]:
"""The main subprogram."""
parser = get_parser()
_ = parser.parse_args(argv)
root_dir = os.path.dirname(os.getcwd())
os.chdir(root_dir)
if not (
os.path.isdir("gdb") and os.path.isfile("gnulib/import/extra/update-copyright")
):
sys.exit("Error: This script must be called from the gdb directory.")
update_list = get_update_list()
update_files(update_list)
# Remind the user that some files need to be updated by HAND...
if MULTIPLE_COPYRIGHT_HEADERS:
print()
print(
"\033[31m"
"REMINDER: Multiple copyright headers must be updated by hand:"
"\033[0m"
)
for filename in MULTIPLE_COPYRIGHT_HEADERS:
print(" ", filename)
if BY_HAND:
print()
print(
"\033[31mREMINDER: The following files must be updated by hand." "\033[0m"
)
for filename in BY_HAND:
print(" ", filename)
############################################################################
#
# Some constants, placed at the end because they take up a lot of room.
# The actual value of these constants is not significant to the understanding
# of the script.
#
############################################################################
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Filenames are relative to the root directory.
EXCLUDE_LIST = (
"gdb/nat/glibc_thread_db.h",
"gdb/CONTRIBUTE",
"gdbsupport/Makefile.in",
"gnulib/import",
"gnulib/config.in",
"gnulib/Makefile.in",
)
# Files which should not be modified, either because they are
# generated, non-FSF, or otherwise special (e.g. license text,
# or test cases which must be sensitive to line numbering).
#
# Matches any file or directory name anywhere. Use with caution.
# This is mostly for files that can be found in multiple directories.
# Eg: We want all files named COPYING to be left untouched.
EXCLUDE_ALL_LIST = (
"COPYING",
"COPYING.LIB",
"CVS",
"configure",
"copying.c",
"fdl.texi",
"gpl.texi",
"aclocal.m4",
)
# The list of files to update by hand.
BY_HAND = (
# Nothing at the moment :-).
)
# Files containing multiple copyright headers. This script is only
# fixing the first one it finds, so we need to finish the update
# by hand.
MULTIPLE_COPYRIGHT_HEADERS = (
"gdb/doc/gdb.texinfo",
"gdb/doc/refcard.tex",
"gdb/syscalls/update-netbsd.sh",
)
# The list of file which have a copyright, but not held by the FSF.
# Filenames are relative to the root directory.
NOT_FSF_LIST = (
"gdb/exc_request.defs",
"gdb/gdbtk",
"gdb/testsuite/gdb.gdbtk/",
"sim/arm/armemu.h",
"sim/arm/armos.c",
"sim/arm/gdbhost.c",
"sim/arm/dbg_hif.h",
"sim/arm/dbg_conf.h",
"sim/arm/communicate.h",
"sim/arm/armos.h",
"sim/arm/armcopro.c",
"sim/arm/armemu.c",
"sim/arm/kid.c",
"sim/arm/thumbemu.c",
"sim/arm/armdefs.h",
"sim/arm/armopts.h",
"sim/arm/dbg_cp.h",
"sim/arm/dbg_rdi.h",
"sim/arm/parent.c",
"sim/arm/armsupp.c",
"sim/arm/armrdi.c",
"sim/arm/bag.c",
"sim/arm/armvirt.c",
"sim/arm/main.c",
"sim/arm/bag.h",
"sim/arm/communicate.c",
"sim/arm/gdbhost.h",
"sim/arm/armfpe.h",
"sim/arm/arminit.c",
"sim/common/cgen-fpu.c",
"sim/common/cgen-fpu.h",
"sim/common/cgen-accfp.c",
"sim/mips/m16run.c",
"sim/mips/sim-main.c",
"sim/moxie/moxie-gdb.dts",
# Not a single file in sim/ppc/ appears to be copyright FSF :-(.
"sim/ppc/filter.h",
"sim/ppc/gen-support.h",
"sim/ppc/ld-insn.h",
"sim/ppc/hw_sem.c",
"sim/ppc/hw_disk.c",
"sim/ppc/idecode_branch.h",
"sim/ppc/sim-endian.h",
"sim/ppc/table.c",
"sim/ppc/hw_core.c",
"sim/ppc/gen-support.c",
"sim/ppc/gen-semantics.h",
"sim/ppc/cpu.h",
"sim/ppc/sim_callbacks.h",
"sim/ppc/RUN",
"sim/ppc/Makefile.in",
"sim/ppc/emul_chirp.c",
"sim/ppc/hw_nvram.c",
"sim/ppc/dc-test.01",
"sim/ppc/hw_phb.c",
"sim/ppc/hw_eeprom.c",
"sim/ppc/bits.h",
"sim/ppc/hw_vm.c",
"sim/ppc/cap.h",
"sim/ppc/os_emul.h",
"sim/ppc/options.h",
"sim/ppc/gen-idecode.c",
"sim/ppc/filter.c",
"sim/ppc/corefile-n.h",
"sim/ppc/std-config.h",
"sim/ppc/ld-decode.h",
"sim/ppc/filter_filename.h",
"sim/ppc/hw_shm.c",
"sim/ppc/pk_disklabel.c",
"sim/ppc/dc-simple",
"sim/ppc/misc.h",
"sim/ppc/device_table.h",
"sim/ppc/ld-insn.c",
"sim/ppc/inline.c",
"sim/ppc/emul_bugapi.h",
"sim/ppc/hw_cpu.h",
"sim/ppc/debug.h",
"sim/ppc/hw_ide.c",
"sim/ppc/debug.c",
"sim/ppc/gen-itable.h",
"sim/ppc/interrupts.c",
"sim/ppc/hw_glue.c",
"sim/ppc/emul_unix.c",
"sim/ppc/sim_calls.c",
"sim/ppc/dc-complex",
"sim/ppc/ld-cache.c",
"sim/ppc/registers.h",
"sim/ppc/dc-test.02",
"sim/ppc/options.c",
"sim/ppc/igen.h",
"sim/ppc/registers.c",
"sim/ppc/device.h",
"sim/ppc/emul_chirp.h",
"sim/ppc/hw_register.c",
"sim/ppc/hw_init.c",
"sim/ppc/sim-endian-n.h",
"sim/ppc/filter_filename.c",
"sim/ppc/bits.c",
"sim/ppc/idecode_fields.h",
"sim/ppc/hw_memory.c",
"sim/ppc/misc.c",
"sim/ppc/double.c",
"sim/ppc/psim.h",
"sim/ppc/hw_trace.c",
"sim/ppc/emul_netbsd.h",
"sim/ppc/psim.c",
"sim/ppc/ppc-instructions",
"sim/ppc/tree.h",
"sim/ppc/README",
"sim/ppc/gen-icache.h",
"sim/ppc/gen-model.h",
"sim/ppc/ld-cache.h",
"sim/ppc/mon.c",
"sim/ppc/corefile.h",
"sim/ppc/vm.c",
"sim/ppc/INSTALL",
"sim/ppc/gen-model.c",
"sim/ppc/hw_cpu.c",
"sim/ppc/corefile.c",
"sim/ppc/hw_opic.c",
"sim/ppc/gen-icache.c",
"sim/ppc/events.h",
"sim/ppc/os_emul.c",
"sim/ppc/emul_generic.c",
"sim/ppc/main.c",
"sim/ppc/hw_com.c",
"sim/ppc/gen-semantics.c",
"sim/ppc/emul_bugapi.c",
"sim/ppc/device.c",
"sim/ppc/emul_generic.h",
"sim/ppc/tree.c",
"sim/ppc/mon.h",
"sim/ppc/interrupts.h",
"sim/ppc/cap.c",
"sim/ppc/cpu.c",
"sim/ppc/hw_phb.h",
"sim/ppc/device_table.c",
"sim/ppc/lf.c",
"sim/ppc/lf.c",
"sim/ppc/dc-stupid",
"sim/ppc/hw_pal.c",
"sim/ppc/ppc-spr-table",
"sim/ppc/emul_unix.h",
"sim/ppc/words.h",
"sim/ppc/basics.h",
"sim/ppc/hw_htab.c",
"sim/ppc/lf.h",
"sim/ppc/ld-decode.c",
"sim/ppc/sim-endian.c",
"sim/ppc/gen-itable.c",
"sim/ppc/idecode_expression.h",
"sim/ppc/table.h",
"sim/ppc/dgen.c",
"sim/ppc/events.c",
"sim/ppc/gen-idecode.h",
"sim/ppc/emul_netbsd.c",
"sim/ppc/igen.c",
"sim/ppc/vm_n.h",
"sim/ppc/vm.h",
"sim/ppc/hw_iobus.c",
"sim/ppc/inline.h",
"sim/testsuite/mips/mips32-dsp2.s",
)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
| cuda-gdb-master | gdb/copyright.py |
# Dynamic architecture support for GDB, the GNU debugger.
# Copyright (C) 1998-2022 Free Software Foundation, Inc.
# This file is part of GDB.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# How to add to gdbarch:
#
# There are four kinds of fields in gdbarch:
#
# * Info - you should never need this; it is only for things that are
# copied directly from the gdbarch_info.
#
# * Value - a variable.
#
# * Function - a function pointer.
#
# * Method - a function pointer, but the function takes a gdbarch as
# its first parameter.
#
# You construct a new one with a call to one of those functions. So,
# for instance, you can use the function named "Value" to make a new
# Value.
#
# All parameters are keyword-only. This is done to help catch typos.
#
# Some parameters are shared among all types (including Info):
#
# * "name" - required, the name of the field.
#
# * "type" - required, the type of the field. For functions and
# methods, this is the return type.
#
# * "printer" - an expression to turn this field into a 'const char
# *'. This is used for dumping. The string must live long enough to
# be passed to printf.
#
# Value, Function, and Method share some more parameters. Some of
# these work in conjunction in a somewhat complicated way, so they are
# described in a separate sub-section below.
#
# * "comment" - a comment that's written to the .h file. Please
# always use this. (It isn't currently a required option for
# historical reasons.)
#
# * "predicate" - a boolean, if True then a _p predicate function will
# be generated. The predicate will use the generic validation
# function for the field. See below.
#
# * "predefault", "postdefault", and "invalid" - These are used for
# the initialization and verification steps:
#
# A gdbarch is zero-initialized. Then, if a field has a pre-default,
# the field is set to that value. After initialization is complete
# (that is, after the tdep code has a chance to change the settings),
# the post-initialization step is done.
#
# There is a generic algorithm to generate a "validation function" for
# all fields. If the field has an "invalid" attribute with a string
# value, then this string is the expression (note that a string-valued
# "invalid" and "predicate" are mutually exclusive; and the case where
# invalid is True means to ignore this field and instead use the
# default checking that is about to be described). Otherwise, if
# there is a "predefault", then the field is valid if it differs from
# the predefault. Otherwise, the check is done against 0 (really NULL
# for function pointers, but same idea).
#
# In post-initialization / validation, there are several cases.
#
# * If "invalid" is False, or if the field specifies "predicate",
# validation is skipped. Otherwise, a validation step is emitted.
#
# * Otherwise, the validity is checked using the usual validation
# function (see above). If the field is considered valid, nothing is
# done.
#
# * Otherwise, the field's value is invalid. If there is a
# "postdefault", then the field is assigned that value.
#
# * Otherwise, the gdbarch will fail validation and gdb will crash.
#
# Function and Method share:
#
# * "params" - required, a tuple of tuples. Each inner tuple is a
# pair of the form (TYPE, NAME), where TYPE is the type of this
# argument, and NAME is the name. Note that while the names could be
# auto-generated, this approach lets the "comment" field refer to
# arguments in a nicer way. It is also just nicer for users.
#
# * "param_checks" - optional, a list of strings. Each string is an
# expression that is placed within a gdb_assert before the call is
# made to the Function/Method implementation. Each expression is
# something that should be true, and it is expected that the
# expression will make use of the parameters named in 'params' (though
# this is not required).
#
# * "result_checks" - optional, a list of strings. Each string is an
# expression that is placed within a gdb_assert after the call to the
# Function/Method implementation. Within each expression the variable
# 'result' can be used to reference the result of the function/method
# implementation. The 'result_checks' can only be used if the 'type'
# of this Function/Method is not 'void'.
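# As a sketch only (this is not a real gdbarch field; it is kept commented
# out so it has no effect on the generated files), a new method entry built
# from the pieces described above would look something like:
#
#   Method(
#       comment="""
#   Example only: return PC, aligned as the target requires.
#   """,
#       type="CORE_ADDR",
#       name="example_align_pc",
#       params=[("CORE_ADDR", "pc")],
#       predicate=True,
#       invalid=True,
#   )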
Info(
type="const struct bfd_arch_info *",
name="bfd_arch_info",
printer="gdbarch_bfd_arch_info (gdbarch)->printable_name",
)
Info(
type="enum bfd_endian",
name="byte_order",
)
Info(
type="enum bfd_endian",
name="byte_order_for_code",
)
Info(
type="enum gdb_osabi",
name="osabi",
)
Info(
type="const struct target_desc *",
name="target_desc",
printer="host_address_to_string (gdbarch->target_desc)",
)
Value(
comment="""
Number of bits in a short or unsigned short for the target machine.
""",
type="int",
name="short_bit",
predefault="2*TARGET_CHAR_BIT",
invalid=False,
)
int_bit = Value(
comment="""
Number of bits in an int or unsigned int for the target machine.
""",
type="int",
name="int_bit",
predefault="4*TARGET_CHAR_BIT",
invalid=False,
)
long_bit = Value(
comment="""
Number of bits in a long or unsigned long for the target machine.
""",
type="int",
name="long_bit",
predefault="4*TARGET_CHAR_BIT",
invalid=False,
)
Value(
comment="""
Number of bits in a long long or unsigned long long for the target
machine.
""",
type="int",
name="long_long_bit",
predefault="2*" + long_bit.predefault,
invalid=False,
)
Value(
comment="""
The ABI default bit-size and format for "bfloat16", "half", "float", "double", and
"long double". These bit/format pairs should eventually be combined
into a single object. For the moment, just initialize them as a pair.
Each format describes both the big and little endian layouts (if
useful).
""",
type="int",
name="bfloat16_bit",
predefault="2*TARGET_CHAR_BIT",
invalid=False,
)
Value(
type="const struct floatformat **",
name="bfloat16_format",
postdefault="floatformats_bfloat16",
invalid=True,
printer="pformat (gdbarch, gdbarch->bfloat16_format)",
)
Value(
type="int",
name="half_bit",
predefault="2*TARGET_CHAR_BIT",
invalid=False,
)
Value(
type="const struct floatformat **",
name="half_format",
postdefault="floatformats_ieee_half",
invalid=True,
printer="pformat (gdbarch, gdbarch->half_format)",
)
Value(
type="int",
name="float_bit",
predefault="4*TARGET_CHAR_BIT",
invalid=False,
)
Value(
type="const struct floatformat **",
name="float_format",
postdefault="floatformats_ieee_single",
invalid=True,
printer="pformat (gdbarch, gdbarch->float_format)",
)
Value(
type="int",
name="double_bit",
predefault="8*TARGET_CHAR_BIT",
invalid=False,
)
Value(
type="const struct floatformat **",
name="double_format",
postdefault="floatformats_ieee_double",
invalid=True,
printer="pformat (gdbarch, gdbarch->double_format)",
)
Value(
type="int",
name="long_double_bit",
predefault="8*TARGET_CHAR_BIT",
invalid=False,
)
Value(
type="const struct floatformat **",
name="long_double_format",
postdefault="floatformats_ieee_double",
invalid=True,
printer="pformat (gdbarch, gdbarch->long_double_format)",
)
Value(
comment="""
The ABI default bit-size for "wchar_t". wchar_t is a built-in type
starting with C++11.
""",
type="int",
name="wchar_bit",
predefault="4*TARGET_CHAR_BIT",
invalid=False,
)
Value(
comment="""
One if `wchar_t' is signed, zero if unsigned.
""",
type="int",
name="wchar_signed",
predefault="-1",
postdefault="1",
invalid=True,
)
Method(
comment="""
Returns the floating-point format to be used for values of length LENGTH.
NAME, if non-NULL, is the type name, which may be used to distinguish
different target formats of the same length.
""",
type="const struct floatformat **",
name="floatformat_for_type",
params=[("const char *", "name"), ("int", "length")],
predefault="default_floatformat_for_type",
invalid=False,
)
Value(
comment="""
For most targets, a pointer on the target and its representation as an
address in GDB have the same size and "look the same". For such a
target, you need only set gdbarch_ptr_bit; gdbarch_addr_bit / addr_bit
will be set from it.
If gdbarch_ptr_bit and gdbarch_addr_bit are different, you'll probably
also need to set gdbarch_dwarf2_addr_size, gdbarch_pointer_to_address and
gdbarch_address_to_pointer as well.
ptr_bit is the size of a pointer on the target
""",
type="int",
name="ptr_bit",
predefault=int_bit.predefault,
invalid=False,
)
Value(
comment="""
addr_bit is the size of a target address as represented in gdb
""",
type="int",
name="addr_bit",
predefault="0",
postdefault="gdbarch_ptr_bit (gdbarch)",
invalid=True,
)
Value(
comment="""
dwarf2_addr_size is the target address size as used in the Dwarf debug
info. For .debug_frame FDEs, this is supposed to be the target address
size from the associated CU header, and which is equivalent to the
DWARF2_ADDR_SIZE as defined by the target specific GCC back-end.
Unfortunately there is no good way to determine this value. Therefore
dwarf2_addr_size simply defaults to the target pointer size.
dwarf2_addr_size is not used for .eh_frame FDEs, which are generally
defined using the target's pointer size so far.
Note that dwarf2_addr_size only needs to be redefined by a target if the
GCC back-end defines a DWARF2_ADDR_SIZE other than the target pointer size,
and if Dwarf versions < 4 need to be supported.
""",
type="int",
name="dwarf2_addr_size",
predefault="0",
postdefault="gdbarch_ptr_bit (gdbarch) / TARGET_CHAR_BIT",
invalid=True,
)
Value(
comment="""
One if `char' acts like `signed char', zero if `unsigned char'.
""",
type="int",
name="char_signed",
predefault="-1",
postdefault="1",
invalid=True,
)
Function(
type="CORE_ADDR",
name="read_pc",
params=[("readable_regcache *", "regcache")],
predicate=True,
invalid=True,
)
Function(
type="void",
name="write_pc",
params=[("struct regcache *", "regcache"), ("CORE_ADDR", "val")],
predicate=True,
invalid=True,
)
Method(
comment="""
Function for getting target's idea of a frame pointer. FIXME: GDB's
whole scheme for dealing with "frames" and "frame pointers" needs a
serious shakedown.
""",
type="void",
name="virtual_frame_pointer",
params=[
("CORE_ADDR", "pc"),
("int *", "frame_regnum"),
("LONGEST *", "frame_offset"),
],
predefault="legacy_virtual_frame_pointer",
invalid=False,
)
Method(
type="enum register_status",
name="pseudo_register_read",
params=[
("readable_regcache *", "regcache"),
("int", "cookednum"),
("gdb_byte *", "buf"),
],
predicate=True,
invalid=True,
)
Method(
comment="""
Read a register into a new struct value. If the register is wholly
or partly unavailable, this should call mark_value_bytes_unavailable
as appropriate. If this is defined, then pseudo_register_read will
never be called.
""",
type="struct value *",
name="pseudo_register_read_value",
params=[("readable_regcache *", "regcache"), ("int", "cookednum")],
predicate=True,
invalid=True,
)
Method(
type="void",
name="pseudo_register_write",
params=[
("struct regcache *", "regcache"),
("int", "cookednum"),
("const gdb_byte *", "buf"),
],
predicate=True,
invalid=True,
)
Value(
type="int",
name="num_regs",
predefault="-1",
invalid=True,
)
Value(
comment="""
This macro gives the number of pseudo-registers that live in the
register namespace but do not get fetched or stored on the target.
These pseudo-registers may be aliases for other registers,
combinations of other registers, or they may be computed by GDB.
""",
type="int",
name="num_pseudo_regs",
predefault="0",
invalid=False,
)
Method(
comment="""
Assemble agent expression bytecode to collect pseudo-register REG.
Return -1 if something goes wrong, 0 otherwise.
""",
type="int",
name="ax_pseudo_register_collect",
params=[("struct agent_expr *", "ax"), ("int", "reg")],
predicate=True,
invalid=True,
)
Method(
comment="""
Assemble agent expression bytecode to push the value of pseudo-register
REG on the interpreter stack.
Return -1 if something goes wrong, 0 otherwise.
""",
type="int",
name="ax_pseudo_register_push_stack",
params=[("struct agent_expr *", "ax"), ("int", "reg")],
predicate=True,
invalid=True,
)
Method(
comment="""
Some architectures can display additional information for specific
signals.
UIOUT is the output stream where the handler will place information.
""",
type="void",
name="report_signal_info",
params=[("struct ui_out *", "uiout"), ("enum gdb_signal", "siggnal")],
predicate=True,
invalid=True,
)
Value(
comment="""
GDB's standard (or well known) register numbers. These can map onto
a real register or a pseudo (computed) register or not be defined at
all (-1).
gdbarch_sp_regnum will hopefully be replaced by UNWIND_SP.
""",
type="int",
name="sp_regnum",
predefault="-1",
invalid=False,
)
Value(
type="int",
name="pc_regnum",
predefault="-1",
invalid=False,
)
Value(
type="int",
name="ps_regnum",
predefault="-1",
invalid=False,
)
Value(
type="int",
name="fp0_regnum",
predefault="-1",
invalid=False,
)
Method(
comment="""
Convert stab register number (from `r' declaration) to a gdb REGNUM.
""",
type="int",
name="stab_reg_to_regnum",
params=[("int", "stab_regnr")],
predefault="no_op_reg_to_regnum",
invalid=False,
)
Method(
comment="""
Provide a default mapping from an ecoff register number to a gdb REGNUM.
""",
type="int",
name="ecoff_reg_to_regnum",
params=[("int", "ecoff_regnr")],
predefault="no_op_reg_to_regnum",
invalid=False,
)
Method(
comment="""
Convert from an sdb register number to an internal gdb register number.
""",
type="int",
name="sdb_reg_to_regnum",
params=[("int", "sdb_regnr")],
predefault="no_op_reg_to_regnum",
invalid=False,
)
Method(
comment="""
Provide a default mapping from a DWARF2 register number to a gdb REGNUM.
Return -1 for bad REGNUM. Note: Several targets get this wrong.
""",
type="int",
name="dwarf2_reg_to_regnum",
params=[("int", "dwarf2_regnr")],
predefault="no_op_reg_to_regnum",
invalid=False,
)
Method(
comment="""
Return the name of register REGNR for the specified architecture.
REGNR can be any value greater than, or equal to zero, and less than
'gdbarch_num_cooked_regs (GDBARCH)'. If REGNR is not supported for
GDBARCH, then this function will return an empty string; this function
should never return nullptr.
""",
type="const char *",
name="register_name",
params=[("int", "regnr")],
param_checks=["regnr >= 0", "regnr < gdbarch_num_cooked_regs (gdbarch)"],
result_checks=["result != nullptr"],
predefault="0",
invalid=True,
)
Method(
comment="""
Return the type of a register specified by the architecture. Only
the register cache should call this function directly; others should
use "register_type".
""",
type="struct type *",
name="register_type",
params=[("int", "reg_nr")],
invalid=True,
)
Method(
comment="""
Generate a dummy frame_id for THIS_FRAME assuming that the frame is
a dummy frame. A dummy frame is created before an inferior call,
the frame_id returned here must match the frame_id that was built
for the inferior call. Usually this means the returned frame_id's
stack address should match the address returned by
gdbarch_push_dummy_call, and the returned frame_id's code address
should match the address at which the breakpoint was set in the dummy
frame.
""",
type="struct frame_id",
name="dummy_id",
params=[("frame_info_ptr", "this_frame")],
predefault="default_dummy_id",
invalid=False,
)
Value(
comment="""
Implement DUMMY_ID and PUSH_DUMMY_CALL, then delete
deprecated_fp_regnum.
""",
type="int",
name="deprecated_fp_regnum",
predefault="-1",
invalid=False,
)
Method(
type="CORE_ADDR",
name="push_dummy_call",
params=[
("struct value *", "function"),
("struct regcache *", "regcache"),
("CORE_ADDR", "bp_addr"),
("int", "nargs"),
("struct value **", "args"),
("CORE_ADDR", "sp"),
("function_call_return_method", "return_method"),
("CORE_ADDR", "struct_addr"),
],
predicate=True,
invalid=True,
)
Value(
type="enum call_dummy_location_type",
name="call_dummy_location",
predefault="AT_ENTRY_POINT",
invalid=False,
)
Method(
type="CORE_ADDR",
name="push_dummy_code",
params=[
("CORE_ADDR", "sp"),
("CORE_ADDR", "funaddr"),
("struct value **", "args"),
("int", "nargs"),
("struct type *", "value_type"),
("CORE_ADDR *", "real_pc"),
("CORE_ADDR *", "bp_addr"),
("struct regcache *", "regcache"),
],
predicate=True,
invalid=True,
)
Method(
comment="""
Return true if the code of FRAME is writable.
""",
type="int",
name="code_of_frame_writable",
params=[("frame_info_ptr", "frame")],
predefault="default_code_of_frame_writable",
invalid=False,
)
Method(
type="void",
name="print_registers_info",
params=[
("struct ui_file *", "file"),
("frame_info_ptr", "frame"),
("int", "regnum"),
("int", "all"),
],
predefault="default_print_registers_info",
invalid=False,
)
Method(
type="void",
name="print_float_info",
params=[
("struct ui_file *", "file"),
("frame_info_ptr", "frame"),
("const char *", "args"),
],
predefault="default_print_float_info",
invalid=False,
)
Method(
type="void",
name="print_vector_info",
params=[
("struct ui_file *", "file"),
("frame_info_ptr", "frame"),
("const char *", "args"),
],
predicate=True,
invalid=True,
)
Method(
comment="""
MAP a GDB RAW register number onto a simulator register number. See
also include/...-sim.h.
""",
type="int",
name="register_sim_regno",
params=[("int", "reg_nr")],
predefault="legacy_register_sim_regno",
invalid=False,
)
Method(
type="int",
name="cannot_fetch_register",
params=[("int", "regnum")],
predefault="cannot_register_not",
invalid=False,
)
Method(
type="int",
name="cannot_store_register",
params=[("int", "regnum")],
predefault="cannot_register_not",
invalid=False,
)
Function(
comment="""
Determine the address where a longjmp will land and save this address
in PC. Return nonzero on success.
FRAME corresponds to the longjmp frame.
""",
type="int",
name="get_longjmp_target",
params=[("frame_info_ptr", "frame"), ("CORE_ADDR *", "pc")],
predicate=True,
invalid=True,
)
Value(
type="int",
name="believe_pcc_promotion",
invalid=False,
)
Method(
type="int",
name="convert_register_p",
params=[("int", "regnum"), ("struct type *", "type")],
predefault="generic_convert_register_p",
invalid=False,
)
Function(
type="int",
name="register_to_value",
params=[
("frame_info_ptr", "frame"),
("int", "regnum"),
("struct type *", "type"),
("gdb_byte *", "buf"),
("int *", "optimizedp"),
("int *", "unavailablep"),
],
invalid=False,
)
Function(
type="void",
name="value_to_register",
params=[
("frame_info_ptr", "frame"),
("int", "regnum"),
("struct type *", "type"),
("const gdb_byte *", "buf"),
],
invalid=False,
)
Method(
comment="""
Construct a value representing the contents of register REGNUM in
frame FRAME_ID, interpreted as type TYPE. The routine needs to
allocate and return a struct value with all value attributes
(but not the value contents) filled in.
""",
type="struct value *",
name="value_from_register",
params=[
("struct type *", "type"),
("int", "regnum"),
("struct frame_id", "frame_id"),
],
predefault="default_value_from_register",
invalid=False,
)
Method(
type="CORE_ADDR",
name="pointer_to_address",
params=[("struct type *", "type"), ("const gdb_byte *", "buf")],
predefault="unsigned_pointer_to_address",
invalid=False,
)
Method(
type="void",
name="address_to_pointer",
params=[("struct type *", "type"), ("gdb_byte *", "buf"), ("CORE_ADDR", "addr")],
predefault="unsigned_address_to_pointer",
invalid=False,
)
Method(
type="CORE_ADDR",
name="integer_to_address",
params=[("struct type *", "type"), ("const gdb_byte *", "buf")],
predicate=True,
invalid=True,
)
Method(
comment="""
Return the return-value convention that will be used by FUNCTION
to return a value of type VALTYPE. FUNCTION may be NULL in which
case the return convention is computed based only on VALTYPE.
If READBUF is not NULL, extract the return value and save it in this buffer.
If WRITEBUF is not NULL, it contains a return value which will be
stored into the appropriate register. This can be used when we want
to force the value returned by a function (see the "return" command
for instance).
""",
type="enum return_value_convention",
name="return_value",
params=[
("struct value *", "function"),
("struct type *", "valtype"),
("struct regcache *", "regcache"),
("gdb_byte *", "readbuf"),
("const gdb_byte *", "writebuf"),
],
predicate=True,
invalid=True,
)
Function(
comment="""
Return the address at which the value being returned from
the current function will be stored. This routine is only
called if the current function uses the "struct return
convention".
May return 0 when unable to determine that address.""",
type="CORE_ADDR",
name="get_return_buf_addr",
params=[("struct type *", "val_type"), ("frame_info_ptr", "cur_frame")],
predefault="default_get_return_buf_addr",
invalid=False,
)
Method(
comment="""
Return true if the return value of function is stored in the first hidden
parameter. In theory, this feature should be language-dependent, specified
by the language and its ABI, such as C++. Unfortunately, a compiler may
implement it as a target-dependent feature, so we need such a hook here
for GDB to be aware of this.
""",
type="int",
name="return_in_first_hidden_param_p",
params=[("struct type *", "type")],
predefault="default_return_in_first_hidden_param_p",
invalid=False,
)
Method(
type="CORE_ADDR",
name="skip_prologue",
params=[("CORE_ADDR", "ip")],
predefault="0",
invalid=True,
)
Method(
type="CORE_ADDR",
name="skip_main_prologue",
params=[("CORE_ADDR", "ip")],
predicate=True,
invalid=True,
)
Method(
comment="""
On some platforms, a single function may provide multiple entry points,
e.g. one that is used for function-pointer calls and a different one
that is used for direct function calls.
In order to ensure that breakpoints set on the function will trigger
no matter via which entry point the function is entered, a platform
may provide the skip_entrypoint callback. It is called with IP set
to the main entry point of a function (as determined by the symbol table),
and should return the address of the innermost entry point, where the
actual breakpoint needs to be set. Note that skip_entrypoint is used
by GDB common code even when debugging optimized code, where skip_prologue
is not used.
""",
type="CORE_ADDR",
name="skip_entrypoint",
params=[("CORE_ADDR", "ip")],
predicate=True,
invalid=True,
)
Function(
type="int",
name="inner_than",
params=[("CORE_ADDR", "lhs"), ("CORE_ADDR", "rhs")],
predefault="0",
invalid=True,
)
Method(
type="const gdb_byte *",
name="breakpoint_from_pc",
params=[("CORE_ADDR *", "pcptr"), ("int *", "lenptr")],
predefault="default_breakpoint_from_pc",
invalid=False,
)
Method(
comment="""
Return the breakpoint kind for this target based on *PCPTR.
""",
type="int",
name="breakpoint_kind_from_pc",
params=[("CORE_ADDR *", "pcptr")],
predefault="0",
invalid=True,
)
Method(
comment="""
Return the software breakpoint from KIND. KIND can have target
specific meaning like the Z0 kind parameter.
SIZE is set to the software breakpoint's length in memory.
""",
type="const gdb_byte *",
name="sw_breakpoint_from_kind",
params=[("int", "kind"), ("int *", "size")],
predefault="NULL",
invalid=False,
)
Method(
comment="""
Return the breakpoint kind for this target based on the current
processor state (e.g. the current instruction mode on ARM) and the
*PCPTR. By default, it is gdbarch->breakpoint_kind_from_pc.
""",
type="int",
name="breakpoint_kind_from_current_state",
params=[("struct regcache *", "regcache"), ("CORE_ADDR *", "pcptr")],
predefault="default_breakpoint_kind_from_current_state",
invalid=False,
)
Method(
type="CORE_ADDR",
name="adjust_breakpoint_address",
params=[("CORE_ADDR", "bpaddr")],
predicate=True,
invalid=True,
)
Method(
type="int",
name="memory_insert_breakpoint",
params=[("struct bp_target_info *", "bp_tgt")],
predefault="default_memory_insert_breakpoint",
invalid=False,
)
Method(
type="int",
name="memory_remove_breakpoint",
params=[("struct bp_target_info *", "bp_tgt")],
predefault="default_memory_remove_breakpoint",
invalid=False,
)
Value(
type="CORE_ADDR",
name="decr_pc_after_break",
invalid=False,
)
Value(
comment="""
A function can be addressed by either its "pointer" (possibly a
descriptor address) or "entry point" (first executable instruction).
The method "convert_from_func_ptr_addr" converting the former to the
latter. gdbarch_deprecated_function_start_offset is being used to implement
a simplified subset of that functionality - the function's address
corresponds to the "function pointer" and the function's start
corresponds to the "function entry point" - and hence is redundant.
""",
type="CORE_ADDR",
name="deprecated_function_start_offset",
invalid=False,
)
Method(
comment="""
Return the remote protocol register number associated with this
register. Normally the identity mapping.
""",
type="int",
name="remote_register_number",
params=[("int", "regno")],
predefault="default_remote_register_number",
invalid=False,
)
Function(
comment="""
Fetch the target specific address used to represent a load module.
""",
type="CORE_ADDR",
name="fetch_tls_load_module_address",
params=[("struct objfile *", "objfile")],
predicate=True,
invalid=True,
)
Method(
comment="""
Return the thread-local address at OFFSET in the thread-local
storage for the thread PTID and the shared library or executable
file given by LM_ADDR. If that block of thread-local storage hasn't
been allocated yet, this function may throw an error. LM_ADDR may
be zero for statically linked multithreaded inferiors.
""",
type="CORE_ADDR",
name="get_thread_local_address",
params=[("ptid_t", "ptid"), ("CORE_ADDR", "lm_addr"), ("CORE_ADDR", "offset")],
predicate=True,
invalid=True,
)
Value(
type="CORE_ADDR",
name="frame_args_skip",
invalid=False,
)
Method(
type="CORE_ADDR",
name="unwind_pc",
params=[("frame_info_ptr", "next_frame")],
predefault="default_unwind_pc",
invalid=False,
)
Method(
type="CORE_ADDR",
name="unwind_sp",
params=[("frame_info_ptr", "next_frame")],
predefault="default_unwind_sp",
invalid=False,
)
Function(
comment="""
DEPRECATED_FRAME_LOCALS_ADDRESS has been replaced by the per-frame
frame-base. Enable frame-base before frame-unwind.
""",
type="int",
name="frame_num_args",
params=[("frame_info_ptr", "frame")],
predicate=True,
invalid=True,
)
Method(
type="CORE_ADDR",
name="frame_align",
params=[("CORE_ADDR", "address")],
predicate=True,
invalid=True,
)
Method(
type="int",
name="stabs_argument_has_addr",
params=[("struct type *", "type")],
predefault="default_stabs_argument_has_addr",
invalid=False,
)
Value(
type="int",
name="frame_red_zone_size",
invalid=False,
)
Method(
type="CORE_ADDR",
name="convert_from_func_ptr_addr",
params=[("CORE_ADDR", "addr"), ("struct target_ops *", "targ")],
predefault="convert_from_func_ptr_addr_identity",
invalid=False,
)
Method(
comment="""
On some machines there are bits in addresses which are not really
part of the address, but are used by the kernel, the hardware, etc.
for special purposes. gdbarch_addr_bits_remove takes out any such bits so
we get a "real" address such as one would find in a symbol table.
This is used only for addresses of instructions, and even then I'm
not sure it's used in all contexts. It exists to deal with there
being a few stray bits in the PC which would mislead us, not as some
sort of generic thing to handle alignment or segmentation (it's
possible it should be in TARGET_READ_PC instead).
""",
type="CORE_ADDR",
name="addr_bits_remove",
params=[("CORE_ADDR", "addr")],
predefault="core_addr_identity",
invalid=False,
)
Value(
comment="""
On some machines, not all bits of an address word are significant.
For example, on AArch64, the top bits of an address known as the "tag"
are ignored by the kernel, the hardware, etc. and can be regarded as
additional data associated with the address.
""",
type="int",
name="significant_addr_bit",
invalid=False,
)
Method(
comment="""
Return a string representation of the memory tag TAG.
""",
type="std::string",
name="memtag_to_string",
params=[("struct value *", "tag")],
predefault="default_memtag_to_string",
invalid=False,
)
Method(
comment="""
Return true if ADDRESS contains a tag and false otherwise. ADDRESS
must be either a pointer or a reference type.
""",
type="bool",
name="tagged_address_p",
params=[("struct value *", "address")],
predefault="default_tagged_address_p",
invalid=False,
)
Method(
comment="""
Return true if the tag from ADDRESS matches the memory tag for that
particular address. Return false otherwise.
""",
type="bool",
name="memtag_matches_p",
params=[("struct value *", "address")],
predefault="default_memtag_matches_p",
invalid=False,
)
Method(
comment="""
Set the tags of type TAG_TYPE, for the memory address range
[ADDRESS, ADDRESS + LENGTH) to TAGS.
Return true if successful and false otherwise.
""",
type="bool",
name="set_memtags",
params=[
("struct value *", "address"),
("size_t", "length"),
("const gdb::byte_vector &", "tags"),
("memtag_type", "tag_type"),
],
predefault="default_set_memtags",
invalid=False,
)
Method(
comment="""
Return the tag of type TAG_TYPE associated with the memory address ADDRESS,
assuming ADDRESS is tagged.
""",
type="struct value *",
name="get_memtag",
params=[("struct value *", "address"), ("memtag_type", "tag_type")],
predefault="default_get_memtag",
invalid=False,
)
Value(
comment="""
memtag_granule_size is the size of the allocation tag granule, for
architectures that support memory tagging.
This is 0 for architectures that do not support memory tagging.
For a non-zero value, this represents the number of bytes of memory per tag.
""",
type="CORE_ADDR",
name="memtag_granule_size",
invalid=False,
)
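# Editor's sketch (not part of the gdbarch component list): the comment on
# memtag_granule_size above says the value is the number of bytes of memory
# covered by each allocation tag.  The helper below is a hypothetical worked
# example of that relationship; the 16-byte default granule is an assumption
# chosen for illustration, not a statement about any particular architecture.
def _example_memtag_count(address, length, granule=16):
    """Return how many allocation tags cover [address, address + length)."""
    if granule == 0 or length == 0:
        # A zero granule means the architecture has no memory tagging.
        return 0
    first = address // granule
    last = (address + length - 1) // granule
    return last - first + 1
# For instance, _example_memtag_count(0x1008, 0x20) == 3: the 0x20-byte range
# starting at 0x1008 touches three 16-byte granules.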
Function(
comment="""
FIXME/cagney/2001-01-18: This should be split in two. A target method that
indicates if the target needs software single step. An ISA method to
implement it.
FIXME/cagney/2001-01-18: The logic is backwards. It should be asking if the
target can single step. If not, then implement single step using breakpoints.
Return a vector of addresses on which the software single step
breakpoints should be inserted. NULL means software single step is
not used.
Multiple breakpoints may be inserted for some instructions such as
conditional branch. However, each implementation must always evaluate
the condition and only put the breakpoint at the branch destination if
the condition is true, so that we ensure forward progress when stepping
past a conditional branch to self.
""",
type="std::vector<CORE_ADDR>",
name="software_single_step",
params=[("struct regcache *", "regcache")],
predicate=True,
invalid=True,
)
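# Editor's sketch, illustrating the rule stated in the comment above for
# conditional branches: place the single-step breakpoint only where execution
# will actually go, so stepping past a conditional branch-to-self still makes
# forward progress.  The instruction model here is hypothetical, not any real
# ISA and not GDB's implementation.
def _example_single_step_addresses(pc, insn_length, is_cond_branch,
                                   condition_true, branch_target):
    """Return the addresses where software single-step breakpoints belong."""
    if is_cond_branch:
        # Evaluate the condition and pick exactly one destination.
        return [branch_target] if condition_true else [pc + insn_length]
    # Plain instruction: break at the next sequential address.
    return [pc + insn_length]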
Method(
comment="""
Return non-zero if the processor is executing a delay slot and a
further single-step is needed before the instruction finishes.
""",
type="int",
name="single_step_through_delay",
params=[("frame_info_ptr", "frame")],
predicate=True,
invalid=True,
)
Function(
comment="""
FIXME: cagney/2003-08-28: Need to find a better way of selecting the
disassembler. Perhaps objdump can handle it?
""",
type="int",
name="print_insn",
params=[("bfd_vma", "vma"), ("struct disassemble_info *", "info")],
predefault="default_print_insn",
invalid=False,
)
Function(
type="CORE_ADDR",
name="skip_trampoline_code",
params=[("frame_info_ptr", "frame"), ("CORE_ADDR", "pc")],
predefault="generic_skip_trampoline_code",
invalid=False,
)
Value(
comment="Vtable of solib operations functions.",
type="const struct target_so_ops *",
name="so_ops",
postdefault="&solib_target_so_ops",
printer="host_address_to_string (gdbarch->so_ops)",
)
Method(
comment="""
If in_solib_dynsym_resolve_code() returns true, and SKIP_SOLIB_RESOLVER
evaluates non-zero, this is the address where the debugger will place
a step-resume breakpoint to get us past the dynamic linker.
""",
type="CORE_ADDR",
name="skip_solib_resolver",
params=[("CORE_ADDR", "pc")],
predefault="generic_skip_solib_resolver",
invalid=False,
)
Method(
comment="""
Some systems also have trampoline code for returning from shared libs.
""",
type="int",
name="in_solib_return_trampoline",
params=[("CORE_ADDR", "pc"), ("const char *", "name")],
predefault="generic_in_solib_return_trampoline",
invalid=False,
)
Method(
comment="""
Return true if PC lies inside an indirect branch thunk.
""",
type="bool",
name="in_indirect_branch_thunk",
params=[("CORE_ADDR", "pc")],
predefault="default_in_indirect_branch_thunk",
invalid=False,
)
Method(
comment="""
A target might have problems with watchpoints as soon as the stack
frame of the current function has been destroyed. This mostly happens
as the first action in a function's epilogue. stack_frame_destroyed_p()
is defined to return a non-zero value if either the given addr is one
instruction after the stack destroying instruction up to the trailing
return instruction or if we can figure out that the stack frame has
already been invalidated regardless of the value of addr. Targets
which don't suffer from that problem could just leave this functionality
untouched.
""",
type="int",
name="stack_frame_destroyed_p",
params=[("CORE_ADDR", "addr")],
predefault="generic_stack_frame_destroyed_p",
invalid=False,
)
Function(
comment="""
Process an ELF symbol in the minimal symbol table in a backend-specific
way. Normally this hook is supposed to do nothing, however if required,
then this hook can be used to apply transformations to symbols that are
considered special in some way. For example the MIPS backend uses it
to interpret `st_other' information to mark compressed code symbols so
that they can be treated in the appropriate manner in the processing of
the main symbol table and DWARF-2 records.
""",
type="void",
name="elf_make_msymbol_special",
params=[("asymbol *", "sym"), ("struct minimal_symbol *", "msym")],
predicate=True,
invalid=True,
)
Function(
type="void",
name="coff_make_msymbol_special",
params=[("int", "val"), ("struct minimal_symbol *", "msym")],
predefault="default_coff_make_msymbol_special",
invalid=False,
)
Function(
comment="""
Process a symbol in the main symbol table in a backend-specific way.
Normally this hook is supposed to do nothing, however if required,
then this hook can be used to apply transformations to symbols that
are considered special in some way. This is currently used by the
MIPS backend to make sure compressed code symbols have the ISA bit
set. This in turn is needed for symbol values seen in GDB to match
the values used at the runtime by the program itself, for function
and label references.
""",
type="void",
name="make_symbol_special",
params=[("struct symbol *", "sym"), ("struct objfile *", "objfile")],
predefault="default_make_symbol_special",
invalid=False,
)
Function(
comment="""
Adjust the address retrieved from a DWARF-2 record other than a line
entry in a backend-specific way. Normally this hook is supposed to
return the address passed unchanged, however if that is incorrect for
any reason, then this hook can be used to fix the address up in the
required manner. This is currently used by the MIPS backend to make
sure addresses in FDE, range records, etc. referring to compressed
code have the ISA bit set, matching line information and the symbol
table.
""",
type="CORE_ADDR",
name="adjust_dwarf2_addr",
params=[("CORE_ADDR", "pc")],
predefault="default_adjust_dwarf2_addr",
invalid=False,
)
Function(
comment="""
Adjust the address updated by a line entry in a backend-specific way.
Normally this hook is supposed to return the address passed unchanged,
however in the case of inconsistencies in these records, this hook can
be used to fix them up in the required manner. This is currently used
by the MIPS backend to make sure all line addresses in compressed code
are presented with the ISA bit set, which is not always the case. This
in turn ensures breakpoint addresses are correctly matched against the
stop PC.
""",
type="CORE_ADDR",
name="adjust_dwarf2_line",
params=[("CORE_ADDR", "addr"), ("int", "rel")],
predefault="default_adjust_dwarf2_line",
invalid=False,
)
Value(
type="int",
name="cannot_step_breakpoint",
predefault="0",
invalid=False,
)
Value(
comment="""
See comment in target.h about continuable, steppable and
non-steppable watchpoints.
""",
type="int",
name="have_nonsteppable_watchpoint",
predefault="0",
invalid=False,
)
Function(
type="type_instance_flags",
name="address_class_type_flags",
params=[("int", "byte_size"), ("int", "dwarf2_addr_class")],
predicate=True,
invalid=True,
)
Method(
type="const char *",
name="address_class_type_flags_to_name",
params=[("type_instance_flags", "type_flags")],
predicate=True,
invalid=True,
)
Method(
comment="""
Execute vendor-specific DWARF Call Frame Instruction. OP is the instruction.
FS is passed from the generic execute_cfa_program function.
""",
type="bool",
name="execute_dwarf_cfa_vendor_op",
params=[("gdb_byte", "op"), ("struct dwarf2_frame_state *", "fs")],
predefault="default_execute_dwarf_cfa_vendor_op",
invalid=False,
)
Method(
comment="""
Return the appropriate type_flags for the supplied address class.
This function should return true if the address class was recognized and
type_flags was set, false otherwise.
""",
type="bool",
name="address_class_name_to_type_flags",
params=[("const char *", "name"), ("type_instance_flags *", "type_flags_ptr")],
predicate=True,
invalid=True,
)
Method(
comment="""
Is a register in a group
""",
type="int",
name="register_reggroup_p",
params=[("int", "regnum"), ("const struct reggroup *", "reggroup")],
predefault="default_register_reggroup_p",
invalid=False,
)
Function(
comment="""
Fetch the pointer to the ith function argument.
""",
type="CORE_ADDR",
name="fetch_pointer_argument",
params=[
("frame_info_ptr", "frame"),
("int", "argi"),
("struct type *", "type"),
],
predicate=True,
invalid=True,
)
Method(
comment="""
Iterate over all supported register notes in a core file. For each
supported register note section, the iterator must call CB and pass
CB_DATA unchanged. If REGCACHE is not NULL, the iterator can limit
the supported register note sections based on the current register
values. Otherwise it should enumerate all supported register note
sections.
""",
type="void",
name="iterate_over_regset_sections",
params=[
("iterate_over_regset_sections_cb *", "cb"),
("void *", "cb_data"),
("const struct regcache *", "regcache"),
],
predicate=True,
invalid=True,
)
Method(
comment="""
Create core file notes
""",
type="gdb::unique_xmalloc_ptr<char>",
name="make_corefile_notes",
params=[("bfd *", "obfd"), ("int *", "note_size")],
predicate=True,
invalid=True,
)
Method(
comment="""
Find core file memory regions
""",
type="int",
name="find_memory_regions",
params=[("find_memory_region_ftype", "func"), ("void *", "data")],
predicate=True,
invalid=True,
)
Method(
comment="""
Given a bfd OBFD, segment ADDRESS and SIZE, create a memory tag section to be dumped to a core file
""",
type="asection *",
name="create_memtag_section",
params=[("bfd *", "obfd"), ("CORE_ADDR", "address"), ("size_t", "size")],
predicate=True,
invalid=True,
)
Method(
comment="""
Given a memory tag section OSEC, fill OSEC's contents with the appropriate tag data
""",
type="bool",
name="fill_memtag_section",
params=[("asection *", "osec")],
predicate=True,
invalid=True,
)
Method(
comment="""
Decode a memory tag SECTION and return the tags of type TYPE contained in
the memory range [ADDRESS, ADDRESS + LENGTH).
If no tags were found, return an empty vector.
""",
type="gdb::byte_vector",
name="decode_memtag_section",
params=[
("bfd_section *", "section"),
("int", "type"),
("CORE_ADDR", "address"),
("size_t", "length"),
],
predicate=True,
invalid=True,
)
Method(
comment="""
Read offset OFFSET of TARGET_OBJECT_LIBRARIES formatted shared libraries list from
core file into buffer READBUF with length LEN. Return the number of bytes read
(zero indicates failure).
""",
type="ULONGEST",
name="core_xfer_shared_libraries",
params=[("gdb_byte *", "readbuf"), ("ULONGEST", "offset"), ("ULONGEST", "len")],
predicate=True,
invalid=True,
)
Method(
comment="""
Read offset OFFSET of TARGET_OBJECT_LIBRARIES_AIX formatted shared
libraries list from core file into buffer READBUF with length LEN.
Return the number of bytes read (zero indicates failure).
""",
type="ULONGEST",
name="core_xfer_shared_libraries_aix",
params=[("gdb_byte *", "readbuf"), ("ULONGEST", "offset"), ("ULONGEST", "len")],
predicate=True,
invalid=True,
)
Method(
comment="""
How the core target converts a PTID from a core file to a string.
""",
type="std::string",
name="core_pid_to_str",
params=[("ptid_t", "ptid")],
predicate=True,
invalid=True,
)
Method(
comment="""
How the core target extracts the name of a thread from a core file.
""",
type="const char *",
name="core_thread_name",
params=[("struct thread_info *", "thr")],
predicate=True,
invalid=True,
)
Method(
comment="""
Read offset OFFSET of TARGET_OBJECT_SIGNAL_INFO signal information
from core file into buffer READBUF with length LEN. Return the number
of bytes read (zero indicates EOF, a negative value indicates failure).
""",
type="LONGEST",
name="core_xfer_siginfo",
params=[("gdb_byte *", "readbuf"), ("ULONGEST", "offset"), ("ULONGEST", "len")],
predicate=True,
invalid=True,
)
Value(
comment="""
BFD target to use when generating a core file.
""",
type="const char *",
name="gcore_bfd_target",
predicate=True,
predefault="0",
invalid=True,
printer="pstring (gdbarch->gcore_bfd_target)",
)
Value(
comment="""
If the elements of C++ vtables are in-place function descriptors rather
than normal function pointers (which may point to code or a descriptor),
set this to one.
""",
type="int",
name="vtable_function_descriptors",
predefault="0",
invalid=False,
)
Value(
comment="""
Set if the least significant bit of the delta is used instead of the least
significant bit of the pfn for pointers to virtual member functions.
""",
type="int",
name="vbit_in_delta",
predefault="0",
invalid=False,
)
Function(
comment="""
Advance PC to next instruction in order to skip a permanent breakpoint.
""",
type="void",
name="skip_permanent_breakpoint",
params=[("struct regcache *", "regcache")],
predefault="default_skip_permanent_breakpoint",
invalid=False,
)
Value(
comment="""
The maximum length of an instruction on this architecture in bytes.
""",
type="ULONGEST",
name="max_insn_length",
predicate=True,
predefault="0",
invalid=True,
)
Method(
comment="""
Copy the instruction at FROM to TO, and make any adjustments
necessary to single-step it at that address.
REGS holds the state the thread's registers will have before
executing the copied instruction; the PC in REGS will refer to FROM,
not the copy at TO. The caller should update it to point at TO later.
Return a pointer to data of the architecture's choice to be passed
to gdbarch_displaced_step_fixup.
For a general explanation of displaced stepping and how GDB uses it,
see the comments in infrun.c.
The TO area is only guaranteed to have space for
gdbarch_max_insn_length (arch) bytes, so this function must not
write more bytes than that to that area.
If you do not provide this function, GDB assumes that the
architecture does not support displaced stepping.
If the instruction cannot execute out of line, return NULL. The
core falls back to stepping past the instruction in-line instead in
that case.
""",
type="displaced_step_copy_insn_closure_up",
name="displaced_step_copy_insn",
params=[("CORE_ADDR", "from"), ("CORE_ADDR", "to"), ("struct regcache *", "regs")],
predicate=True,
invalid=True,
)
Method(
comment="""
Return true if GDB should use hardware single-stepping to execute a displaced
step instruction. If false, GDB will simply restart execution at the
displaced instruction location, and it is up to the target to ensure GDB will
receive control again (e.g. by placing a software breakpoint instruction into
the displaced instruction buffer).
The default implementation returns false on all targets that provide a
gdbarch_software_single_step routine, and true otherwise.
""",
type="bool",
name="displaced_step_hw_singlestep",
params=[],
predefault="default_displaced_step_hw_singlestep",
invalid=False,
)
Method(
comment="""
Fix up the state resulting from successfully single-stepping a
displaced instruction, to give the result we would have gotten from
stepping the instruction in its original location.
REGS is the register state resulting from single-stepping the
displaced instruction.
CLOSURE is the result from the matching call to
gdbarch_displaced_step_copy_insn.
If you provide gdbarch_displaced_step_copy_insn but not this
function, then GDB assumes that no fixup is needed after
single-stepping the instruction.
For a general explanation of displaced stepping and how GDB uses it,
see the comments in infrun.c.
""",
type="void",
name="displaced_step_fixup",
params=[
("struct displaced_step_copy_insn_closure *", "closure"),
("CORE_ADDR", "from"),
("CORE_ADDR", "to"),
("struct regcache *", "regs"),
],
predicate=True,
predefault="NULL",
invalid=True,
)
Method(
comment="""
Prepare THREAD for it to displaced step the instruction at its current PC.
Throw an exception if any unexpected error happens.
""",
type="displaced_step_prepare_status",
name="displaced_step_prepare",
params=[("thread_info *", "thread"), ("CORE_ADDR &", "displaced_pc")],
predicate=True,
invalid=True,
)
Method(
comment="""
Clean up after a displaced step of THREAD.
""",
type="displaced_step_finish_status",
name="displaced_step_finish",
params=[("thread_info *", "thread"), ("gdb_signal", "sig")],
predefault="NULL",
invalid="(! gdbarch->displaced_step_finish) != (! gdbarch->displaced_step_prepare)",
)
Function(
comment="""
Return the closure associated to the displaced step buffer that is at ADDR.
""",
type="const displaced_step_copy_insn_closure *",
name="displaced_step_copy_insn_closure_by_addr",
params=[("inferior *", "inf"), ("CORE_ADDR", "addr")],
predicate=True,
invalid=True,
)
Function(
comment="""
PARENT_INF has forked and CHILD_PTID is the ptid of the child. Restore the
contents of all displaced step buffers in the child's address space.
""",
type="void",
name="displaced_step_restore_all_in_ptid",
params=[("inferior *", "parent_inf"), ("ptid_t", "child_ptid")],
invalid=False,
)
Method(
comment="""
Relocate an instruction to execute at a different address. OLDLOC
is the address in the inferior memory where the instruction to
relocate is currently at. On input, TO points to the destination
where we want the instruction to be copied (and possibly adjusted)
to. On output, it points to one past the end of the resulting
instruction(s). The effect of executing the instruction at TO shall
be the same as if executing it at FROM. For example, call
instructions that implicitly push the return address on the stack
should be adjusted to return to the instruction after OLDLOC;
relative branches, and other PC-relative instructions need the
offset adjusted; etc.
""",
type="void",
name="relocate_instruction",
params=[("CORE_ADDR *", "to"), ("CORE_ADDR", "from")],
predicate=True,
predefault="NULL",
invalid=True,
)
Function(
comment="""
Refresh overlay mapped state for section OSECT.
""",
type="void",
name="overlay_update",
params=[("struct obj_section *", "osect")],
predicate=True,
invalid=True,
)
Method(
type="const struct target_desc *",
name="core_read_description",
params=[("struct target_ops *", "target"), ("bfd *", "abfd")],
predicate=True,
invalid=True,
)
Value(
comment="""
Set if the address in N_SO or N_FUN stabs may be zero.
""",
type="int",
name="sofun_address_maybe_missing",
predefault="0",
invalid=False,
)
Method(
comment="""
Parse the instruction at ADDR storing in the record execution log
the registers REGCACHE and memory ranges that will be affected when
the instruction executes, along with their current values.
Return -1 if something goes wrong, 0 otherwise.
""",
type="int",
name="process_record",
params=[("struct regcache *", "regcache"), ("CORE_ADDR", "addr")],
predicate=True,
invalid=True,
)
Method(
comment="""
Save process state after a signal.
Return -1 if something goes wrong, 0 otherwise.
""",
type="int",
name="process_record_signal",
params=[("struct regcache *", "regcache"), ("enum gdb_signal", "signal")],
predicate=True,
invalid=True,
)
Method(
comment="""
Signal translation: translate inferior's signal (target's) number
into GDB's representation. The implementation of this method must
be host independent. IOW, don't rely on symbols of the NAT_FILE
header (the nm-*.h files), the host <signal.h> header, or similar
headers. This is mainly used when cross-debugging core files ---
"Live" targets hide the translation behind the target interface
(target_wait, target_resume, etc.).
""",
type="enum gdb_signal",
name="gdb_signal_from_target",
params=[("int", "signo")],
predicate=True,
invalid=True,
)
Method(
comment="""
Signal translation: translate the GDB's internal signal number into
the inferior's signal (target's) representation. The implementation
of this method must be host independent. IOW, don't rely on symbols
of the NAT_FILE header (the nm-*.h files), the host <signal.h>
header, or similar headers.
Return the target signal number if found, or -1 if the GDB internal
signal number is invalid.
""",
type="int",
name="gdb_signal_to_target",
params=[("enum gdb_signal", "signal")],
predicate=True,
invalid=True,
)
Method(
comment="""
Extra signal info inspection.
Return a type suitable to inspect extra signal information.
""",
type="struct type *",
name="get_siginfo_type",
params=[],
predicate=True,
invalid=True,
)
Method(
comment="""
Record architecture-specific information from the symbol table.
""",
type="void",
name="record_special_symbol",
params=[("struct objfile *", "objfile"), ("asymbol *", "sym")],
predicate=True,
invalid=True,
)
Method(
comment="""
Function for the 'catch syscall' feature.
Get architecture-specific system calls information from registers.
""",
type="LONGEST",
name="get_syscall_number",
params=[("thread_info *", "thread")],
predicate=True,
invalid=True,
)
Value(
comment="""
The filename of the XML syscall for this architecture.
""",
type="const char *",
name="xml_syscall_file",
predefault="0",
invalid=False,
printer="pstring (gdbarch->xml_syscall_file)",
)
Value(
comment="""
Information about system calls from this architecture
""",
type="struct syscalls_info *",
name="syscalls_info",
predefault="0",
invalid=False,
printer="host_address_to_string (gdbarch->syscalls_info)",
)
Value(
comment="""
SystemTap related fields and functions.
A NULL-terminated array of prefixes used to mark an integer constant
on the architecture's assembly.
For example, on x86 integer constants are written as:
$10 ;; integer constant 10
in this case, this prefix would be the character `$'.
""",
type="const char *const *",
name="stap_integer_prefixes",
predefault="0",
invalid=False,
printer="pstring_list (gdbarch->stap_integer_prefixes)",
)
Value(
comment="""
A NULL-terminated array of suffixes used to mark an integer constant
on the architecture's assembly.
""",
type="const char *const *",
name="stap_integer_suffixes",
predefault="0",
invalid=False,
printer="pstring_list (gdbarch->stap_integer_suffixes)",
)
Value(
comment="""
A NULL-terminated array of prefixes used to mark a register name on
the architecture's assembly.
For example, on x86 the register name is written as:
%eax ;; register eax
in this case, this prefix would be the character `%'.
""",
type="const char *const *",
name="stap_register_prefixes",
predefault="0",
invalid=False,
printer="pstring_list (gdbarch->stap_register_prefixes)",
)
Value(
comment="""
A NULL-terminated array of suffixes used to mark a register name on
the architecture's assembly.
""",
type="const char *const *",
name="stap_register_suffixes",
predefault="0",
invalid=False,
printer="pstring_list (gdbarch->stap_register_suffixes)",
)
Value(
comment="""
A NULL-terminated array of prefixes used to mark a register
indirection on the architecture's assembly.
For example, on x86 the register indirection is written as:
(%eax) ;; indirecting eax
in this case, this prefix would be the character `('.
Please note that we use the indirection prefix also for register
displacement, e.g., `4(%eax)' on x86.
""",
type="const char *const *",
name="stap_register_indirection_prefixes",
predefault="0",
invalid=False,
printer="pstring_list (gdbarch->stap_register_indirection_prefixes)",
)
Value(
comment="""
A NULL-terminated array of suffixes used to mark a register
indirection on the architecture's assembly.
For example, on x86 the register indirection is written as:
(%eax) ;; indirecting eax
in this case, this suffix would be the character `)'.
Please note that we use the indirection suffix also for register
displacement, e.g., `4(%eax)' on x86.
""",
type="const char *const *",
name="stap_register_indirection_suffixes",
predefault="0",
invalid=False,
printer="pstring_list (gdbarch->stap_register_indirection_suffixes)",
)
Value(
comment="""
Prefix(es) used to name a register using GDB's nomenclature.
For example, on PPC a register is represented by a number in the assembly
language (e.g., `10' is the 10th general-purpose register). However,
inside GDB this same register has an `r' prepended to its name, so the 10th
register would be represented as `r10' internally.
""",
type="const char *",
name="stap_gdb_register_prefix",
predefault="0",
invalid=False,
printer="pstring (gdbarch->stap_gdb_register_prefix)",
)
Value(
comment="""
Suffix used to name a register using GDB's nomenclature.
""",
type="const char *",
name="stap_gdb_register_suffix",
predefault="0",
invalid=False,
printer="pstring (gdbarch->stap_gdb_register_suffix)",
)
Method(
comment="""
Check if S is a single operand.
Single operands can be:
- Literal integers, e.g. `$10' on x86
- Register access, e.g. `%eax' on x86
- Register indirection, e.g. `(%eax)' on x86
- Register displacement, e.g. `4(%eax)' on x86
This function should check for these patterns on the string
and return 1 if some were found, or zero otherwise. Please try to match
as much info as you can from the string, i.e., if you have to match
something like `(%', do not match just the `('.
""",
type="int",
name="stap_is_single_operand",
params=[("const char *", "s")],
predicate=True,
invalid=True,
)
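# Editor's sketch of the single-operand patterns listed in the comment above,
# using the x86 notation given there.  The regular expression is purely
# illustrative and is not the matching GDB performs; it only makes the four
# pattern kinds (literal, register, indirection, displacement) concrete.
import re
_X86_SINGLE_OPERAND_RE = re.compile(
    r"^(?:"
    r"\$-?\d+"          # literal integer, e.g. $10
    r"|%\w+"            # register access, e.g. %eax
    r"|-?\d*\(%\w+\)"   # register indirection or displacement, e.g. (%eax), 4(%eax)
    r")$"
)
def _example_stap_is_single_operand(s):
    """Return 1 if S matches one of the sketched x86 operand forms, else 0."""
    return 1 if _X86_SINGLE_OPERAND_RE.match(s) else 0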
Method(
comment="""
Function used to handle a "special case" in the parser.
A "special case" is considered to be an unknown token, i.e., a token
that the parser does not know how to parse. A good example of special
case would be ARM's register displacement syntax:
[R0, #4] ;; displacing R0 by 4
Since the parser assumes that a register displacement is of the form:
<number> <indirection_prefix> <register_name> <indirection_suffix>
it means that it will not be able to recognize and parse this odd syntax.
Therefore, we should add a special case function that will handle this token.
This function should generate the proper expression form of the expression
using GDB's internal expression mechanism (e.g., `write_exp_elt_opcode'
and so on). It should also return 1 if the parsing was successful, or zero
if the token was not recognized as a special token (in this case, returning
zero means that the special parser is deferring the parsing to the generic
parser), and should advance the buffer pointer (p->arg).
""",
type="expr::operation_up",
name="stap_parse_special_token",
params=[("struct stap_parse_info *", "p")],
predicate=True,
invalid=True,
)
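# Editor's sketch of the "special case" described above: recognizing ARM's
# "[R0, #4]" displacement syntax.  The real hook builds a GDB expression via
# the internal expression machinery; this hypothetical helper only shows the
# shape of the token being recognized and returns its pieces instead.
import re
_ARM_DISPLACEMENT_RE = re.compile(r"^\[\s*(\w+)\s*,\s*#(-?\d+)\s*\]")
def _example_parse_arm_displacement(token):
    """Return (register, displacement) for tokens like '[R0, #4]', else None."""
    m = _ARM_DISPLACEMENT_RE.match(token)
    if m is None:
        return None
    return (m.group(1), int(m.group(2)))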
Method(
comment="""
Perform arch-dependent adjustments to a register name.
In very specific situations, it may be necessary for the register
name present in a SystemTap probe's argument to be handled in a
special way. For example, on i386, GCC may over-optimize the
register allocation and use smaller registers than necessary. In
such cases, the client that is reading and evaluating the SystemTap
probe (ourselves) will need to actually fetch values from the wider
version of the register in question.
To illustrate the example, consider the following probe argument
(i386):
4@%ax
This argument says that its value can be found at the %ax register,
which is a 16-bit register. However, the argument's prefix says
that its type is "uint32_t", which is 32-bit in size. Therefore, in
this case, GDB should actually fetch the probe's value from register
%eax, not %ax. In this scenario, this function would actually
replace the register name from %ax to %eax.
The rationale for this can be found at PR breakpoints/24541.
""",
type="std::string",
name="stap_adjust_register",
params=[
("struct stap_parse_info *", "p"),
("const std::string &", "regname"),
("int", "regnum"),
],
predicate=True,
invalid=True,
)
Method(
comment="""
DTrace related functions.
The expression to compute the (NARG+1)th argument to a DTrace USDT probe.
NARG must be >= 0.
""",
type="expr::operation_up",
name="dtrace_parse_probe_argument",
params=[("int", "narg")],
predicate=True,
invalid=True,
)
Method(
comment="""
True if the given ADDR does not contain the instruction sequence
corresponding to a disabled DTrace is-enabled probe.
""",
type="int",
name="dtrace_probe_is_enabled",
params=[("CORE_ADDR", "addr")],
predicate=True,
invalid=True,
)
Method(
comment="""
Enable a DTrace is-enabled probe at ADDR.
""",
type="void",
name="dtrace_enable_probe",
params=[("CORE_ADDR", "addr")],
predicate=True,
invalid=True,
)
Method(
comment="""
Disable a DTrace is-enabled probe at ADDR.
""",
type="void",
name="dtrace_disable_probe",
params=[("CORE_ADDR", "addr")],
predicate=True,
invalid=True,
)
Value(
comment="""
True if the list of shared libraries is one and only for all
processes, as opposed to a list of shared libraries per inferior.
This usually means that all processes, although may or may not share
an address space, will see the same set of symbols at the same
addresses.
""",
type="int",
name="has_global_solist",
predefault="0",
invalid=False,
)
Value(
comment="""
On some targets, even though each inferior has its own private
address space, the debug interface takes care of making breakpoints
visible to all address spaces automatically. For such cases,
this property should be set to true.
""",
type="int",
name="has_global_breakpoints",
predefault="0",
invalid=False,
)
Method(
comment="""
True if inferiors share an address space (e.g., uClinux).
""",
type="int",
name="has_shared_address_space",
params=[],
predefault="default_has_shared_address_space",
invalid=False,
)
Method(
comment="""
True if a fast tracepoint can be set at an address.
""",
type="int",
name="fast_tracepoint_valid_at",
params=[("CORE_ADDR", "addr"), ("std::string *", "msg")],
predefault="default_fast_tracepoint_valid_at",
invalid=False,
)
Method(
comment="""
Guess register state based on tracepoint location. Used for tracepoints
where no registers have been collected, but there's only one location,
allowing us to guess the PC value, and perhaps some other registers.
On entry, regcache has all registers marked as unavailable.
""",
type="void",
name="guess_tracepoint_registers",
params=[("struct regcache *", "regcache"), ("CORE_ADDR", "addr")],
predefault="default_guess_tracepoint_registers",
invalid=False,
)
Function(
comment="""
Return the "auto" target charset.
""",
type="const char *",
name="auto_charset",
params=[],
predefault="default_auto_charset",
invalid=False,
)
Function(
comment="""
Return the "auto" target wide charset.
""",
type="const char *",
name="auto_wide_charset",
params=[],
predefault="default_auto_wide_charset",
invalid=False,
)
Value(
comment="""
If non-empty, this is a file extension that will be opened in place
of the file extension reported by the shared library list.
This is most useful for toolchains that use a post-linker tool,
where the names of the files run on the target differ in extension
compared to the names of the files GDB should load for debug info.
""",
type="const char *",
name="solib_symbols_extension",
invalid=False,
printer="pstring (gdbarch->solib_symbols_extension)",
)
Value(
comment="""
If true, the target OS has DOS-based file system semantics. That
is, absolute paths include a drive name, and the backslash is
considered a directory separator.
""",
type="int",
name="has_dos_based_file_system",
predefault="0",
invalid=False,
)
Method(
comment="""
Generate bytecodes to collect the return address in a frame.
Since the bytecodes run on the target, possibly with GDB not even
connected, the full unwinding machinery is not available, and
typically this function will issue bytecodes for one or more likely
places that the return address may be found.
""",
type="void",
name="gen_return_address",
params=[
("struct agent_expr *", "ax"),
("struct axs_value *", "value"),
("CORE_ADDR", "scope"),
],
predefault="default_gen_return_address",
invalid=False,
)
Method(
comment="""
Implement the "info proc" command.
""",
type="void",
name="info_proc",
params=[("const char *", "args"), ("enum info_proc_what", "what")],
predicate=True,
invalid=True,
)
Method(
comment="""
Implement the "info proc" command for core files. Noe that there
are two "info_proc"-like methods on gdbarch -- one for core files,
one for live targets.
""",
type="void",
name="core_info_proc",
params=[("const char *", "args"), ("enum info_proc_what", "what")],
predicate=True,
invalid=True,
)
Method(
comment="""
Iterate over all objfiles in the order that makes the most sense
for the architecture to make global symbol searches.
CB is a callback function passed an objfile to be searched. The iteration stops
if this function returns nonzero.
If not NULL, CURRENT_OBJFILE corresponds to the objfile being
inspected when the symbol search was requested.
""",
type="void",
name="iterate_over_objfiles_in_search_order",
params=[
("iterate_over_objfiles_in_search_order_cb_ftype", "cb"),
("struct objfile *", "current_objfile"),
],
predefault="default_iterate_over_objfiles_in_search_order",
invalid=False,
)
Value(
comment="""
Ravenscar arch-dependent ops.
""",
type="struct ravenscar_arch_ops *",
name="ravenscar_ops",
predefault="NULL",
invalid=False,
printer="host_address_to_string (gdbarch->ravenscar_ops)",
)
Method(
comment="""
Return non-zero if the instruction at ADDR is a call; zero otherwise.
""",
type="int",
name="insn_is_call",
params=[("CORE_ADDR", "addr")],
predefault="default_insn_is_call",
invalid=False,
)
Method(
comment="""
Return non-zero if the instruction at ADDR is a return; zero otherwise.
""",
type="int",
name="insn_is_ret",
params=[("CORE_ADDR", "addr")],
predefault="default_insn_is_ret",
invalid=False,
)
Method(
comment="""
Return non-zero if the instruction at ADDR is a jump; zero otherwise.
""",
type="int",
name="insn_is_jump",
params=[("CORE_ADDR", "addr")],
predefault="default_insn_is_jump",
invalid=False,
)
Method(
comment="""
Return true if there's a program/permanent breakpoint planted in
memory at ADDRESS, return false otherwise.
""",
type="bool",
name="program_breakpoint_here_p",
params=[("CORE_ADDR", "address")],
predefault="default_program_breakpoint_here_p",
invalid=False,
)
Method(
comment="""
Read one auxv entry from *READPTR, not reading locations >= ENDPTR.
Return 0 if *READPTR is already at the end of the buffer.
Return -1 if there is insufficient buffer for a whole entry.
Return 1 if an entry was read into *TYPEP and *VALP.
""",
type="int",
name="auxv_parse",
params=[
("const gdb_byte **", "readptr"),
("const gdb_byte *", "endptr"),
("CORE_ADDR *", "typep"),
("CORE_ADDR *", "valp"),
],
predicate=True,
invalid=True,
)
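# Editor's sketch of the auxv_parse contract documented above, written in
# Python rather than C.  The 8-byte little-endian word size is an assumption
# made for illustration only; the real hook honours the target's word size
# and byte order and advances *READPTR in place.
import struct
def _example_auxv_parse(buf, offset):
    """Read one (type, value) auxv entry from BUF at OFFSET.

    Returns a tuple (status, new_offset, a_type, a_val) where status is
    1 if an entry was read, 0 if OFFSET is already at the end of BUF, and
    -1 if the remaining bytes cannot hold a whole entry.
    """
    word = 8  # assumed target word size for this sketch
    if offset >= len(buf):
        return (0, offset, None, None)
    if offset + 2 * word > len(buf):
        return (-1, offset, None, None)
    a_type, a_val = struct.unpack_from("<QQ", buf, offset)
    return (1, offset + 2 * word, a_type, a_val)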
Method(
comment="""
Print the description of a single auxv entry described by TYPE and VAL
to FILE.
""",
type="void",
name="print_auxv_entry",
params=[("struct ui_file *", "file"), ("CORE_ADDR", "type"), ("CORE_ADDR", "val")],
predefault="default_print_auxv_entry",
invalid=False,
)
Method(
comment="""
Find the address range of the current inferior's vsyscall/vDSO, and
write it to *RANGE. If the vsyscall's length can't be determined, a
range with zero length is returned. Returns true if the vsyscall is
found, false otherwise.
""",
type="int",
name="vsyscall_range",
params=[("struct mem_range *", "range")],
predefault="default_vsyscall_range",
invalid=False,
)
Function(
comment="""
Allocate SIZE bytes of PROT protected page aligned memory in inferior.
PROT has GDB_MMAP_PROT_* bitmask format.
Throw an error if it is not possible. Returned address is always valid.
""",
type="CORE_ADDR",
name="infcall_mmap",
params=[("CORE_ADDR", "size"), ("unsigned", "prot")],
predefault="default_infcall_mmap",
invalid=False,
)
Function(
comment="""
Deallocate SIZE bytes of memory at ADDR in inferior from gdbarch_infcall_mmap.
Print a warning if it is not possible.
""",
type="void",
name="infcall_munmap",
params=[("CORE_ADDR", "addr"), ("CORE_ADDR", "size")],
predefault="default_infcall_munmap",
invalid=False,
)
Method(
comment="""
Return a string with options for GCC
to produce code for this target, typically "-m64", "-m32" or "-m31".
These options are put before CU's DW_AT_producer compilation options so that
they can override it.
""",
type="std::string",
name="gcc_target_options",
params=[],
predefault="default_gcc_target_options",
invalid=False,
)
Method(
comment="""
Return a regular expression that matches names used by this
architecture in GNU configury triplets. The result is statically
allocated and must not be freed. The default implementation simply
returns the BFD architecture name, which is correct in nearly every
case.
""",
type="const char *",
name="gnu_triplet_regexp",
params=[],
predefault="default_gnu_triplet_regexp",
invalid=False,
)
Method(
comment="""
Return the size in 8-bit bytes of an addressable memory unit on this
architecture. This corresponds to the number of 8-bit bytes associated to
each address in memory.
""",
type="int",
name="addressable_memory_unit_size",
params=[],
predefault="default_addressable_memory_unit_size",
invalid=False,
)
Value(
comment="""
Functions for allowing a target to modify its disassembler options.
""",
type="const char *",
name="disassembler_options_implicit",
predefault="0",
invalid=False,
printer="pstring (gdbarch->disassembler_options_implicit)",
)
Value(
type="char **",
name="disassembler_options",
predefault="0",
invalid=False,
printer="pstring_ptr (gdbarch->disassembler_options)",
)
Value(
type="const disasm_options_and_args_t *",
name="valid_disassembler_options",
predefault="0",
invalid=False,
printer="host_address_to_string (gdbarch->valid_disassembler_options)",
)
Method(
comment="""
Type alignment override method. Return the architecture specific
alignment required for TYPE. If there is no special handling
required for TYPE then return the value 0, GDB will then apply the
default rules as laid out in gdbtypes.c:type_align.
""",
type="ULONGEST",
name="type_align",
params=[("struct type *", "type")],
predefault="default_type_align",
invalid=False,
)
Function(
comment="""
Return a string containing any flags for the given PC in the given FRAME.
""",
type="std::string",
name="get_pc_address_flags",
params=[("frame_info_ptr", "frame"), ("CORE_ADDR", "pc")],
predefault="default_get_pc_address_flags",
invalid=False,
)
Method(
comment="""
Read core file mappings
""",
type="void",
name="read_core_file_mappings",
params=[
("struct bfd *", "cbfd"),
("read_core_file_mappings_pre_loop_ftype", "pre_loop_cb"),
("read_core_file_mappings_loop_ftype", "loop_cb"),
],
predefault="default_read_core_file_mappings",
invalid=False,
)
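# A hedged, illustrative sketch only: a new entry in this file would follow
# the same pattern as the declarations above.  The component name
# "example_register_count" and its defaults are hypothetical, and the block
# is commented out so the gdbarch generator does not pick it up.
#
# Value(
#     comment="""
# Purely illustrative: number of example registers.
# """,
#     type="int",
#     name="example_register_count",
#     predefault="0",
#     invalid=False,
# )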
| cuda-gdb-master | gdb/gdbarch-components.py |
# Copyright constant for Python code to use.
#
# Copyright (C) 2022 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
def copyright(tool, description):
# Search the tool source itself for the correct copyright years.
with open(tool, "r") as f:
for line in f:
if line.startswith("# Copyright (C) "):
dateline = line[1:].strip()
break
return f"""/* *INDENT-OFF* */ /* THIS FILE IS GENERATED -*- buffer-read-only: t -*- */
/* vi:set ro: */
/* {description}
{dateline}
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* To regenerate this file, run:
./{tool}
*/
"""
| cuda-gdb-master | gdb/gdbcopyright.py |
#!/usr/bin/env python3
# Generate Unicode case-folding table for Ada.
# Copyright (C) 2022 Free Software Foundation, Inc.
# This file is part of GDB.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This generates the ada-casefold.h header.
# Usage:
# python ada-unicode.py
import gdbcopyright
# The start of the current range of case-conversions we are
# processing. If RANGE_START is None, then we're outside of a range.
range_start = None
# End of the current range.
range_end = None
# The delta between RANGE_START and the upper-case variant of that
# character.
upper_delta = None
# The delta between RANGE_START and the lower-case variant of that
# character.
lower_delta = None
# All the ranges found and completed so far.
# Each entry is a tuple of the form (START, END, UPPER_DELTA, LOWER_DELTA).
all_ranges = []
def finish_range():
global range_start
global range_end
global upper_delta
global lower_delta
if range_start is not None:
all_ranges.append((range_start, range_end, upper_delta, lower_delta))
range_start = None
range_end = None
upper_delta = None
lower_delta = None
def process_codepoint(val):
global range_start
global range_end
global upper_delta
global lower_delta
c = chr(val)
low = c.lower()
up = c.upper()
# U+00DF ("LATIN SMALL LETTER SHARP S", aka eszsett) traditionally
# upper-cases to the two-character string "SS" (the capital form
# is a relatively recent addition -- 2017). Our simple scheme
# can't handle this, so we skip it. Also, because our approach
# just represents runs of characters with identical folding
# deltas, this change must terminate the current run.
if (c == low and c == up) or len(low) != 1 or len(up) != 1:
finish_range()
return
updelta = ord(up) - val
lowdelta = ord(low) - val
if range_start is not None and (updelta != upper_delta or lowdelta != lower_delta):
finish_range()
if range_start is None:
range_start = val
upper_delta = updelta
lower_delta = lowdelta
range_end = val
for c in range(0, 0x10FFFF):
process_codepoint(c)
with open("ada-casefold.h", "w") as f:
print(
gdbcopyright.copyright("ada-unicode.py", "UTF-32 case-folding for GDB"),
file=f,
)
for r in all_ranges:
print(f" {{{r[0]}, {r[1]}, {r[2]}, {r[3]}}},", file=f)
| cuda-gdb-master | gdb/ada-unicode.py |
#!/usr/bin/env python3
# Architecture commands for GDB, the GNU debugger.
#
# Copyright (C) 1998-2022 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import textwrap
import gdbcopyright
# All the components created in gdbarch-components.py.
components = []
def indentation(n_columns):
"""Return string with tabs and spaces to indent line to N_COLUMNS."""
return "\t" * (n_columns // 8) + " " * (n_columns % 8)
def join_type_and_name(t, n):
"Combine the type T and the name N into a C declaration."
if t.endswith("*") or t.endswith("&"):
return t + n
else:
return t + " " + n
def join_params(params):
"""Given a sequence of (TYPE, NAME) pairs, generate a comma-separated
list of declarations."""
params = [join_type_and_name(p[0], p[1]) for p in params]
return ", ".join(params)
class _Component:
"Base class for all components."
def __init__(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
components.append(self)
# It doesn't make sense to have a check of the result value
# for a function or method with void return type.
if self.type == "void" and self.result_checks:
raise Exception("can't have result checks with a void return type")
def get_predicate(self):
"Return the expression used for validity checking."
assert self.predicate and not isinstance(self.invalid, str)
if self.predefault:
predicate = f"gdbarch->{self.name} != {self.predefault}"
        elif isinstance(self, Value):
predicate = f"gdbarch->{self.name} != 0"
else:
predicate = f"gdbarch->{self.name} != NULL"
return predicate
class Info(_Component):
"An Info component is copied from the gdbarch_info."
def __init__(self, *, name, type, printer=None):
super().__init__(name=name, type=type, printer=printer)
# This little hack makes the generator a bit simpler.
self.predicate = None
class Value(_Component):
"A Value component is just a data member."
def __init__(
self,
*,
name,
type,
comment=None,
predicate=None,
predefault=None,
postdefault=None,
invalid=None,
printer=None,
):
super().__init__(
comment=comment,
name=name,
type=type,
predicate=predicate,
predefault=predefault,
postdefault=postdefault,
invalid=invalid,
printer=printer,
)
class Function(_Component):
"A Function component is a function pointer member."
def __init__(
self,
*,
name,
type,
params,
comment=None,
predicate=None,
predefault=None,
postdefault=None,
invalid=None,
printer=None,
param_checks=None,
result_checks=None,
):
super().__init__(
comment=comment,
name=name,
type=type,
predicate=predicate,
predefault=predefault,
postdefault=postdefault,
invalid=invalid,
printer=printer,
params=params,
param_checks=param_checks,
result_checks=result_checks,
)
def ftype(self):
"Return the name of the function typedef to use."
return f"gdbarch_{self.name}_ftype"
def param_list(self):
"Return the formal parameter list as a string."
return join_params(self.params)
def set_list(self):
"""Return the formal parameter list of the caller function,
as a string. This list includes the gdbarch."""
arch_arg = ("struct gdbarch *", "gdbarch")
arch_tuple = [arch_arg]
return join_params(arch_tuple + list(self.params))
def actuals(self):
"Return the actual parameters to forward, as a string."
return ", ".join([p[1] for p in self.params])
class Method(Function):
"A Method is like a Function but passes the gdbarch through."
def param_list(self):
"See superclass."
return self.set_list()
def actuals(self):
"See superclass."
result = ["gdbarch"] + [p[1] for p in self.params]
return ", ".join(result)
# Read the components.
with open("gdbarch-components.py") as fd:
exec(fd.read())
copyright = gdbcopyright.copyright(
"gdbarch.py", "Dynamic architecture support for GDB, the GNU debugger."
)
def info(c):
"Filter function to only allow Info components."
return type(c) is Info
def not_info(c):
"Filter function to omit Info components."
return type(c) is not Info
with open("gdbarch-gen.h", "w") as f:
print(copyright, file=f)
print(file=f)
print(file=f)
print("/* The following are pre-initialized by GDBARCH. */", file=f)
# Do Info components first.
for c in filter(info, components):
print(file=f)
print(
f"""extern {c.type} gdbarch_{c.name} (struct gdbarch *gdbarch);
/* set_gdbarch_{c.name}() - not applicable - pre-initialized. */""",
file=f,
)
print(file=f)
print(file=f)
print("/* The following are initialized by the target dependent code. */", file=f)
# Generate decls for accessors, setters, and predicates for all
# non-Info components.
for c in filter(not_info, components):
if c.comment:
print(file=f)
comment = c.comment.split("\n")
if comment[0] == "":
comment = comment[1:]
if comment[-1] == "":
comment = comment[:-1]
print("/* ", file=f, end="")
print(comment[0], file=f, end="")
if len(comment) > 1:
print(file=f)
print(
textwrap.indent("\n".join(comment[1:]), prefix=" "),
end="",
file=f,
)
print(" */", file=f)
if c.predicate:
print(file=f)
print(f"extern bool gdbarch_{c.name}_p (struct gdbarch *gdbarch);", file=f)
print(file=f)
if isinstance(c, Value):
print(
f"extern {c.type} gdbarch_{c.name} (struct gdbarch *gdbarch);",
file=f,
)
print(
f"extern void set_gdbarch_{c.name} (struct gdbarch *gdbarch, {c.type} {c.name});",
file=f,
)
else:
assert isinstance(c, Function)
print(
f"typedef {c.type} ({c.ftype()}) ({c.param_list()});",
file=f,
)
print(
f"extern {c.type} gdbarch_{c.name} ({c.set_list()});",
file=f,
)
print(
f"extern void set_gdbarch_{c.name} (struct gdbarch *gdbarch, {c.ftype()} *{c.name});",
file=f,
)
with open("gdbarch.c", "w") as f:
print(copyright, file=f)
print(file=f)
print("/* Maintain the struct gdbarch object. */", file=f)
print(file=f)
#
# The struct definition body.
#
print("struct gdbarch", file=f)
print("{", file=f)
print(" /* Has this architecture been fully initialized? */", file=f)
print(" bool initialized_p = false;", file=f)
print(file=f)
print(" /* An obstack bound to the lifetime of the architecture. */", file=f)
print(" auto_obstack obstack;", file=f)
print(" /* Registry. */", file=f)
print(" registry<gdbarch> registry_fields;", file=f)
print(file=f)
print(" /* basic architectural information. */", file=f)
for c in filter(info, components):
print(f" {c.type} {c.name};", file=f)
print(file=f)
print(" /* target specific vector. */", file=f)
print(" struct gdbarch_tdep_base *tdep = nullptr;", file=f)
print(" gdbarch_dump_tdep_ftype *dump_tdep = nullptr;", file=f)
print(file=f)
print(" /* per-architecture data-pointers. */", file=f)
print(" unsigned nr_data = 0;", file=f)
print(" void **data = nullptr;", file=f)
print(file=f)
for c in filter(not_info, components):
if isinstance(c, Function):
print(f" gdbarch_{c.name}_ftype *", file=f, end="")
else:
print(f" {c.type} ", file=f, end="")
print(f"{c.name} = ", file=f, end="")
if c.predefault is not None:
print(f"{c.predefault};", file=f)
elif isinstance(c, Value):
print("0;", file=f)
else:
assert isinstance(c, Function)
print("nullptr;", file=f)
print("};", file=f)
print(file=f)
#
# Initialization.
#
print("/* Create a new ``struct gdbarch'' based on information provided by", file=f)
print(" ``struct gdbarch_info''. */", file=f)
print(file=f)
print("struct gdbarch *", file=f)
print("gdbarch_alloc (const struct gdbarch_info *info,", file=f)
print(" struct gdbarch_tdep_base *tdep)", file=f)
print("{", file=f)
print(" struct gdbarch *gdbarch;", file=f)
print("", file=f)
print(" gdbarch = new struct gdbarch;", file=f)
print(file=f)
print(" gdbarch->tdep = tdep;", file=f)
print(file=f)
for c in filter(info, components):
print(f" gdbarch->{c.name} = info->{c.name};", file=f)
print(file=f)
print(" return gdbarch;", file=f)
print("}", file=f)
print(file=f)
print(file=f)
print(file=f)
#
# Post-initialization validation and updating
#
print("/* Ensure that all values in a GDBARCH are reasonable. */", file=f)
print(file=f)
print("static void", file=f)
print("verify_gdbarch (struct gdbarch *gdbarch)", file=f)
print("{", file=f)
print(" string_file log;", file=f)
print(file=f)
print(" /* fundamental */", file=f)
print(" if (gdbarch->byte_order == BFD_ENDIAN_UNKNOWN)", file=f)
print(""" log.puts ("\\n\\tbyte-order");""", file=f)
print(" if (gdbarch->bfd_arch_info == NULL)", file=f)
print(""" log.puts ("\\n\\tbfd_arch_info");""", file=f)
print(
" /* Check those that need to be defined for the given multi-arch level. */",
file=f,
)
for c in filter(not_info, components):
if c.invalid is False:
print(f" /* Skip verify of {c.name}, invalid_p == 0 */", file=f)
elif c.predicate:
print(f" /* Skip verify of {c.name}, has predicate. */", file=f)
elif isinstance(c.invalid, str) and c.postdefault is not None:
print(f" if ({c.invalid})", file=f)
print(f" gdbarch->{c.name} = {c.postdefault};", file=f)
elif c.predefault is not None and c.postdefault is not None:
print(f" if (gdbarch->{c.name} == {c.predefault})", file=f)
print(f" gdbarch->{c.name} = {c.postdefault};", file=f)
elif c.postdefault is not None:
print(f" if (gdbarch->{c.name} == 0)", file=f)
print(f" gdbarch->{c.name} = {c.postdefault};", file=f)
elif isinstance(c.invalid, str):
print(f" if ({c.invalid})", file=f)
print(f""" log.puts ("\\n\\t{c.name}");""", file=f)
elif c.predefault is not None:
print(f" if (gdbarch->{c.name} == {c.predefault})", file=f)
print(f""" log.puts ("\\n\\t{c.name}");""", file=f)
elif c.invalid is True:
print(f" if (gdbarch->{c.name} == 0)", file=f)
print(f""" log.puts ("\\n\\t{c.name}");""", file=f)
else:
# We should not allow ourselves to simply do nothing here
# because no other case applies. If we end up here then
# either the input data needs adjusting so one of the
# above cases matches, or we need additional cases adding
# here.
raise Exception("unhandled case when generating gdbarch validation")
print(" if (!log.empty ())", file=f)
print(
""" internal_error (_("verify_gdbarch: the following are invalid ...%s"),""",
file=f,
)
print(" log.c_str ());", file=f)
print("}", file=f)
print(file=f)
print(file=f)
#
# Dumping.
#
print("/* Print out the details of the current architecture. */", file=f)
print(file=f)
print("void", file=f)
print("gdbarch_dump (struct gdbarch *gdbarch, struct ui_file *file)", file=f)
print("{", file=f)
print(""" const char *gdb_nm_file = "<not-defined>";""", file=f)
print(file=f)
print("#if defined (GDB_NM_FILE)", file=f)
print(" gdb_nm_file = GDB_NM_FILE;", file=f)
print("#endif", file=f)
print(" gdb_printf (file,", file=f)
print(""" "gdbarch_dump: GDB_NM_FILE = %s\\n",""", file=f)
print(" gdb_nm_file);", file=f)
for c in components:
if c.predicate:
print(" gdb_printf (file,", file=f)
print(
f""" "gdbarch_dump: gdbarch_{c.name}_p() = %d\\n",""",
file=f,
)
print(f" gdbarch_{c.name}_p (gdbarch));", file=f)
if isinstance(c, Function):
print(" gdb_printf (file,", file=f)
print(f""" "gdbarch_dump: {c.name} = <%s>\\n",""", file=f)
print(
f" host_address_to_string (gdbarch->{c.name}));",
file=f,
)
else:
if c.printer:
printer = c.printer
elif c.type == "CORE_ADDR":
printer = f"core_addr_to_string_nz (gdbarch->{c.name})"
else:
printer = f"plongest (gdbarch->{c.name})"
print(" gdb_printf (file,", file=f)
print(f""" "gdbarch_dump: {c.name} = %s\\n",""", file=f)
print(f" {printer});", file=f)
print(" if (gdbarch->dump_tdep != NULL)", file=f)
print(" gdbarch->dump_tdep (gdbarch, file);", file=f)
print("}", file=f)
print(file=f)
#
# Bodies of setter, accessor, and predicate functions.
#
for c in components:
if c.predicate:
print(file=f)
print("bool", file=f)
print(f"gdbarch_{c.name}_p (struct gdbarch *gdbarch)", file=f)
print("{", file=f)
print(" gdb_assert (gdbarch != NULL);", file=f)
print(f" return {c.get_predicate()};", file=f)
print("}", file=f)
if isinstance(c, Function):
print(file=f)
print(f"{c.type}", file=f)
print(f"gdbarch_{c.name} ({c.set_list()})", file=f)
print("{", file=f)
print(" gdb_assert (gdbarch != NULL);", file=f)
print(f" gdb_assert (gdbarch->{c.name} != NULL);", file=f)
if c.predicate and c.predefault:
# Allow a call to a function with a predicate.
print(
f" /* Do not check predicate: {c.get_predicate()}, allow call. */",
file=f,
)
if c.param_checks:
for rule in c.param_checks:
print(f" gdb_assert ({rule});", file=f)
print(" if (gdbarch_debug >= 2)", file=f)
print(
f""" gdb_printf (gdb_stdlog, "gdbarch_{c.name} called\\n");""",
file=f,
)
print(" ", file=f, end="")
if c.type != "void":
if c.result_checks:
print("auto result = ", file=f, end="")
else:
print("return ", file=f, end="")
print(f"gdbarch->{c.name} ({c.actuals()});", file=f)
if c.type != "void" and c.result_checks:
for rule in c.result_checks:
print(f" gdb_assert ({rule});", file=f)
print(" return result;", file=f)
print("}", file=f)
print(file=f)
print("void", file=f)
setter_name = f"set_gdbarch_{c.name}"
ftype_name = f"gdbarch_{c.name}_ftype"
print(f"{setter_name} (struct gdbarch *gdbarch,", file=f)
indent_columns = len(f"{setter_name} (")
print(f"{indentation(indent_columns)}{ftype_name} {c.name})", file=f)
print("{", file=f)
print(f" gdbarch->{c.name} = {c.name};", file=f)
print("}", file=f)
elif isinstance(c, Value):
print(file=f)
print(f"{c.type}", file=f)
print(f"gdbarch_{c.name} (struct gdbarch *gdbarch)", file=f)
print("{", file=f)
print(" gdb_assert (gdbarch != NULL);", file=f)
if c.invalid is False:
print(f" /* Skip verify of {c.name}, invalid_p == 0 */", file=f)
elif isinstance(c.invalid, str):
print(" /* Check variable is valid. */", file=f)
print(f" gdb_assert (!({c.invalid}));", file=f)
elif c.predefault:
print(" /* Check variable changed from pre-default. */", file=f)
print(f" gdb_assert (gdbarch->{c.name} != {c.predefault});", file=f)
print(" if (gdbarch_debug >= 2)", file=f)
print(
f""" gdb_printf (gdb_stdlog, "gdbarch_{c.name} called\\n");""",
file=f,
)
print(f" return gdbarch->{c.name};", file=f)
print("}", file=f)
print(file=f)
print("void", file=f)
setter_name = f"set_gdbarch_{c.name}"
print(f"{setter_name} (struct gdbarch *gdbarch,", file=f)
indent_columns = len(f"{setter_name} (")
print(f"{indentation(indent_columns)}{c.type} {c.name})", file=f)
print("{", file=f)
print(f" gdbarch->{c.name} = {c.name};", file=f)
print("}", file=f)
else:
assert isinstance(c, Info)
print(file=f)
print(f"{c.type}", file=f)
print(f"gdbarch_{c.name} (struct gdbarch *gdbarch)", file=f)
print("{", file=f)
print(" gdb_assert (gdbarch != NULL);", file=f)
print(" if (gdbarch_debug >= 2)", file=f)
print(
f""" gdb_printf (gdb_stdlog, "gdbarch_{c.name} called\\n");""",
file=f,
)
print(f" return gdbarch->{c.name};", file=f)
print("}", file=f)
| cuda-gdb-master | gdb/gdbarch.py |
# Program to fetch python compilation parameters.
# Copied from python-config of the 2.7 release.
import sys
import os
import getopt
import sysconfig
valid_opts = ["prefix", "exec-prefix", "includes", "libs", "cflags", "ldflags", "help"]
def exit_with_usage(code=1):
sys.stderr.write(
"Usage: %s [%s]\n" % (sys.argv[0], "|".join("--" + opt for opt in valid_opts))
)
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], "", valid_opts)
except getopt.error:
exit_with_usage()
if not opts:
exit_with_usage()
pyver = sysconfig.get_config_var("VERSION")
getvar = sysconfig.get_config_var
abiflags = getattr(sys, "abiflags", "")
opt_flags = [flag for (flag, val) in opts]
if "--help" in opt_flags:
exit_with_usage(code=0)
def to_unix_path(path):
"""On Windows, returns the given path with all backslashes
converted into forward slashes. This is to help prevent problems
when using the paths returned by this script with cygwin tools.
In particular, cygwin bash treats backslashes as a special character.
On Unix systems, returns the path unchanged.
"""
if os.name == "nt":
path = path.replace("\\", "/")
return path
for opt in opt_flags:
if opt == "--prefix":
print(to_unix_path(os.path.normpath(sys.prefix)))
elif opt == "--exec-prefix":
print(to_unix_path(os.path.normpath(sys.exec_prefix)))
elif opt in ("--includes", "--cflags"):
flags = [
"-I" + sysconfig.get_path("include"),
"-I" + sysconfig.get_path("platinclude"),
]
if opt == "--cflags":
flags.extend(getvar("CFLAGS").split())
print(to_unix_path(" ".join(flags)))
elif opt in ("--libs", "--ldflags"):
libs = ["-lpython" + pyver + abiflags]
if getvar("LIBS") is not None:
libs.extend(getvar("LIBS").split())
if getvar("SYSLIBS") is not None:
libs.extend(getvar("SYSLIBS").split())
# add the prefix/lib/pythonX.Y/config dir, but only if there is no
# shared library in prefix/lib/.
if opt == "--ldflags":
if not getvar("Py_ENABLE_SHARED"):
if getvar("LIBPL") is not None:
libs.insert(0, "-L" + getvar("LIBPL"))
elif os.name == "nt":
libs.insert(0, "-L" + os.path.normpath(sys.prefix) + "/libs")
if getvar("LINKFORSHARED") is not None:
libs.extend(getvar("LINKFORSHARED").split())
print(to_unix_path(" ".join(libs)))
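# Hedged usage note: this script is intended to be run from the command line
# by the build machinery, for example:
#
#   python python-config.py --includes
#   python python-config.py --libs --ldflags
#
# Each recognized option prints the corresponding compile or link flags for
# the Python interpreter that executed the script.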
| cuda-gdb-master | gdb/python/python-config.py |
# Copyright (C) 2013-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class FrameIterator(object):
"""A gdb.Frame iterator. Iterates over gdb.Frames or objects that
conform to that interface."""
def __init__(self, frame_obj):
"""Initialize a FrameIterator.
Arguments:
frame_obj the starting frame."""
super(FrameIterator, self).__init__()
self.frame = frame_obj
def __iter__(self):
return self
def next(self):
"""next implementation.
Returns:
The next oldest frame."""
result = self.frame
if result is None:
raise StopIteration
self.frame = result.older()
return result
# Python 3.x requires __next__(self) while Python 2.x requires
# next(self). Define next(self), and for Python 3.x create this
# wrapper.
def __next__(self):
return self.next()
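# Hedged usage sketch, commented out so importing this module has no side
# effects: walk every frame of the selected thread inside a live GDB
# session.  gdb.newest_frame() is only meaningful while a program is being
# debugged.
#
# import gdb
# from gdb.FrameIterator import FrameIterator
#
# for frame in FrameIterator(gdb.newest_frame()):
#     print(frame.name())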
| cuda-gdb-master | gdb/python/lib/gdb/FrameIterator.py |
# Copyright (C) 2015-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Unwinder class and register_unwinder function."""
import gdb
class Unwinder(object):
"""Base class (or a template) for frame unwinders written in Python.
An unwinder has a single method __call__ and the attributes
described below.
Attributes:
name: The name of the unwinder.
enabled: A boolean indicating whether the unwinder is enabled.
"""
def __init__(self, name):
"""Constructor.
Args:
name: An identifying name for the unwinder.
"""
self.name = name
self.enabled = True
def __call__(self, pending_frame):
"""GDB calls this method to unwind a frame.
Arguments:
pending_frame: gdb.PendingFrame instance.
Returns:
gdb.UnwindInfo instance.
"""
raise NotImplementedError("Unwinder __call__.")
def register_unwinder(locus, unwinder, replace=False):
"""Register unwinder in given locus.
The unwinder is prepended to the locus's unwinders list. Unwinder
name should be unique.
Arguments:
locus: Either an objfile, progspace, or None (in which case
the unwinder is registered globally).
unwinder: An object of a gdb.Unwinder subclass
replace: If True, replaces existing unwinder with the same name.
Otherwise, raises exception if unwinder with the same
name already exists.
Returns:
Nothing.
Raises:
RuntimeError: Unwinder name is not unique
TypeError: Bad locus type
"""
if locus is None:
if gdb.parameter("verbose"):
gdb.write("Registering global %s unwinder ...\n" % unwinder.name)
locus = gdb
elif isinstance(locus, gdb.Objfile) or isinstance(locus, gdb.Progspace):
if gdb.parameter("verbose"):
gdb.write(
"Registering %s unwinder for %s ...\n" % (unwinder.name, locus.filename)
)
else:
raise TypeError("locus should be gdb.Objfile or gdb.Progspace or None")
i = 0
for needle in locus.frame_unwinders:
if needle.name == unwinder.name:
if replace:
del locus.frame_unwinders[i]
else:
raise RuntimeError("Unwinder %s already exists." % unwinder.name)
i += 1
locus.frame_unwinders.insert(0, unwinder)
gdb.invalidate_cached_frames()
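# Hedged usage sketch, commented out so importing gdb.unwinder never
# registers anything on its own.  A real unwinder would inspect
# PENDING_FRAME and return a gdb.UnwindInfo built with
# pending_frame.create_unwind_info(); returning None declines the frame and
# lets other unwinders run.
#
# class ExampleUnwinder(Unwinder):
#     def __init__(self):
#         super().__init__("example-unwinder")
#
#     def __call__(self, pending_frame):
#         # Decline every frame; a real implementation would recognize its
#         # frames here and return a gdb.UnwindInfo instead.
#         return None
#
# register_unwinder(None, ExampleUnwinder(), replace=True)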
| cuda-gdb-master | gdb/python/lib/gdb/unwinder.py |
# Copyright (C) 2021-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Disassembler related module."""
import gdb
import _gdb.disassembler
# Re-export everything from the _gdb.disassembler module, which is
# defined within GDB's C++ code.
from _gdb.disassembler import *
# Module global dictionary of gdb.disassembler.Disassembler objects.
# The keys of this dictionary are bfd architecture names, or the
# special value None.
#
# When a request to disassemble comes in we first lookup the bfd
# architecture name from the gdbarch, if that name exists in this
# dictionary then we use that Disassembler object.
#
# If there's no architecture specific disassembler then we look for
# the key None in this dictionary, and if that key exists, we use that
# disassembler.
#
# If none of the above checks found a suitable disassembler, then no
# disassembly is performed in Python.
_disassemblers_dict = {}
class Disassembler(object):
"""A base class from which all user implemented disassemblers must
inherit."""
def __init__(self, name):
"""Constructor. Takes a name, which should be a string, which can be
used to identify this disassembler in diagnostic messages."""
self.name = name
def __call__(self, info):
"""A default implementation of __call__. All sub-classes must
override this method. Calling this default implementation will throw
a NotImplementedError exception."""
raise NotImplementedError("Disassembler.__call__")
def register_disassembler(disassembler, architecture=None):
"""Register a disassembler. DISASSEMBLER is a sub-class of
gdb.disassembler.Disassembler. ARCHITECTURE is either None or a
string, the name of an architecture known to GDB.
DISASSEMBLER is registered as a disassembler for ARCHITECTURE, or
all architectures when ARCHITECTURE is None.
Returns the previous disassembler registered with this
ARCHITECTURE value.
"""
if not isinstance(disassembler, Disassembler) and disassembler is not None:
raise TypeError("disassembler should sub-class gdb.disassembler.Disassembler")
old = None
if architecture in _disassemblers_dict:
old = _disassemblers_dict[architecture]
del _disassemblers_dict[architecture]
if disassembler is not None:
_disassemblers_dict[architecture] = disassembler
# Call the private _set_enabled function within the
# _gdb.disassembler module. This function sets a global flag
# within GDB's C++ code that enables or dissables the Python
# disassembler functionality, this improves performance of the
# disassembler by avoiding unneeded calls into Python when we know
# that no disassemblers are registered.
_gdb.disassembler._set_enabled(len(_disassemblers_dict) > 0)
return old
def _print_insn(info):
"""This function is called by GDB when it wants to disassemble an
instruction. INFO describes the instruction to be
disassembled."""
def lookup_disassembler(arch):
try:
name = arch.name()
if name is None:
return None
if name in _disassemblers_dict:
return _disassemblers_dict[name]
if None in _disassemblers_dict:
return _disassemblers_dict[None]
return None
except:
# It's pretty unlikely this exception case will ever
# trigger, one situation would be if the user somehow
# corrupted the _disassemblers_dict variable such that it
# was no longer a dictionary.
return None
disassembler = lookup_disassembler(info.architecture)
if disassembler is None:
return None
return disassembler(info)
class maint_info_py_disassemblers_cmd(gdb.Command):
"""
List all registered Python disassemblers.
List the name of all registered Python disassemblers, next to the
name of the architecture for which the disassembler is registered.
The global Python disassembler is listed next to the string
'GLOBAL'.
The disassembler that matches the architecture of the currently
    selected inferior will be marked; this is an indication of which
disassembler will be invoked if any disassembly is performed in
the current inferior.
"""
def __init__(self):
super().__init__("maintenance info python-disassemblers", gdb.COMMAND_USER)
def invoke(self, args, from_tty):
# If no disassemblers are registered, tell the user.
if len(_disassemblers_dict) == 0:
print("No Python disassemblers registered.")
return
# Figure out the longest architecture name, so we can
# correctly format the table of results.
longest_arch_name = 0
for architecture in _disassemblers_dict:
if architecture is not None:
name = _disassemblers_dict[architecture].name
if len(name) > longest_arch_name:
longest_arch_name = len(name)
# Figure out the name of the current architecture. There
# should always be a current inferior, but if, somehow, there
# isn't, then leave curr_arch as the empty string, which will
        # not then match against any architecture in the dictionary.
curr_arch = ""
if gdb.selected_inferior() is not None:
curr_arch = gdb.selected_inferior().architecture().name()
# Now print the dictionary of registered disassemblers out to
# the user.
match_tag = "\t(Matches current architecture)"
fmt_len = max(longest_arch_name, len("Architecture"))
format_string = "{:" + str(fmt_len) + "s} {:s}"
print(format_string.format("Architecture", "Disassember Name"))
for architecture in _disassemblers_dict:
if architecture is not None:
name = _disassemblers_dict[architecture].name
if architecture == curr_arch:
name += match_tag
match_tag = ""
print(format_string.format(architecture, name))
if None in _disassemblers_dict:
name = _disassemblers_dict[None].name + match_tag
print(format_string.format("GLOBAL", name))
maint_info_py_disassemblers_cmd()
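# Hedged usage sketch, commented out to keep this module free of import-time
# side effects.  It assumes builtin_disassemble() is among the names
# re-exported from _gdb.disassembler above; the example simply defers to
# GDB's builtin disassembler for every architecture.
#
# class PassThroughDisassembler(Disassembler):
#     def __init__(self):
#         super().__init__("pass-through-example")
#
#     def __call__(self, info):
#         # Produce the result with GDB's builtin disassembler, unchanged.
#         return builtin_disassemble(info)
#
# register_disassembler(PassThroughDisassembler(), architecture=None)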
| cuda-gdb-master | gdb/python/lib/gdb/disassembler.py |
# Styling related hooks.
# Copyright (C) 2010-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for styling."""
import gdb
try:
from pygments import formatters, lexers, highlight
from pygments.token import Error, Comment, Text
from pygments.filters import TokenMergeFilter
_formatter = None
def get_formatter():
global _formatter
if _formatter is None:
_formatter = formatters.TerminalFormatter()
return _formatter
def colorize(filename, contents):
# Don't want any errors.
try:
lexer = lexers.get_lexer_for_filename(filename, stripnl=False)
formatter = get_formatter()
return highlight(contents, lexer, formatter).encode(
gdb.host_charset(), "backslashreplace"
)
except:
return None
class HandleNasmComments(TokenMergeFilter):
@staticmethod
def fix_comments(lexer, stream):
in_comment = False
for ttype, value in stream:
if ttype is Error and value == "#":
in_comment = True
if in_comment:
if ttype is Text and value == "\n":
in_comment = False
else:
ttype = Comment.Single
yield ttype, value
def filter(self, lexer, stream):
f = HandleNasmComments.fix_comments
return super().filter(lexer, f(lexer, stream))
_asm_lexers = {}
def __get_asm_lexer(gdbarch):
lexer_type = "asm"
try:
# For an i386 based architecture, in 'intel' mode, use the nasm
# lexer.
flavor = gdb.parameter("disassembly-flavor")
if flavor == "intel" and gdbarch.name()[:4] == "i386":
lexer_type = "nasm"
except:
# If GDB is built without i386 support then attempting to fetch
# the 'disassembly-flavor' parameter will throw an error, which we
# ignore.
pass
global _asm_lexers
if lexer_type not in _asm_lexers:
_asm_lexers[lexer_type] = lexers.get_lexer_by_name(lexer_type)
_asm_lexers[lexer_type].add_filter(HandleNasmComments())
_asm_lexers[lexer_type].add_filter("raiseonerror")
return _asm_lexers[lexer_type]
def colorize_disasm(content, gdbarch):
# Don't want any errors.
try:
lexer = __get_asm_lexer(gdbarch)
formatter = get_formatter()
return highlight(content, lexer, formatter).rstrip().encode()
except:
return content
except:
def colorize(filename, contents):
return None
def colorize_disasm(content, gdbarch):
return None
| cuda-gdb-master | gdb/python/lib/gdb/styling.py |
# Copyright (C) 2010-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import traceback
import os
import sys
import _gdb
from contextlib import contextmanager
# Python 3 moved "reload"
if sys.version_info >= (3, 4):
from importlib import reload
else:
from imp import reload
from _gdb import *
# Historically, gdb.events was always available, so ensure it's
# still available without an explicit import.
import _gdbevents as events
sys.modules["gdb.events"] = events
class _GdbFile(object):
# These two are needed in Python 3
encoding = "UTF-8"
errors = "strict"
def __init__(self, stream):
self.stream = stream
def close(self):
# Do nothing.
return None
def isatty(self):
return False
def writelines(self, iterable):
for line in iterable:
self.write(line)
def flush(self):
flush(stream=self.stream)
def write(self, s):
write(s, stream=self.stream)
sys.stdout = _GdbFile(STDOUT)
sys.stderr = _GdbFile(STDERR)
# Default prompt hook does nothing.
prompt_hook = None
# Ensure that sys.argv is set to something.
# We do not use PySys_SetArgvEx because it did not appear until 2.6.6.
sys.argv = [""]
# Initial pretty printers.
pretty_printers = []
# Initial type printers.
type_printers = []
# Initial xmethod matchers.
xmethods = []
# Initial frame filters.
frame_filters = {}
# Initial frame unwinders.
frame_unwinders = []
def _execute_unwinders(pending_frame):
"""Internal function called from GDB to execute all unwinders.
Runs each currently enabled unwinder until it finds the one that
can unwind given frame.
Arguments:
pending_frame: gdb.PendingFrame instance.
Returns:
Tuple with:
[0] gdb.UnwindInfo instance
[1] Name of unwinder that claimed the frame (type `str`)
or None, if no unwinder has claimed the frame.
"""
for objfile in objfiles():
for unwinder in objfile.frame_unwinders:
if unwinder.enabled:
unwind_info = unwinder(pending_frame)
if unwind_info is not None:
return (unwind_info, unwinder.name)
for unwinder in current_progspace().frame_unwinders:
if unwinder.enabled:
unwind_info = unwinder(pending_frame)
if unwind_info is not None:
return (unwind_info, unwinder.name)
for unwinder in frame_unwinders:
if unwinder.enabled:
unwind_info = unwinder(pending_frame)
if unwind_info is not None:
return (unwind_info, unwinder.name)
return None
def _execute_file(filepath):
"""This function is used to replace Python 2's PyRun_SimpleFile.
Loads and executes the given file.
We could use the runpy module, but its documentation says:
"Furthermore, any functions and classes defined by the executed code are
not guaranteed to work correctly after a runpy function has returned."
"""
globals = sys.modules["__main__"].__dict__
set_file = False
# Set file (if not set) so that the imported file can use it (e.g. to
# access file-relative paths). This matches what PyRun_SimpleFile does.
    if "__file__" not in globals:
globals["__file__"] = filepath
set_file = True
try:
with open(filepath, "rb") as file:
# We pass globals also as locals to match what Python does
# in PyRun_SimpleFile.
compiled = compile(file.read(), filepath, "exec")
exec(compiled, globals, globals)
finally:
if set_file:
del globals["__file__"]
# Convenience variable to GDB's python directory
PYTHONDIR = os.path.dirname(os.path.dirname(__file__))
# Auto-load all functions/commands.
# Packages to auto-load.
packages = ["function", "command", "printer"]
# pkgutil.iter_modules is not available prior to Python 2.6. Instead,
# manually iterate the list, collating the Python files in each module
# path. Construct the module name, and import.
def _auto_load_packages():
for package in packages:
location = os.path.join(os.path.dirname(__file__), package)
if os.path.exists(location):
py_files = filter(
lambda x: x.endswith(".py") and x != "__init__.py", os.listdir(location)
)
for py_file in py_files:
# Construct from foo.py, gdb.module.foo
modname = "%s.%s.%s" % (__name__, package, py_file[:-3])
try:
if modname in sys.modules:
# reload modules with duplicate names
reload(__import__(modname))
else:
__import__(modname)
except:
sys.stderr.write(traceback.format_exc() + "\n")
_auto_load_packages()
def GdbSetPythonDirectory(dir):
"""Update sys.path, reload gdb and auto-load packages."""
global PYTHONDIR
try:
sys.path.remove(PYTHONDIR)
except ValueError:
pass
sys.path.insert(0, dir)
PYTHONDIR = dir
# note that reload overwrites the gdb module without deleting existing
# attributes
reload(__import__(__name__))
_auto_load_packages()
def current_progspace():
"Return the current Progspace."
return selected_inferior().progspace
def objfiles():
"Return a sequence of the current program space's objfiles."
return current_progspace().objfiles()
def solib_name(addr):
"""solib_name (Long) -> String.\n\
Return the name of the shared library holding a given address, or None."""
return current_progspace().solib_name(addr)
def block_for_pc(pc):
"Return the block containing the given pc value, or None."
return current_progspace().block_for_pc(pc)
def find_pc_line(pc):
"""find_pc_line (pc) -> Symtab_and_line.
Return the gdb.Symtab_and_line object corresponding to the pc value."""
return current_progspace().find_pc_line(pc)
def set_parameter(name, value):
"""Set the GDB parameter NAME to VALUE."""
# Handle the specific cases of None and booleans here, because
# gdb.parameter can return them, but they can't be passed to 'set'
# this way.
if value is None:
value = "unlimited"
elif isinstance(value, bool):
if value:
value = "on"
else:
value = "off"
execute("set " + name + " " + str(value), to_string=True)
@contextmanager
def with_parameter(name, value):
"""Temporarily set the GDB parameter NAME to VALUE.
Note that this is a context manager."""
old_value = parameter(name)
set_parameter(name, value)
try:
# Nothing that useful to return.
yield None
finally:
set_parameter(name, old_value)
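# Hedged usage sketch of the parameter helpers above, commented out because
# this module is imported during GDB start-up: temporarily turn off
# pagination around a command and restore the previous setting afterwards.
#
# with with_parameter("pagination", False):
#     output = execute("info functions", to_string=True)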
| cuda-gdb-master | gdb/python/lib/gdb/__init__.py |
# Python side of the support for xmethods.
# Copyright (C) 2013-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for defining xmethods"""
import gdb
import re
import sys
class XMethod(object):
"""Base class (or a template) for an xmethod description.
Currently, the description requires only the 'name' and 'enabled'
attributes. Description objects are managed by 'XMethodMatcher'
objects (see below). Note that this is only a template for the
interface of the XMethodMatcher.methods objects. One could use
this class or choose to use an object which supports this exact same
    interface. Also, an XMethodMatcher can choose not to use its 'methods'
attribute. In such cases this class (or an equivalent) is not used.
Attributes:
name: The name of the xmethod.
enabled: A boolean indicating if the xmethod is enabled.
"""
def __init__(self, name):
self.name = name
self.enabled = True
class XMethodMatcher(object):
"""Abstract base class for matching an xmethod.
When looking for xmethods, GDB invokes the `match' method of a
registered xmethod matcher to match the object type and method name.
The `match' method in concrete classes derived from this class should
return an `XMethodWorker' object, or a list of `XMethodWorker'
objects if there is a match (see below for 'XMethodWorker' class).
Attributes:
name: The name of the matcher.
enabled: A boolean indicating if the matcher is enabled.
methods: A sequence of objects of type 'XMethod', or objects
which have at least the attributes of an 'XMethod' object.
This list is used by the 'enable'/'disable'/'info' commands to
enable/disable/list the xmethods registered with GDB. See
the 'match' method below to know how this sequence is used.
            This attribute is None if the matcher chooses not to have any
xmethods managed by it.
"""
def __init__(self, name):
"""
Args:
name: An identifying name for the xmethod or the group of
xmethods returned by the `match' method.
"""
self.name = name
self.enabled = True
self.methods = None
def match(self, class_type, method_name):
"""Match class type and method name.
In derived classes, it should return an XMethodWorker object, or a
sequence of 'XMethodWorker' objects. Only those xmethod workers
whose corresponding 'XMethod' descriptor object is enabled should be
returned.
Args:
class_type: The class type (gdb.Type object) to match.
method_name: The name (string) of the method to match.
"""
raise NotImplementedError("XMethodMatcher match")
class XMethodWorker(object):
"""Base class for all xmethod workers defined in Python.
An xmethod worker is an object which matches the method arguments, and
invokes the method when GDB wants it to. Internally, GDB first invokes the
'get_arg_types' method to perform overload resolution. If GDB selects to
invoke this Python xmethod, then it invokes it via the overridden
'__call__' method. The 'get_result_type' method is used to implement
'ptype' on the xmethod.
Derived classes should override the 'get_arg_types', 'get_result_type'
and '__call__' methods.
"""
def get_arg_types(self):
"""Return arguments types of an xmethod.
A sequence of gdb.Type objects corresponding to the arguments of the
xmethod are returned. If the xmethod takes no arguments, then 'None'
or an empty sequence is returned. If the xmethod takes only a single
argument, then a gdb.Type object or a sequence with a single gdb.Type
element is returned.
"""
raise NotImplementedError("XMethodWorker get_arg_types")
def get_result_type(self, *args):
"""Return the type of the result of the xmethod.
Args:
args: Arguments to the method. Each element of the tuple is a
gdb.Value object. The first element is the 'this' pointer
value. These are the same arguments passed to '__call__'.
Returns:
A gdb.Type object representing the type of the result of the
xmethod.
"""
raise NotImplementedError("XMethodWorker get_result_type")
def __call__(self, *args):
"""Invoke the xmethod.
Args:
args: Arguments to the method. Each element of the tuple is a
gdb.Value object. The first element is the 'this' pointer
value.
Returns:
A gdb.Value corresponding to the value returned by the xmethod.
Returns 'None' if the method does not return anything.
"""
raise NotImplementedError("XMethodWorker __call__")
class SimpleXMethodMatcher(XMethodMatcher):
"""A utility class to implement simple xmethod mathers and workers.
See the __init__ method below for information on how instances of this
class can be used.
For simple classes and methods, one can choose to use this class. For
complex xmethods, which need to replace/implement template methods on
possibly template classes, one should implement their own xmethod
matchers and workers. See py-xmethods.py in testsuite/gdb.python
directory of the GDB source tree for examples.
"""
class SimpleXMethodWorker(XMethodWorker):
def __init__(self, method_function, arg_types):
self._arg_types = arg_types
self._method_function = method_function
def get_arg_types(self):
return self._arg_types
def __call__(self, *args):
return self._method_function(*args)
def __init__(
self, name, class_matcher, method_matcher, method_function, *arg_types
):
"""
Args:
name: Name of the xmethod matcher.
class_matcher: A regular expression used to match the name of the
class whose method this xmethod is implementing/replacing.
method_matcher: A regular expression used to match the name of the
method this xmethod is implementing/replacing.
method_function: A Python callable which would be called via the
'invoke' method of the worker returned by the objects of this
class. This callable should accept the object (*this) as the
first argument followed by the rest of the arguments to the
method. All arguments to this function should be gdb.Value
objects.
arg_types: The gdb.Type objects corresponding to the arguments that
this xmethod takes. It can be None, or an empty sequence,
or a single gdb.Type object, or a sequence of gdb.Type objects.
"""
XMethodMatcher.__init__(self, name)
assert callable(method_function), (
"The 'method_function' argument to 'SimpleXMethodMatcher' "
"__init__ method should be a callable."
)
self._method_function = method_function
self._class_matcher = class_matcher
self._method_matcher = method_matcher
self._arg_types = arg_types
def match(self, class_type, method_name):
cm = re.match(self._class_matcher, str(class_type.unqualified().tag))
mm = re.match(self._method_matcher, method_name)
if cm and mm:
return SimpleXMethodMatcher.SimpleXMethodWorker(
self._method_function, self._arg_types
)
# A helper function for register_xmethod_matcher which returns an error
# object if MATCHER is not having the requisite attributes in the proper
# format.
def _validate_xmethod_matcher(matcher):
if not hasattr(matcher, "match"):
return TypeError("Xmethod matcher is missing method: match")
if not hasattr(matcher, "name"):
return TypeError("Xmethod matcher is missing attribute: name")
if not hasattr(matcher, "enabled"):
return TypeError("Xmethod matcher is missing attribute: enabled")
if not isinstance(matcher.name, str):
return TypeError("Attribute 'name' of xmethod matcher is not a " "string")
if matcher.name.find(";") >= 0:
return ValueError("Xmethod matcher name cannot contain ';' in it")
# A helper function for register_xmethod_matcher which looks up an
# xmethod matcher with NAME in LOCUS. Returns the index of the xmethod
# matcher in 'xmethods' sequence attribute of the LOCUS. If NAME is not
# found in LOCUS, then -1 is returned.
def _lookup_xmethod_matcher(locus, name):
for i in range(0, len(locus.xmethods)):
if locus.xmethods[i].name == name:
return i
return -1
def register_xmethod_matcher(locus, matcher, replace=False):
"""Registers a xmethod matcher MATCHER with a LOCUS.
Arguments:
locus: The locus in which the xmethods should be registered.
It can be 'None' to indicate that the xmethods should be
registered globally. Or, it could be a gdb.Objfile or a
gdb.Progspace object in which the xmethods should be
registered.
matcher: The xmethod matcher to register with the LOCUS. It
should be an instance of 'XMethodMatcher' class.
replace: If True, replace any existing xmethod matcher with the
same name in the locus. Otherwise, if a matcher with the same name
exists in the locus, raise an exception.
"""
err = _validate_xmethod_matcher(matcher)
if err:
raise err
if not locus:
locus = gdb
if locus == gdb:
locus_name = "global"
else:
locus_name = locus.filename
index = _lookup_xmethod_matcher(locus, matcher.name)
if index >= 0:
if replace:
del locus.xmethods[index]
else:
raise RuntimeError(
"Xmethod matcher already registered with "
"%s: %s" % (locus_name, matcher.name)
)
if gdb.parameter("verbose"):
gdb.write("Registering xmethod matcher '%s' with %s' ...\n")
locus.xmethods.insert(0, matcher)
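# Hedged usage sketch, commented out so importing gdb.xmethod registers
# nothing by itself.  "MyClass" and its "geta" method are purely
# illustrative names; the callable receives the 'this' value as its first
# argument, as described for SimpleXMethodMatcher above.
#
# def _myclass_geta(this):
#     return this["a_"]
#
# register_xmethod_matcher(
#     None,
#     SimpleXMethodMatcher("MyClass_geta", "^MyClass$", "^geta$", _myclass_geta),
#     replace=True,
# )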
| cuda-gdb-master | gdb/python/lib/gdb/xmethod.py |
# Type utilities.
# Copyright (C) 2010-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for working with gdb.Types."""
import gdb
def get_basic_type(type_):
"""Return the "basic" type of a type.
Arguments:
type_: The type to reduce to its basic type.
Returns:
type_ with const/volatile is stripped away,
and typedefs/references converted to the underlying type.
"""
while (
type_.code == gdb.TYPE_CODE_REF
or type_.code == gdb.TYPE_CODE_RVALUE_REF
or type_.code == gdb.TYPE_CODE_TYPEDEF
):
if type_.code == gdb.TYPE_CODE_REF or type_.code == gdb.TYPE_CODE_RVALUE_REF:
type_ = type_.target()
else:
type_ = type_.strip_typedefs()
return type_.unqualified()
def has_field(type_, field):
"""Return True if a type has the specified field.
Arguments:
type_: The type to examine.
It must be one of gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION.
field: The name of the field to look up.
Returns:
True if the field is present either in type_ or any baseclass.
Raises:
TypeError: The type is not a struct or union.
"""
type_ = get_basic_type(type_)
if type_.code != gdb.TYPE_CODE_STRUCT and type_.code != gdb.TYPE_CODE_UNION:
raise TypeError("not a struct or union")
for f in type_.fields():
if f.is_base_class:
if has_field(f.type, field):
return True
else:
# NOTE: f.name could be None
if f.name == field:
return True
return False
def make_enum_dict(enum_type):
"""Return a dictionary from a program's enum type.
Arguments:
enum_type: The enum to compute the dictionary for.
Returns:
The dictionary of the enum.
Raises:
TypeError: The type is not an enum.
"""
if enum_type.code != gdb.TYPE_CODE_ENUM:
raise TypeError("not an enum type")
enum_dict = {}
for field in enum_type.fields():
# The enum's value is stored in "enumval".
enum_dict[field.name] = field.enumval
return enum_dict
def deep_items(type_):
"""Return an iterator that recursively traverses anonymous fields.
Arguments:
type_: The type to traverse. It should be one of
gdb.TYPE_CODE_STRUCT or gdb.TYPE_CODE_UNION.
Returns:
an iterator similar to gdb.Type.iteritems(), i.e., it returns
pairs of key, value, but for any anonymous struct or union
field that field is traversed recursively, depth-first.
"""
for k, v in type_.iteritems():
if k:
yield k, v
else:
for i in deep_items(v.type):
yield i
class TypePrinter(object):
"""The base class for type printers.
Instances of this type can be used to substitute type names during
'ptype'.
A type printer must have at least 'name' and 'enabled' attributes,
and supply an 'instantiate' method.
The 'instantiate' method must either return None, or return an
object which has a 'recognize' method. This method must accept a
gdb.Type argument and either return None, meaning that the type
was not recognized, or a string naming the type.
"""
def __init__(self, name):
self.name = name
self.enabled = True
def instantiate(self):
return None
# Helper function for computing the list of type recognizers.
def _get_some_type_recognizers(result, plist):
for printer in plist:
if printer.enabled:
inst = printer.instantiate()
if inst is not None:
result.append(inst)
return None
def get_type_recognizers():
"Return a list of the enabled type recognizers for the current context."
result = []
# First try the objfiles.
for objfile in gdb.objfiles():
_get_some_type_recognizers(result, objfile.type_printers)
# Now try the program space.
_get_some_type_recognizers(result, gdb.current_progspace().type_printers)
# Finally, globals.
_get_some_type_recognizers(result, gdb.type_printers)
return result
def apply_type_recognizers(recognizers, type_obj):
"""Apply the given list of type recognizers to the type TYPE_OBJ.
If any recognizer in the list recognizes TYPE_OBJ, returns the name
given by the recognizer. Otherwise, this returns None."""
for r in recognizers:
result = r.recognize(type_obj)
if result is not None:
return result
return None
def register_type_printer(locus, printer):
"""Register a type printer.
PRINTER is the type printer instance.
LOCUS is either an objfile, a program space, or None, indicating
global registration."""
if locus is None:
locus = gdb
locus.type_printers.insert(0, printer)
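# Hedged usage sketch, commented out to avoid any work at import time: a
# typical caller (say, a pretty-printer) strips qualifiers and typedefs
# before inspecting fields.  "val" stands for some gdb.Value obtained
# elsewhere.
#
# basic = get_basic_type(val.type)
# if basic.code == gdb.TYPE_CODE_STRUCT and has_field(basic, "length"):
#     print("length =", int(val["length"]))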
| cuda-gdb-master | gdb/python/lib/gdb/types.py |
# Frame-filter commands.
# Copyright (C) 2013-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Internal functions for working with frame-filters."""
import gdb
from gdb.FrameIterator import FrameIterator
from gdb.FrameDecorator import FrameDecorator
import itertools
import collections
def get_priority(filter_item):
"""Internal worker function to return the frame-filter's priority
from a frame filter object. This is a fail free function as it is
used in sorting and filtering. If a badly implemented frame
filter does not implement the priority attribute, return zero
(otherwise sorting/filtering will fail and prevent other frame
filters from executing).
Arguments:
filter_item: An object conforming to the frame filter
interface.
Returns:
The priority of the frame filter from the "priority"
attribute, or zero.
"""
# Do not fail here, as the sort will fail. If a filter has not
# (incorrectly) set a priority, set it to zero.
return getattr(filter_item, "priority", 0)
def set_priority(filter_item, priority):
"""Internal worker function to set the frame-filter's priority.
Arguments:
filter_item: An object conforming to the frame filter
interface.
priority: The priority to assign as an integer.
"""
filter_item.priority = priority
def get_enabled(filter_item):
"""Internal worker function to return a filter's enabled state
from a frame filter object. This is a fail free function as it is
used in sorting and filtering. If a badly implemented frame
filter does not implement the enabled attribute, return False
(otherwise sorting/filtering will fail and prevent other frame
filters from executing).
Arguments:
filter_item: An object conforming to the frame filter
interface.
Returns:
The enabled state of the frame filter from the "enabled"
attribute, or False.
"""
# If the filter class is badly implemented when called from the
# Python filter command, do not cease filter operations, just set
# enabled to False.
return getattr(filter_item, "enabled", False)
def set_enabled(filter_item, state):
"""Internal Worker function to set the frame-filter's enabled
state.
Arguments:
filter_item: An object conforming to the frame filter
interface.
state: True or False, depending on desired state.
"""
filter_item.enabled = state
def return_list(name):
"""Internal Worker function to return the frame filter
dictionary, depending on the name supplied as an argument. If the
name is not "all", "global" or "progspace", it is assumed to name
an object-file.
Arguments:
name: The name of the list, as specified by GDB user commands.
Returns:
    A dictionary object for a single specified dictionary, or a
    chained iterator over all registered frame filters for "all".
Raises:
gdb.GdbError: A dictionary of that name cannot be found.
"""
# If all dictionaries are wanted in the case of "all" we
# cannot return a combined dictionary as keys() may clash in
# between different dictionaries. As we just want all the frame
# filters to enable/disable them all, just return the combined
# items() as a chained iterator of dictionary values.
if name == "all":
glob = gdb.frame_filters.values()
prog = gdb.current_progspace().frame_filters.values()
return_iter = itertools.chain(glob, prog)
for objfile in gdb.objfiles():
return_iter = itertools.chain(return_iter, objfile.frame_filters.values())
return return_iter
if name == "global":
return gdb.frame_filters
else:
if name == "progspace":
cp = gdb.current_progspace()
return cp.frame_filters
else:
for objfile in gdb.objfiles():
if name == objfile.filename:
return objfile.frame_filters
msg = "Cannot find frame-filter dictionary for '" + name + "'"
raise gdb.GdbError(msg)
def _sort_list():
"""Internal Worker function to merge all known frame-filter
lists, prune any filters with the state set to "disabled", and
sort the list on the frame-filter's "priority" attribute.
Returns:
sorted_list: A sorted, pruned list of frame filters to
execute.
"""
all_filters = return_list("all")
sorted_frame_filters = sorted(all_filters, key=get_priority, reverse=True)
sorted_frame_filters = filter(get_enabled, sorted_frame_filters)
return sorted_frame_filters
def execute_frame_filters(frame, frame_low, frame_high):
"""Internal function called from GDB that will execute the chain
of frame filters. Each filter is executed in priority order.
After the execution completes, slice the iterator to frame_low -
frame_high range.
Arguments:
frame: The initial frame.
frame_low: The low range of the slice. If this is a negative
integer then it indicates a backward slice (ie bt -4) which
counts backward from the last frame in the backtrace.
frame_high: The high range of the slice. If this is -1 then
it indicates all frames until the end of the stack from
frame_low.
Returns:
frame_iterator: The sliced iterator after all frame
    filters have had a chance to execute, or None if no frame
filters are registered.
"""
# Get a sorted list of frame filters.
sorted_list = list(_sort_list())
# Check to see if there are any frame-filters. If not, just
# return None and let default backtrace printing occur.
if len(sorted_list) == 0:
return None
frame_iterator = FrameIterator(frame)
# Apply a basic frame decorator to all gdb.Frames. This unifies
# the interface. Python 3.x moved the itertools.imap
# functionality to map(), so check if it is available.
if hasattr(itertools, "imap"):
frame_iterator = itertools.imap(FrameDecorator, frame_iterator)
else:
frame_iterator = map(FrameDecorator, frame_iterator)
for ff in sorted_list:
frame_iterator = ff.filter(frame_iterator)
# Slicing
# Is this a slice from the end of the backtrace, ie bt -2?
if frame_low < 0:
count = 0
slice_length = abs(frame_low)
        # We cannot use the maxlen argument of deque: it was only added
        # in Python 2.6, and GDB may be built against an older Python.
sliced = collections.deque()
for frame_item in frame_iterator:
if count >= slice_length:
sliced.popleft()
count = count + 1
sliced.append(frame_item)
return iter(sliced)
# -1 for frame_high means until the end of the backtrace. Set to
# None if that is the case, to indicate to itertools.islice to
# slice to the end of the iterator.
if frame_high == -1:
frame_high = None
else:
# As frames start from 0, add one to frame_high so islice
# correctly finds the end
frame_high = frame_high + 1
sliced = itertools.islice(frame_iterator, frame_low, frame_high)
return sliced
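# Illustrative usage sketch (not part of the original file): a minimal
# frame filter conforming to the interface the helpers above expect
# (name/priority/enabled attributes plus a filter method).  The filter
# name is arbitrary; registration is left commented out so that loading
# this module has no side effects.
class _ExamplePassThroughFilter(object):
    def __init__(self):
        self.name = "example-pass-through"
        self.priority = 100
        self.enabled = True
        # gdb.frame_filters[self.name] = self
    def filter(self, frame_iter):
        # A real filter would wrap or drop frames here, for example:
        #     return map(FrameDecorator, frame_iter)
        return frame_iter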
| cuda-gdb-master | gdb/python/lib/gdb/frames.py |
# Extended prompt utilities.
# Copyright (C) 2011-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Extended prompt library functions."""
import gdb
import os
def _prompt_pwd(ignore):
"The current working directory."
return os.getcwd()
def _prompt_object_attr(func, what, attr, nattr):
"""Internal worker for fetching GDB attributes."""
if attr is None:
attr = nattr
try:
obj = func()
except gdb.error:
return "<no %s>" % what
if hasattr(obj, attr):
result = getattr(obj, attr)
if callable(result):
result = result()
return result
else:
return "<no attribute %s on current %s>" % (attr, what)
def _prompt_frame(attr):
"The selected frame; an argument names a frame parameter."
return _prompt_object_attr(gdb.selected_frame, "frame", attr, "name")
def _prompt_thread(attr):
"The selected thread; an argument names a thread parameter."
return _prompt_object_attr(gdb.selected_thread, "thread", attr, "num")
def _prompt_version(attr):
"The version of GDB."
return gdb.VERSION
def _prompt_esc(attr):
"The ESC character."
return "\033"
def _prompt_bs(attr):
"A backslash."
return "\\"
def _prompt_n(attr):
"A newline."
return "\n"
def _prompt_r(attr):
"A carriage return."
return "\r"
def _prompt_param(attr):
"A parameter's value; the argument names the parameter."
return gdb.parameter(attr)
def _prompt_noprint_begin(attr):
"Begins a sequence of non-printing characters."
return "\001"
def _prompt_noprint_end(attr):
"Ends a sequence of non-printing characters."
return "\002"
prompt_substitutions = {
"e": _prompt_esc,
"\\": _prompt_bs,
"n": _prompt_n,
"r": _prompt_r,
"v": _prompt_version,
"w": _prompt_pwd,
"f": _prompt_frame,
"t": _prompt_thread,
"p": _prompt_param,
"[": _prompt_noprint_begin,
"]": _prompt_noprint_end,
}
def prompt_help():
"""Generate help dynamically from the __doc__ strings of attribute
functions."""
result = ""
keys = sorted(prompt_substitutions.keys())
for key in keys:
result += " \\%s\t%s\n" % (key, prompt_substitutions[key].__doc__)
result += """
A substitution can be used in a simple form, like "\\f".
An argument can also be passed to it, like "\\f{name}".
The meaning of the argument depends on the particular substitution."""
return result
def substitute_prompt(prompt):
"Perform substitutions on PROMPT."
result = ""
plen = len(prompt)
i = 0
while i < plen:
if prompt[i] == "\\":
i = i + 1
if i >= plen:
break
cmdch = prompt[i]
if cmdch in prompt_substitutions:
cmd = prompt_substitutions[cmdch]
if i + 1 < plen and prompt[i + 1] == "{":
j = i + 1
while j < plen and prompt[j] != "}":
j = j + 1
# Just ignore formatting errors.
if j >= plen or prompt[j] != "}":
arg = None
else:
arg = prompt[i + 2 : j]
i = j
else:
arg = None
result += str(cmd(arg))
else:
# Unrecognized escapes are turned into the escaped
# character itself.
result += prompt[i]
else:
result += prompt[i]
i = i + 1
return result
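# Illustrative usage sketch (not part of the original file): expanding an
# extended-prompt string with the substitutions defined above.  The
# result depends on the current GDB session (version, working directory,
# selected frame), so the value in the comment is only indicative.
def _example_substitute_prompt():
    # e.g. 'gdb 12.1 /home/user (main) > ', with '<no frame>' when
    # nothing is being debugged.
    return substitute_prompt("gdb \\v \\w (\\f) > ")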
| cuda-gdb-master | gdb/python/lib/gdb/prompt.py |
# Copyright (C) 2013-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
class FrameDecorator(object):
"""Basic implementation of a Frame Decorator"""
""" This base frame decorator decorates a frame or another frame
decorator, and provides convenience methods. If this object is
wrapping a frame decorator, defer to that wrapped object's method
if it has one. This allows for frame decorators that have
sub-classed FrameDecorator object, but also wrap other frame
decorators on the same frame to correctly execute.
E.g
If the result of frame filters running means we have one gdb.Frame
wrapped by multiple frame decorators, all sub-classed from
FrameDecorator, the resulting hierarchy will be:
Decorator1
-- (wraps) Decorator2
-- (wraps) FrameDecorator
-- (wraps) gdb.Frame
In this case we have two frame decorators, both of which are
sub-classed from FrameDecorator. If Decorator1 just overrides the
'function' method, then all of the other methods are carried out
by the super-class FrameDecorator. But Decorator2 may have
    overridden other methods, so FrameDecorator will look at the
'base' parameter and defer to that class's methods. And so on,
down the chain."""
# 'base' can refer to a gdb.Frame or another frame decorator. In
# the latter case, the child class will have called the super
# method and _base will be an object conforming to the Frame Filter
# class.
def __init__(self, base):
self._base = base
@staticmethod
def _is_limited_frame(frame):
"""Internal utility to determine if the frame is special or
limited."""
sal = frame.find_sal()
if (
not sal.symtab
or not sal.symtab.filename
or frame.type() == gdb.DUMMY_FRAME
or frame.type() == gdb.SIGTRAMP_FRAME
):
return True
return False
def elided(self):
"""Return any elided frames that this class might be
wrapping, or None."""
if hasattr(self._base, "elided"):
return self._base.elided()
return None
def function(self):
"""Return the name of the frame's function or an address of
the function of the frame. First determine if this is a
special frame. If not, try to determine filename from GDB's
frame internal function API. Finally, if a name cannot be
determined return the address. If this function returns an
address, GDB will attempt to determine the function name from
its internal minimal symbols store (for example, for inferiors
without debug-info)."""
# Both gdb.Frame, and FrameDecorator have a method called
# "function", so determine which object this is.
if not isinstance(self._base, gdb.Frame):
if hasattr(self._base, "function"):
# If it is not a gdb.Frame, and there is already a
# "function" method, use that.
return self._base.function()
frame = self.inferior_frame()
if frame.type() == gdb.DUMMY_FRAME:
return "<function called from gdb>"
elif frame.type() == gdb.SIGTRAMP_FRAME:
return "<signal handler called>"
func = frame.function()
# If we cannot determine the function name, return the
# address. If GDB detects an integer value from this function
# it will attempt to find the function name from minimal
# symbols via its own internal functions.
if func is None:
pc = frame.pc()
return pc
return str(func)
def address(self):
"""Return the address of the frame's pc"""
if hasattr(self._base, "address"):
return self._base.address()
frame = self.inferior_frame()
return frame.pc()
def filename(self):
"""Return the filename associated with this frame, detecting
        and returning the appropriate library name if this is a shared
library."""
if hasattr(self._base, "filename"):
return self._base.filename()
frame = self.inferior_frame()
sal = frame.find_sal()
if not sal.symtab or not sal.symtab.filename:
pc = frame.pc()
return gdb.solib_name(pc)
else:
return sal.symtab.filename
def frame_args(self):
"""Return an iterable of frame arguments for this frame, if
any. The iterable object contains objects conforming with the
Symbol/Value interface. If there are no frame arguments, or
if this frame is deemed to be a special case, return None."""
if hasattr(self._base, "frame_args"):
return self._base.frame_args()
frame = self.inferior_frame()
if self._is_limited_frame(frame):
return None
args = FrameVars(frame)
return args.fetch_frame_args()
def frame_locals(self):
"""Return an iterable of local variables for this frame, if
any. The iterable object contains objects conforming with the
Symbol/Value interface. If there are no frame locals, or if
this frame is deemed to be a special case, return None."""
if hasattr(self._base, "frame_locals"):
return self._base.frame_locals()
frame = self.inferior_frame()
if self._is_limited_frame(frame):
return None
args = FrameVars(frame)
return args.fetch_frame_locals()
def line(self):
"""Return line number information associated with the frame's
pc. If symbol table/line information does not exist, or if
this frame is deemed to be a special case, return None"""
if hasattr(self._base, "line"):
return self._base.line()
frame = self.inferior_frame()
if self._is_limited_frame(frame):
return None
sal = frame.find_sal()
if sal:
return sal.line
else:
return None
def inferior_frame(self):
"""Return the gdb.Frame underpinning this frame decorator."""
# If 'base' is a frame decorator, we want to call its inferior
# frame method. If '_base' is a gdb.Frame, just return that.
if hasattr(self._base, "inferior_frame"):
return self._base.inferior_frame()
return self._base
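# Illustrative usage sketch (not part of the original file): a
# FrameDecorator subclass that overrides only 'function', deferring every
# other method to this base class, as described in the class comment
# above.  A frame filter would wrap each frame in this decorator.
class _ExampleUpperCaseDecorator(FrameDecorator):
    def __init__(self, base):
        super(_ExampleUpperCaseDecorator, self).__init__(base)
    def function(self):
        name = super(_ExampleUpperCaseDecorator, self).function()
        # The base may return an address instead of a name; only
        # upper-case real names.
        return name.upper() if isinstance(name, str) else name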
class SymValueWrapper(object):
"""A container class conforming to the Symbol/Value interface
which holds frame locals or frame arguments."""
def __init__(self, symbol, value):
self.sym = symbol
self.val = value
def value(self):
"""Return the value associated with this symbol, or None"""
return self.val
def symbol(self):
"""Return the symbol, or Python text, associated with this
symbol, or None"""
return self.sym
class FrameVars(object):
"""Utility class to fetch and store frame local variables, or
frame arguments."""
def __init__(self, frame):
self.frame = frame
self.symbol_class = {
gdb.SYMBOL_LOC_STATIC: True,
gdb.SYMBOL_LOC_REGISTER: True,
gdb.SYMBOL_LOC_ARG: True,
gdb.SYMBOL_LOC_REF_ARG: True,
gdb.SYMBOL_LOC_LOCAL: True,
gdb.SYMBOL_LOC_REGPARM_ADDR: True,
gdb.SYMBOL_LOC_COMPUTED: True,
}
def fetch_b(self, sym):
"""Local utility method to determine if according to Symbol
type whether it should be included in the iterator. Not all
symbols are fetched, and only symbols that return
True from this method should be fetched."""
# SYM may be a string instead of a symbol in the case of
# synthetic local arguments or locals. If that is the case,
# always fetch.
if isinstance(sym, str):
return True
sym_type = sym.addr_class
return self.symbol_class.get(sym_type, False)
def fetch_frame_locals(self):
"""Public utility method to fetch frame local variables for
the stored frame. Frame arguments are not fetched. If there
are no frame local variables, return an empty list."""
lvars = []
try:
block = self.frame.block()
except RuntimeError:
block = None
while block is not None:
if block.is_global or block.is_static:
break
for sym in block:
if sym.is_argument:
continue
if self.fetch_b(sym):
lvars.append(SymValueWrapper(sym, None))
block = block.superblock
return lvars
def fetch_frame_args(self):
"""Public utility method to fetch frame arguments for the
stored frame. Frame arguments are the only type fetched. If
there are no frame argument variables, return an empty list."""
args = []
try:
block = self.frame.block()
except RuntimeError:
block = None
while block is not None:
if block.function is not None:
break
block = block.superblock
if block is not None:
for sym in block:
if not sym.is_argument:
continue
args.append(SymValueWrapper(sym, None))
return args
| cuda-gdb-master | gdb/python/lib/gdb/FrameDecorator.py |
# Pretty-printer utilities.
# Copyright (C) 2010-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for working with pretty-printers."""
import gdb
import gdb.types
import re
import sys
class PrettyPrinter(object):
"""A basic pretty-printer.
Attributes:
name: A unique string among all printers for the context in which
it is defined (objfile, progspace, or global(gdb)), and should
meaningfully describe what can be pretty-printed.
E.g., "StringPiece" or "protobufs".
subprinters: An iterable object with each element having a `name'
attribute, and, potentially, "enabled" attribute.
Or this is None if there are no subprinters.
enabled: A boolean indicating if the printer is enabled.
Subprinters are for situations where "one" pretty-printer is actually a
collection of several printers. E.g., The libstdc++ pretty-printer has
a pretty-printer for each of several different types, based on regexps.
"""
# While one might want to push subprinters into the subclass, it's
# present here to formalize such support to simplify
# commands/pretty_printers.py.
def __init__(self, name, subprinters=None):
self.name = name
self.subprinters = subprinters
self.enabled = True
def __call__(self, val):
# The subclass must define this.
raise NotImplementedError("PrettyPrinter __call__")
class SubPrettyPrinter(object):
"""Baseclass for sub-pretty-printers.
Sub-pretty-printers needn't use this, but it formalizes what's needed.
Attributes:
name: The name of the subprinter.
enabled: A boolean indicating if the subprinter is enabled.
"""
def __init__(self, name):
self.name = name
self.enabled = True
def register_pretty_printer(obj, printer, replace=False):
"""Register pretty-printer PRINTER with OBJ.
The printer is added to the front of the search list, thus one can override
an existing printer if one needs to. Use a different name when overriding
an existing printer, otherwise an exception will be raised; multiple
printers with the same name are disallowed.
Arguments:
obj: Either an objfile, progspace, or None (in which case the printer
is registered globally).
printer: Either a function of one argument (old way) or any object
which has attributes: name, enabled, __call__.
replace: If True replace any existing copy of the printer.
Otherwise if the printer already exists raise an exception.
Returns:
Nothing.
Raises:
TypeError: A problem with the type of the printer.
ValueError: The printer's name contains a semicolon ";".
RuntimeError: A printer with the same name is already registered.
If the caller wants the printer to be listable and disableable, it must
follow the PrettyPrinter API. This applies to the old way (functions) too.
If printer is an object, __call__ is a method of two arguments:
self, and the value to be pretty-printed. See PrettyPrinter.
"""
# Watch for both __name__ and name.
# Functions get the former for free, but we don't want to use an
# attribute named __foo__ for pretty-printers-as-objects.
# If printer has both, we use `name'.
if not hasattr(printer, "__name__") and not hasattr(printer, "name"):
raise TypeError("printer missing attribute: name")
if hasattr(printer, "name") and not hasattr(printer, "enabled"):
raise TypeError("printer missing attribute: enabled")
if not hasattr(printer, "__call__"):
raise TypeError("printer missing attribute: __call__")
if hasattr(printer, "name"):
name = printer.name
else:
name = printer.__name__
if obj is None or obj is gdb:
if gdb.parameter("verbose"):
gdb.write("Registering global %s pretty-printer ...\n" % name)
obj = gdb
else:
if gdb.parameter("verbose"):
gdb.write(
"Registering %s pretty-printer for %s ...\n" % (name, obj.filename)
)
# Printers implemented as functions are old-style. In order to not risk
# breaking anything we do not check __name__ here.
if hasattr(printer, "name"):
if not isinstance(printer.name, str):
raise TypeError("printer name is not a string")
# If printer provides a name, make sure it doesn't contain ";".
# Semicolon is used by the info/enable/disable pretty-printer commands
# to delimit subprinters.
if printer.name.find(";") >= 0:
raise ValueError("semicolon ';' in printer name")
# Also make sure the name is unique.
# Alas, we can't do the same for functions and __name__, they could
# all have a canonical name like "lookup_function".
# PERF: gdb records printers in a list, making this inefficient.
i = 0
for p in obj.pretty_printers:
if hasattr(p, "name") and p.name == printer.name:
if replace:
del obj.pretty_printers[i]
break
else:
raise RuntimeError(
"pretty-printer already registered: %s" % printer.name
)
i = i + 1
obj.pretty_printers.insert(0, printer)
class RegexpCollectionPrettyPrinter(PrettyPrinter):
"""Class for implementing a collection of regular-expression based pretty-printers.
Intended usage:
pretty_printer = RegexpCollectionPrettyPrinter("my_library")
pretty_printer.add_printer("myclass1", "^myclass1$", MyClass1Printer)
...
pretty_printer.add_printer("myclassN", "^myclassN$", MyClassNPrinter)
register_pretty_printer(obj, pretty_printer)
"""
class RegexpSubprinter(SubPrettyPrinter):
def __init__(self, name, regexp, gen_printer):
super(RegexpCollectionPrettyPrinter.RegexpSubprinter, self).__init__(name)
self.regexp = regexp
self.gen_printer = gen_printer
self.compiled_re = re.compile(regexp)
def __init__(self, name):
super(RegexpCollectionPrettyPrinter, self).__init__(name, [])
def add_printer(self, name, regexp, gen_printer):
"""Add a printer to the list.
The printer is added to the end of the list.
Arguments:
name: The name of the subprinter.
regexp: The regular expression, as a string.
gen_printer: A function/method that given a value returns an
object to pretty-print it.
Returns:
Nothing.
"""
# NOTE: A previous version made the name of each printer the regexp.
# That makes it awkward to pass to the enable/disable commands (it's
# cumbersome to make a regexp of a regexp). So now the name is a
# separate parameter.
self.subprinters.append(self.RegexpSubprinter(name, regexp, gen_printer))
def __call__(self, val):
"""Lookup the pretty-printer for the provided value."""
# Get the type name.
typename = gdb.types.get_basic_type(val.type).tag
if not typename:
typename = val.type.name
if not typename:
return None
# Iterate over table of type regexps to determine
# if a printer is registered for that type.
# Return an instantiation of the printer if found.
for printer in self.subprinters:
if printer.enabled and printer.compiled_re.search(typename):
return printer.gen_printer(val)
# Cannot find a pretty printer. Return None.
return None
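# Illustrative usage sketch (not part of the original file), following the
# "Intended usage" in the class docstring above.  "MyClass1" and its
# "value" member are hypothetical; only the wiring is real API.
class _ExampleMyClass1Printer(object):
    def __init__(self, val):
        self.val = val
    def to_string(self):
        return "MyClass1(value=%s)" % self.val["value"]
def _example_build_pretty_printer():
    pp = RegexpCollectionPrettyPrinter("my_library")
    pp.add_printer("myclass1", "^MyClass1$", _ExampleMyClass1Printer)
    # Register globally (None), or pass an objfile/progspace instead:
    #     register_pretty_printer(None, pp)
    return pp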
# A helper class for printing enum types. This class is instantiated
# with a list of enumerators to print a particular Value.
class _EnumInstance:
def __init__(self, enumerators, val):
self.enumerators = enumerators
self.val = val
def to_string(self):
flag_list = []
v = int(self.val)
any_found = False
for (e_name, e_value) in self.enumerators:
if v & e_value != 0:
flag_list.append(e_name)
v = v & ~e_value
any_found = True
if not any_found or v != 0:
# Leftover value.
flag_list.append("<unknown: 0x%x>" % v)
return "0x%x [%s]" % (int(self.val), " | ".join(flag_list))
class FlagEnumerationPrinter(PrettyPrinter):
"""A pretty-printer which can be used to print a flag-style enumeration.
A flag-style enumeration is one where the enumerators are or'd
together to create values. The new printer will print these
symbolically using '|' notation. The printer must be registered
manually. This printer is most useful when an enum is flag-like,
but has some overlap. GDB's built-in printing will not handle
this case, but this printer will attempt to."""
def __init__(self, enum_type):
super(FlagEnumerationPrinter, self).__init__(enum_type)
self.initialized = False
def __call__(self, val):
if not self.initialized:
self.initialized = True
flags = gdb.lookup_type(self.name)
self.enumerators = []
for field in flags.fields():
self.enumerators.append((field.name, field.enumval))
# Sorting the enumerators by value usually does the right
# thing.
self.enumerators.sort(key=lambda x: x[1])
if self.enabled:
return _EnumInstance(self.enumerators, val)
else:
return None
# Builtin pretty-printers.
# The set is defined as empty, and files in printing/*.py add their printers
# to this with add_builtin_pretty_printer.
_builtin_pretty_printers = RegexpCollectionPrettyPrinter("builtin")
register_pretty_printer(None, _builtin_pretty_printers)
# Add a builtin pretty-printer.
def add_builtin_pretty_printer(name, regexp, printer):
_builtin_pretty_printers.add_printer(name, regexp, printer)
| cuda-gdb-master | gdb/python/lib/gdb/printing.py |
# Pretty-printers for bounds registers.
# Copyright (C) 2013-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb.printing
class MpxBound128Printer:
"""Adds size field to a mpx __gdb_builtin_type_bound128 type."""
def __init__(self, val):
self.val = val
def to_string(self):
upper = self.val["ubound"]
lower = self.val["lbound"]
size = upper - lower
if size > -1:
size = size + 1
result = "{lbound = %s, ubound = %s} : size %s" % (lower, upper, size)
return result
gdb.printing.add_builtin_pretty_printer(
"mpx_bound128", "^builtin_type_bound128", MpxBound128Printer
)
| cuda-gdb-master | gdb/python/lib/gdb/printer/bound_registers.py |
# Copyright (C) 2014-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| cuda-gdb-master | gdb/python/lib/gdb/printer/__init__.py |
# Copyright (C) 2012-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
| cuda-gdb-master | gdb/python/lib/gdb/function/__init__.py |
# Useful gdb string convenience functions.
# Copyright (C) 2012-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""$_memeq, $_strlen, $_streq, $_regex"""
import gdb
import re
class _MemEq(gdb.Function):
"""$_memeq - compare bytes of memory.
Usage: $_memeq (A, B, LEN)
Returns:
True if LEN bytes at A and B compare equally."""
def __init__(self):
super(_MemEq, self).__init__("_memeq")
def invoke(self, a, b, length):
if length < 0:
raise ValueError("length must be non-negative")
if length == 0:
return True
# The argument(s) to vector are [low_bound,]high_bound.
byte_vector = gdb.lookup_type("char").vector(length - 1)
ptr_byte_vector = byte_vector.pointer()
a_ptr = a.reinterpret_cast(ptr_byte_vector)
b_ptr = b.reinterpret_cast(ptr_byte_vector)
return a_ptr.dereference() == b_ptr.dereference()
class _StrLen(gdb.Function):
"""$_strlen - compute string length.
Usage: $_strlen (A)
Returns:
Length of string A, assumed to be a string in the current language."""
def __init__(self):
super(_StrLen, self).__init__("_strlen")
def invoke(self, a):
s = a.string()
return len(s)
class _StrEq(gdb.Function):
"""$_streq - check string equality.
Usage: $_streq (A, B)
Returns:
True if A and B are identical strings in the current language.
Example (amd64-linux):
catch syscall open
cond $bpnum $_streq((char*) $rdi, "foo")"""
def __init__(self):
super(_StrEq, self).__init__("_streq")
def invoke(self, a, b):
return a.string() == b.string()
class _RegEx(gdb.Function):
"""$_regex - check if a string matches a regular expression.
Usage: $_regex (STRING, REGEX)
Returns:
True if string STRING (in the current language) matches the
regular expression REGEX."""
def __init__(self):
super(_RegEx, self).__init__("_regex")
def invoke(self, string, regex):
s = string.string()
r = re.compile(regex.string())
return bool(r.match(s))
# GDB will import us automagically via gdb/__init__.py.
_MemEq()
_StrLen()
_StrEq()
_RegEx()
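# Illustrative usage sketch (not part of the original file): once this
# module is loaded, the convenience functions above can be used in GDB
# expressions (e.g. breakpoint conditions) or evaluated from Python.
# Assumes the current language accepts C-style string literals.
def _example_convenience_functions():
    length = gdb.parse_and_eval('$_strlen("abcd")')                 # 4
    same = gdb.parse_and_eval('$_streq("foo", "foo")')              # true
    match = gdb.parse_and_eval('$_regex("foo123", "^foo[0-9]+$")')  # true
    return length, same, match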
| cuda-gdb-master | gdb/python/lib/gdb/function/strfns.py |
# Copyright (C) 2016-2022 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
class _AsString(gdb.Function):
"""Return the string representation of a value.
Usage: $_as_string (VALUE)
Arguments:
VALUE: any value
Returns:
The string representation of the value."""
def __init__(self):
super(_AsString, self).__init__("_as_string")
def invoke(self, val):
return str(val)
_AsString()
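# Illustrative usage sketch (not part of the original file): $_as_string
# can be used directly in GDB expressions, e.g. "print $_as_string($pc)",
# or evaluated from Python.
def _example_as_string():
    return gdb.parse_and_eval('$_as_string(42)')  # the string "42"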
| cuda-gdb-master | gdb/python/lib/gdb/function/as_string.py |